* [PATCH 1/3] [RFC] FC protocol definition header files
2008-08-08 23:26 [PATCH 0/3][RFC] libfc and fcoe Robert Love
@ 2008-08-08 23:26 ` Robert Love
2008-08-08 23:26 ` [PATCH 2/3] [RFC] libfc: a modular software Fibre Channel implementation Robert Love
2008-08-08 23:26 ` [PATCH 3/3] [RFC] fcoe: Fibre Channel over Ethernet Robert Love
2 siblings, 0 replies; 4+ messages in thread
From: Robert Love @ 2008-08-08 23:26 UTC (permalink / raw)
To: linux-scsi
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Yi Zou <yi.zou@intel.com>
Signed-off-by: Steve Ma <steve.ma@intel.com>
---
include/scsi/fc/fc_els.h | 802 +++++++++++++++++++++++++++++++++++++++++++
include/scsi/fc/fc_encaps.h | 138 +++++++
include/scsi/fc/fc_fc2.h | 124 +++++++
include/scsi/fc/fc_fcp.h | 199 +++++++++++
include/scsi/fc/fc_fs.h | 344 ++++++++++++++++++
include/scsi/fc/fc_gs.h | 93 +++++
include/scsi/fc/fc_ns.h | 159 +++++++++
7 files changed, 1859 insertions(+), 0 deletions(-)
diff --git a/include/scsi/fc/fc_els.h b/include/scsi/fc/fc_els.h
new file mode 100644
index 0000000..af4bf0c
--- /dev/null
+++ b/include/scsi/fc/fc_els.h
@@ -0,0 +1,802 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_ELS_H_
+#define _FC_ELS_H_
+
+/*
+ * Fibre Channel Switch - Enhanced Link Services definitions.
+ * From T11 FC-LS Rev 1.2 June 7, 2005.
+ */
+
+/*
+ * ELS Command codes - byte 0 of the frame payload
+ */
+enum fc_els_cmd {
+ ELS_LS_RJT = 0x01, /* ELS reject */
+ ELS_LS_ACC = 0x02, /* ELS Accept */
+ ELS_PLOGI = 0x03, /* N_Port login */
+ ELS_FLOGI = 0x04, /* F_Port login */
+ ELS_LOGO = 0x05, /* Logout */
+ ELS_ABTX = 0x06, /* Abort exchange - obsolete */
+ ELS_RCS = 0x07, /* read connection status */
+ ELS_RES = 0x08, /* read exchange status block */
+ ELS_RSS = 0x09, /* read sequence status block */
+ ELS_RSI = 0x0a, /* read sequence initiative */
+ ELS_ESTS = 0x0b, /* establish streaming */
+ ELS_ESTC = 0x0c, /* estimate credit */
+ ELS_ADVC = 0x0d, /* advise credit */
+ ELS_RTV = 0x0e, /* read timeout value */
+ ELS_RLS = 0x0f, /* read link error status block */
+ ELS_ECHO = 0x10, /* echo */
+ ELS_TEST = 0x11, /* test */
+ ELS_RRQ = 0x12, /* reinstate recovery qualifier */
+ ELS_REC = 0x13, /* read exchange concise */
+ ELS_SRR = 0x14, /* sequence retransmission request */
+ ELS_PRLI = 0x20, /* process login */
+ ELS_PRLO = 0x21, /* process logout */
+ ELS_SCN = 0x22, /* state change notification */
+ ELS_TPLS = 0x23, /* test process login state */
+ ELS_TPRLO = 0x24, /* third party process logout */
+ ELS_LCLM = 0x25, /* login control list mgmt (obs) */
+ ELS_GAID = 0x30, /* get alias_ID */
+ ELS_FACT = 0x31, /* fabric activate alias_id */
+ ELS_FDACDT = 0x32, /* fabric deactivate alias_id */
+ ELS_NACT = 0x33, /* N-port activate alias_id */
+ ELS_NDACT = 0x34, /* N-port deactivate alias_id */
+ ELS_QOSR = 0x40, /* quality of service request */
+ ELS_RVCS = 0x41, /* read virtual circuit status */
+ ELS_PDISC = 0x50, /* discover N_port service params */
+ ELS_FDISC = 0x51, /* discover F_port service params */
+ ELS_ADISC = 0x52, /* discover address */
+ ELS_RNC = 0x53, /* report node cap (obs) */
+ ELS_FARP_REQ = 0x54, /* FC ARP request */
+ ELS_FARP_REPL = 0x55, /* FC ARP reply */
+ ELS_RPS = 0x56, /* read port status block */
+ ELS_RPL = 0x57, /* read port list */
+ ELS_RPBC = 0x58, /* read port buffer condition */
+ ELS_FAN = 0x60, /* fabric address notification */
+ ELS_RSCN = 0x61, /* registered state change notification */
+ ELS_SCR = 0x62, /* state change registration */
+ ELS_RNFT = 0x63, /* report node FC-4 types */
+ ELS_CSR = 0x68, /* clock synch. request */
+ ELS_CSU = 0x69, /* clock synch. update */
+ ELS_LINIT = 0x70, /* loop initialize */
+ ELS_LSTS = 0x72, /* loop status */
+ ELS_RNID = 0x78, /* request node ID data */
+ ELS_RLIR = 0x79, /* registered link incident report */
+ ELS_LIRR = 0x7a, /* link incident record registration */
+ ELS_SRL = 0x7b, /* scan remote loop */
+ ELS_SBRP = 0x7c, /* set bit-error reporting params */
+ ELS_RPSC = 0x7d, /* report speed capabilities */
+ ELS_QSA = 0x7e, /* query security attributes */
+ ELS_EVFP = 0x7f, /* exchange virt. fabrics params */
+ ELS_LKA = 0x80, /* link keep-alive */
+ ELS_AUTH_ELS = 0x90, /* authentication ELS */
+};
+
+/*
+ * Initializer useful for decoding table.
+ * Please keep this in sync with the above definitions.
+ */
+#define FC_ELS_CMDS_INIT { \
+ [ELS_LS_RJT] = "LS_RJT", \
+ [ELS_LS_ACC] = "LS_ACC", \
+ [ELS_PLOGI] = "PLOGI", \
+ [ELS_FLOGI] = "FLOGI", \
+ [ELS_LOGO] = "LOGO", \
+ [ELS_ABTX] = "ABTX", \
+ [ELS_RCS] = "RCS", \
+ [ELS_RES] = "RES", \
+ [ELS_RSS] = "RSS", \
+ [ELS_RSI] = "RSI", \
+ [ELS_ESTS] = "ESTS", \
+ [ELS_ESTC] = "ESTC", \
+ [ELS_ADVC] = "ADVC", \
+ [ELS_RTV] = "RTV", \
+ [ELS_RLS] = "RLS", \
+ [ELS_ECHO] = "ECHO", \
+ [ELS_TEST] = "TEST", \
+ [ELS_RRQ] = "RRQ", \
+ [ELS_REC] = "REC", \
+ [ELS_SRR] = "SRR", \
+ [ELS_PRLI] = "PRLI", \
+ [ELS_PRLO] = "PRLO", \
+ [ELS_SCN] = "SCN", \
+ [ELS_TPLS] = "TPLS", \
+ [ELS_TPRLO] = "TPRLO", \
+ [ELS_LCLM] = "LCLM", \
+ [ELS_GAID] = "GAID", \
+ [ELS_FACT] = "FACT", \
+ [ELS_FDACDT] = "FDACDT", \
+ [ELS_NACT] = "NACT", \
+ [ELS_NDACT] = "NDACT", \
+ [ELS_QOSR] = "QOSR", \
+ [ELS_RVCS] = "RVCS", \
+ [ELS_PDISC] = "PDISC", \
+ [ELS_FDISC] = "FDISC", \
+ [ELS_ADISC] = "ADISC", \
+ [ELS_RNC] = "RNC", \
+ [ELS_FARP_REQ] = "FARP_REQ", \
+ [ELS_FARP_REPL] = "FARP_REPL", \
+ [ELS_RPS] = "RPS", \
+ [ELS_RPL] = "RPL", \
+ [ELS_RPBC] = "RPBC", \
+ [ELS_FAN] = "FAN", \
+ [ELS_RSCN] = "RSCN", \
+ [ELS_SCR] = "SCR", \
+ [ELS_RNFT] = "RNFT", \
+ [ELS_CSR] = "CSR", \
+ [ELS_CSU] = "CSU", \
+ [ELS_LINIT] = "LINIT", \
+ [ELS_LSTS] = "LSTS", \
+ [ELS_RNID] = "RNID", \
+ [ELS_RLIR] = "RLIR", \
+ [ELS_LIRR] = "LIRR", \
+ [ELS_SRL] = "SRL", \
+ [ELS_SBRP] = "SBRP", \
+ [ELS_RPSC] = "RPSC", \
+ [ELS_QSA] = "QSA", \
+ [ELS_EVFP] = "EVFP", \
+ [ELS_LKA] = "LKA", \
+ [ELS_AUTH_ELS] = "AUTH_ELS", \
+}
+
+/*
+ * LS_ACC payload.
+ */
+struct fc_els_ls_acc {
+ __u8 la_cmd; /* command code ELS_LS_ACC */
+ __u8 la_resv[3]; /* reserved */
+};
+
+/*
+ * ELS reject payload.
+ */
+struct fc_els_ls_rjt {
+ __u8 er_cmd; /* command code ELS_LS_RJT */
+ __u8 er_resv[4]; /* reserved must be zero */
+ __u8 er_reason; /* reason (enum fc_els_rjt_reason below) */
+ __u8 er_explan; /* explanation (enum fc_els_rjt_explan below) */
+ __u8 er_vendor; /* vendor specific code */
+};
+
+/*
+ * ELS reject reason codes (er_reason).
+ */
+enum fc_els_rjt_reason {
+ ELS_RJT_NONE = 0, /* no reject - not to be sent */
+ ELS_RJT_INVAL = 0x01, /* invalid ELS command code */
+ ELS_RJT_LOGIC = 0x03, /* logical error */
+ ELS_RJT_BUSY = 0x05, /* logical busy */
+ ELS_RJT_PROT = 0x07, /* protocol error */
+ ELS_RJT_UNAB = 0x09, /* unable to perform command request */
+ ELS_RJT_UNSUP = 0x0b, /* command not supported */
+ ELS_RJT_INPROG = 0x0e, /* command already in progress */
+ ELS_RJT_VENDOR = 0xff, /* vendor specific error */
+};
+
+
+/*
+ * reason code explanation (er_explan).
+ */
+enum fc_els_rjt_explan {
+ ELS_EXPL_NONE = 0x00, /* No additional explanation */
+ ELS_EXPL_SPP_OPT_ERR = 0x01, /* service parameter error - options */
+ ELS_EXPL_SPP_ICTL_ERR = 0x03, /* service parm error - initiator ctl */
+ ELS_EXPL_AH = 0x11, /* invalid association header */
+ ELS_EXPL_AH_REQ = 0x13, /* association_header required */
+ ELS_EXPL_SID = 0x15, /* invalid originator S_ID */
+ ELS_EXPL_OXID_RXID = 0x17, /* invalid OX_ID-RX_ID combination */
+ ELS_EXPL_INPROG = 0x19, /* Request already in progress */
+ ELS_EXPL_PLOGI_REQD = 0x1e, /* N_Port login required */
+ ELS_EXPL_INSUF_RES = 0x29, /* insufficient resources */
+ ELS_EXPL_UNAB_DATA = 0x2a, /* unable to supply requested data */
+ ELS_EXPL_UNSUPR = 0x2c, /* Request not supported */
+ ELS_EXPL_INV_LEN = 0x2d, /* Invalid payload length */
+ /* TBD - above definitions incomplete */
+};
+
+/*
+ * Common service parameters (N ports).
+ */
+struct fc_els_csp {
+ __u8 sp_hi_ver; /* highest version supported (obs.) */
+ __u8 sp_lo_ver; /* lowest version supported (obs.) */
+ __be16 sp_bb_cred; /* buffer-to-buffer credits */
+ __be16 sp_features; /* common feature flags */
+ __be16 sp_bb_data; /* b-b state number and data field sz */
+ union {
+ struct {
+ __be16 _sp_tot_seq; /* total concurrent sequences */
+ __be16 _sp_rel_off; /* rel. offset by info cat */
+ } sp_plogi;
+ struct {
+ __be32 _sp_r_a_tov; /* resource alloc. timeout msec */
+ } sp_flogi_acc;
+ } sp_u;
+ __be32 sp_e_d_tov; /* error detect timeout value */
+};
+#define sp_tot_seq sp_u.sp_plogi._sp_tot_seq
+#define sp_rel_off sp_u.sp_plogi._sp_rel_off
+#define sp_r_a_tov sp_u.sp_flogi_acc._sp_r_a_tov
+
+#define FC_SP_BB_DATA_MASK 0xfff /* mask for data field size in sp_bb_data */
+
+/*
+ * Minimum and maximum values for max data field size in service parameters.
+ */
+#define FC_SP_MIN_MAX_PAYLOAD FC_MIN_MAX_PAYLOAD
+#define FC_SP_MAX_MAX_PAYLOAD FC_MAX_PAYLOAD
+
+/*
+ * sp_features
+ */
+#define FC_SP_FT_CIRO 0x8000 /* continuously increasing rel. off. */
+#define FC_SP_FT_CLAD 0x8000 /* clean address (in FLOGI LS_ACC) */
+#define FC_SP_FT_RAND 0x4000 /* random relative offset */
+#define FC_SP_FT_VAL 0x2000 /* valid vendor version level */
+#define FC_SP_FT_FPORT 0x1000 /* F port (1) vs. N port (0) */
+#define FC_SP_FT_ABB 0x0800 /* alternate BB_credit management */
+#define FC_SP_FT_EDTR 0x0400 /* E_D_TOV Resolution is nanoseconds */
+#define FC_SP_FT_MCAST 0x0200 /* multicast */
+#define FC_SP_FT_BCAST 0x0100 /* broadcast */
+#define FC_SP_FT_HUNT 0x0080 /* hunt group */
+#define FC_SP_FT_SIMP 0x0040 /* dedicated simplex */
+#define FC_SP_FT_SEC 0x0020 /* reserved for security */
+#define FC_SP_FT_CSYN 0x0010 /* clock synch. supported */
+#define FC_SP_FT_RTTOV 0x0008 /* R_T_TOV value 100 uS, else 100 mS */
+#define FC_SP_FT_HALF 0x0004 /* dynamic half duplex */
+#define FC_SP_FT_SEQC 0x0002 /* SEQ_CNT */
+#define FC_SP_FT_PAYL 0x0001 /* FLOGI payload length 256, else 116 */
+
+/*
+ * Class-specific service parameters.
+ */
+struct fc_els_cssp {
+ __be16 cp_class; /* class flags */
+ __be16 cp_init; /* initiator flags */
+ __be16 cp_recip; /* recipient flags */
+ __be16 cp_rdfs; /* receive data field size */
+ __be16 cp_con_seq; /* concurrent sequences */
+ __be16 cp_ee_cred; /* N-port end-to-end credit */
+ __u8 cp_resv1; /* reserved */
+ __u8 cp_open_seq; /* open sequences per exchange */
+ __u8 _cp_resv2[2]; /* reserved */
+};
+
+/*
+ * cp_class flags.
+ */
+#define FC_CPC_VALID 0x8000 /* class valid */
+#define FC_CPC_IMIX 0x4000 /* intermix mode */
+#define FC_CPC_SEQ 0x0800 /* sequential delivery */
+#define FC_CPC_CAMP 0x0200 /* camp-on */
+#define FC_CPC_PRI 0x0080 /* priority */
+
+/*
+ * cp_init flags.
+ * (TBD: not all flags defined here).
+ */
+#define FC_CPI_CSYN 0x0010 /* clock synch. capable */
+
+/*
+ * cp_recip flags.
+ */
+#define FC_CPR_CSYN 0x0008 /* clock synch. capable */
+
+/*
+ * ELS_FLOGI: Fabric login request.
+ * ELS_PLOGI: Port login request (same format).
+ */
+struct fc_els_flogi {
+ __u8 fl_cmd; /* command */
+ __u8 _fl_resvd[3]; /* must be zero */
+ struct fc_els_csp fl_csp; /* common service parameters */
+ __be64 fl_wwpn; /* port name */
+ __be64 fl_wwnn; /* node name */
+ struct fc_els_cssp fl_cssp[4]; /* class 1-4 service parameters */
+ __u8 fl_vend[16]; /* vendor version level */
+} __attribute__((__packed__));
+
+/*
+ * Process login service parameter page.
+ */
+struct fc_els_spp {
+ __u8 spp_type; /* type code or common service params */
+ __u8 spp_type_ext; /* type code extension */
+ __u8 spp_flags;
+ __u8 _spp_resvd;
+ __be32 spp_orig_pa; /* originator process associator */
+ __be32 spp_resp_pa; /* responder process associator */
+ __be32 spp_params; /* service parameters */
+};
+
+/*
+ * spp_flags.
+ */
+#define FC_SPP_OPA_VAL 0x80 /* originator proc. assoc. valid */
+#define FC_SPP_RPA_VAL 0x40 /* responder proc. assoc. valid */
+#define FC_SPP_EST_IMG_PAIR 0x20 /* establish image pair */
+#define FC_SPP_RESP_MASK 0x0f /* mask for response code (below) */
+
+/*
+ * SPP response code in spp_flags - lower 4 bits.
+ */
+enum fc_els_spp_resp {
+ FC_SPP_RESP_ACK = 1, /* request executed */
+ FC_SPP_RESP_RES = 2, /* unable due to lack of resources */
+ FC_SPP_RESP_INIT = 3, /* initialization not complete */
+ FC_SPP_RESP_NO_PA = 4, /* unknown process associator */
+ FC_SPP_RESP_CONF = 5, /* configuration precludes image pair */
+ FC_SPP_RESP_COND = 6, /* request completed conditionally */
+ FC_SPP_RESP_MULT = 7, /* unable to handle multiple SPPs */
+ FC_SPP_RESP_INVL = 8, /* SPP is invalid */
+};
+
+/*
+ * ELS_RRQ - Reinstate Recovery Qualifier
+ */
+struct fc_els_rrq {
+ __u8 rrq_cmd; /* command (0x12) */
+ __u8 rrq_zero[3]; /* specified as zero - part of cmd */
+ __u8 rrq_resvd; /* reserved */
+ __u8 rrq_s_id[3]; /* originator FID */
+ __be16 rrq_ox_id; /* originator exchange ID */
+ __be16 rrq_rx_id; /* responder's exchange ID */
+};
+
+/*
+ * ELS_REC - Read exchange concise.
+ */
+struct fc_els_rec {
+ __u8 rec_cmd; /* command (0x13) */
+ __u8 rec_zero[3]; /* specified as zero - part of cmd */
+ __u8 rec_resvd; /* reserved */
+ __u8 rec_s_id[3]; /* originator FID */
+ __be16 rec_ox_id; /* originator exchange ID */
+ __be16 rec_rx_id; /* responder's exchange ID */
+};
+
+/*
+ * ELS_REC LS_ACC payload.
+ */
+struct fc_els_rec_acc {
+ __u8 reca_cmd; /* accept (0x02) */
+ __u8 reca_zero[3]; /* specified as zero - part of cmd */
+ __be16 reca_ox_id; /* originator exchange ID */
+ __be16 reca_rx_id; /* responder's exchange ID */
+ __u8 reca_resvd1; /* reserved */
+ __u8 reca_ofid[3]; /* originator FID */
+ __u8 reca_resvd2; /* reserved */
+ __u8 reca_rfid[3]; /* responder FID */
+ __be32 reca_fc4value; /* FC4 value */
+ __be32 reca_e_stat; /* ESB (exchange status block) status */
+};
+
+/*
+ * ELS_PRLI - Process login request and response.
+ */
+struct fc_els_prli {
+ __u8 prli_cmd; /* command */
+ __u8 prli_spp_len; /* length of each serv. parm. page */
+ __be16 prli_len; /* length of entire payload */
+ /* service parameter pages follow */
+};
+
+/*
+ * ELS_LOGO - process or fabric logout.
+ */
+struct fc_els_logo {
+ __u8 fl_cmd; /* command code */
+ __u8 fl_zero[3]; /* specified as zero - part of cmd */
+ __u8 fl_resvd; /* reserved */
+ __u8 fl_n_port_id[3];/* N port ID */
+ __be64 fl_n_port_wwn; /* port name */
+};
+
+/*
+ * ELS_RTV - read timeout value.
+ */
+struct fc_els_rtv {
+ __u8 rtv_cmd; /* command code 0x0e */
+ __u8 rtv_zero[3]; /* specified as zero - part of cmd */
+};
+
+/*
+ * LS_ACC for ELS_RTV - read timeout value.
+ */
+struct fc_els_rtv_acc {
+ __u8 rtv_cmd; /* command code 0x02 */
+ __u8 rtv_zero[3]; /* specified as zero - part of cmd */
+ __be32 rtv_r_a_tov; /* resource allocation timeout value */
+ __be32 rtv_e_d_tov; /* error detection timeout value */
+ __be32 rtv_toq; /* timeout qualifier (see below) */
+};
+
+/*
+ * rtv_toq bits.
+ */
+#define FC_ELS_RTV_EDRES (1 << 26) /* E_D_TOV resolution is nS else mS */
+#define FC_ELS_RTV_RTTOV (1 << 19) /* R_T_TOV is 100 uS else 100 mS */
+
+/*
+ * ELS_SCR - state change registration payload.
+ */
+struct fc_els_scr {
+ __u8 scr_cmd; /* command code */
+ __u8 scr_resv[6]; /* reserved */
+ __u8 scr_reg_func; /* registration function (see below) */
+};
+
+enum fc_els_scr_func {
+ ELS_SCRF_FAB = 1, /* fabric-detected registration */
+ ELS_SCRF_NPORT = 2, /* Nx_Port-detected registration */
+ ELS_SCRF_FULL = 3, /* full registration */
+ ELS_SCRF_CLEAR = 255, /* remove any current registrations */
+};
+
+/*
+ * ELS_RSCN - registered state change notification payload.
+ */
+struct fc_els_rscn {
+ __u8 rscn_cmd; /* RSCN opcode (0x61) */
+ __u8 rscn_page_len; /* page length (4) */
+ __be16 rscn_plen; /* payload length including this word */
+
+ /* followed by 4-byte generic affected Port_ID pages */
+};
+
+struct fc_els_rscn_page {
+ __u8 rscn_page_flags; /* event and address format */
+ __u8 rscn_fid[3]; /* fabric ID */
+};
+
+#define ELS_RSCN_EV_QUAL_BIT 2 /* shift count for event qualifier */
+#define ELS_RSCN_EV_QUAL_MASK 0xf /* mask for event qualifier */
+#define ELS_RSCN_ADDR_FMT_BIT 0 /* shift count for address format */
+#define ELS_RSCN_ADDR_FMT_MASK 0x3 /* mask for address format */
+
+enum fc_els_rscn_ev_qual {
+ ELS_EV_QUAL_NONE = 0, /* unspecified */
+ ELS_EV_QUAL_NS_OBJ = 1, /* changed name server object */
+ ELS_EV_QUAL_PORT_ATTR = 2, /* changed port attribute */
+ ELS_EV_QUAL_SERV_OBJ = 3, /* changed service object */
+ ELS_EV_QUAL_SW_CONFIG = 4, /* changed switch configuration */
+ ELS_EV_QUAL_REM_OBJ = 5, /* removed object */
+};
+
+enum fc_els_rscn_addr_fmt {
+ ELS_ADDR_FMT_PORT = 0, /* rscn_fid is a port address */
+ ELS_ADDR_FMT_AREA = 1, /* rscn_fid is an area address */
+ ELS_ADDR_FMT_DOM = 2, /* rscn_fid is a domain address */
+ ELS_ADDR_FMT_FAB = 3, /* anything on fabric may have changed */
+};
+
+/*
+ * ELS_RNID - request Node ID.
+ */
+struct fc_els_rnid {
+ __u8 rnid_cmd; /* RNID opcode (0x78) */
+ __u8 rnid_resv[3]; /* reserved */
+ __u8 rnid_fmt; /* data format */
+ __u8 rnid_resv2[3]; /* reserved */
+};
+
+/*
+ * Node Identification Data formats (rnid_fmt)
+ */
+enum fc_els_rnid_fmt {
+ ELS_RNIDF_NONE = 0, /* no specific identification data */
+ ELS_RNIDF_GEN = 0xdf, /* general topology discovery format */
+};
+
+/*
+ * ELS_RNID response.
+ */
+struct fc_els_rnid_resp {
+ __u8 rnid_cmd; /* response code (LS_ACC) */
+ __u8 rnid_resv[3]; /* reserved */
+ __u8 rnid_fmt; /* data format */
+ __u8 rnid_cid_len; /* common ID data length */
+ __u8 rnid_resv2; /* reserved */
+ __u8 rnid_sid_len; /* specific ID data length */
+};
+
+struct fc_els_rnid_cid {
+ __be64 rnid_wwpn; /* N port name */
+ __be64 rnid_wwnn; /* node name */
+};
+
+struct fc_els_rnid_gen {
+ __u8 rnid_vend_id[16]; /* vendor-unique ID */
+ __be32 rnid_atype; /* associated type (see below) */
+ __be32 rnid_phys_port; /* physical port number */
+ __be32 rnid_att_nodes; /* number of attached nodes */
+ __u8 rnid_node_mgmt; /* node management (see below) */
+ __u8 rnid_ip_ver; /* IP version (see below) */
+ __be16 rnid_prot_port; /* UDP / TCP port number */
+ __be32 rnid_ip_addr[4]; /* IP address */
+ __u8 rnid_resvd[2]; /* reserved */
+ __be16 rnid_vend_spec; /* vendor-specific field */
+};
+
+enum fc_els_rnid_atype {
+ ELS_RNIDA_UNK = 0x01, /* unknown */
+ ELS_RNIDA_OTHER = 0x02, /* none of the following */
+ ELS_RNIDA_HUB = 0x03,
+ ELS_RNIDA_SWITCH = 0x04,
+ ELS_RNIDA_GATEWAY = 0x05,
+ ELS_RNIDA_CONV = 0x06, /* Obsolete, do not use this value */
+ ELS_RNIDA_HBA = 0x07, /* Obsolete, do not use this value */
+ ELS_RNIDA_PROXY = 0x08, /* Obsolete, do not use this value */
+ ELS_RNIDA_STORAGE = 0x09,
+ ELS_RNIDA_HOST = 0x0a,
+ ELS_RNIDA_SUBSYS = 0x0b, /* storage subsystem (e.g., RAID) */
+ ELS_RNIDA_ACCESS = 0x0e, /* access device (e.g. media changer) */
+ ELS_RNIDA_NAS = 0x11, /* NAS server */
+ ELS_RNIDA_BRIDGE = 0x12, /* bridge */
+ ELS_RNIDA_VIRT = 0x13, /* virtualization device */
+ ELS_RNIDA_MF = 0xff, /* multifunction device (bits below) */
+ ELS_RNIDA_MF_HUB = 1UL << 31, /* hub */
+ ELS_RNIDA_MF_SW = 1UL << 30, /* switch */
+ ELS_RNIDA_MF_GW = 1UL << 29, /* gateway */
+ ELS_RNIDA_MF_ST = 1UL << 28, /* storage */
+ ELS_RNIDA_MF_HOST = 1UL << 27, /* host */
+ ELS_RNIDA_MF_SUB = 1UL << 26, /* storage subsystem */
+ ELS_RNIDA_MF_ACC = 1UL << 25, /* storage access dev */
+ ELS_RNIDA_MF_WDM = 1UL << 24, /* wavelength division mux */
+ ELS_RNIDA_MF_NAS = 1UL << 23, /* NAS server */
+ ELS_RNIDA_MF_BR = 1UL << 22, /* bridge */
+ ELS_RNIDA_MF_VIRT = 1UL << 21, /* virtualization device */
+};
+
+enum fc_els_rnid_mgmt {
+ ELS_RNIDM_SNMP = 0,
+ ELS_RNIDM_TELNET = 1,
+ ELS_RNIDM_HTTP = 2,
+ ELS_RNIDM_HTTPS = 3,
+ ELS_RNIDM_XML = 4, /* HTTP + XML */
+};
+
+enum fc_els_rnid_ipver {
+ ELS_RNIDIP_NONE = 0, /* no IP support or node mgmt. */
+ ELS_RNIDIP_V4 = 1, /* IPv4 */
+ ELS_RNIDIP_V6 = 2, /* IPv6 */
+};
+
+/*
+ * ELS RPL - Read Port List.
+ */
+struct fc_els_rpl {
+ __u8 rpl_cmd; /* command */
+ __u8 rpl_resv[5]; /* reserved - must be zero */
+ __be16 rpl_max_size; /* maximum response size or zero */
+ __u8 rpl_resv1; /* reserved - must be zero */
+ __u8 rpl_index[3]; /* starting index */
+};
+
+/*
+ * Port number block in RPL response.
+ */
+struct fc_els_pnb {
+ __be32 pnb_phys_pn; /* physical port number */
+ __u8 pnb_resv; /* reserved */
+ __u8 pnb_port_id[3]; /* port ID */
+ __be64 pnb_wwpn; /* port name */
+};
+
+/*
+ * RPL LS_ACC response.
+ */
+struct fc_els_rpl_resp {
+ __u8 rpl_cmd; /* ELS_LS_ACC */
+ __u8 rpl_resv1; /* reserved - must be zero */
+ __be16 rpl_plen; /* payload length */
+ __u8 rpl_resv2; /* reserved - must be zero */
+ __u8 rpl_llen[3]; /* list length */
+ __u8 rpl_resv3; /* reserved - must be zero */
+ __u8 rpl_index[3]; /* starting index */
+ struct fc_els_pnb rpl_pnb[1]; /* variable number of PNBs */
+};
+
+/*
+ * Link Error Status Block.
+ */
+struct fc_els_lesb {
+ __be32 lesb_link_fail; /* link failure count */
+ __be32 lesb_sync_loss; /* loss of synchronization count */
+ __be32 lesb_sig_loss; /* loss of signal count */
+ __be32 lesb_prim_err; /* primitive sequence error count */
+ __be32 lesb_inv_word; /* invalid transmission word count */
+ __be32 lesb_inv_crc; /* invalid CRC count */
+};
+
+/*
+ * ELS RPS - Read Port Status Block request.
+ */
+struct fc_els_rps {
+ __u8 rps_cmd; /* command */
+ __u8 rps_resv[2]; /* reserved - must be zero */
+ __u8 rps_flag; /* flag - see below */
+ __be64 rps_port_spec; /* port selection */
+};
+
+enum fc_els_rps_flag {
+ FC_ELS_RPS_DID = 0x00, /* port identified by D_ID of req. */
+ FC_ELS_RPS_PPN = 0x01, /* port_spec is physical port number */
+ FC_ELS_RPS_WWPN = 0x02, /* port_spec is port WWN */
+};
+
+/*
+ * ELS RPS LS_ACC response.
+ */
+struct fc_els_rps_resp {
+ __u8 rps_cmd; /* command - LS_ACC */
+ __u8 rps_resv[2]; /* reserved - must be zero */
+ __u8 rps_flag; /* flag - see below */
+ __u8 rps_resv2[2]; /* reserved */
+ __be16 rps_status; /* port status - see below */
+ struct fc_els_lesb rps_lesb; /* link error status block */
+};
+
+enum fc_els_rps_resp_flag {
+ FC_ELS_RPS_LPEV = 0x01, /* L_port extension valid */
+};
+
+enum fc_els_rps_resp_status {
+ FC_ELS_RPS_PTP = 1 << 5, /* point-to-point connection */
+ FC_ELS_RPS_LOOP = 1 << 4, /* loop mode */
+ FC_ELS_RPS_FAB = 1 << 3, /* fabric present */
+ FC_ELS_RPS_NO_SIG = 1 << 2, /* loss of signal */
+ FC_ELS_RPS_NO_SYNC = 1 << 1, /* loss of synchronization */
+ FC_ELS_RPS_RESET = 1 << 0, /* in link reset protocol */
+};
+
+/*
+ * ELS LIRR - Link Incident Record Registration request.
+ */
+struct fc_els_lirr {
+ __u8 lirr_cmd; /* command */
+ __u8 lirr_resv[3]; /* reserved - must be zero */
+ __u8 lirr_func; /* registration function */
+ __u8 lirr_fmt; /* FC-4 type of RLIR requested */
+ __u8 lirr_resv2[2]; /* reserved - must be zero */
+};
+
+enum fc_els_lirr_func {
+ ELS_LIRR_SET_COND = 0x01, /* set - conditionally receive */
+ ELS_LIRR_SET_UNCOND = 0x02, /* set - unconditionally receive */
+ ELS_LIRR_CLEAR = 0xff /* clear registration */
+};
+
+/*
+ * ELS SRL - Scan Remote Loop request.
+ */
+struct fc_els_srl {
+ __u8 srl_cmd; /* command */
+ __u8 srl_resv[3]; /* reserved - must be zero */
+ __u8 srl_flag; /* flag - see below */
+ __u8 srl_flag_param[3]; /* flag parameter */
+};
+
+enum fc_els_srl_flag {
+ FC_ELS_SRL_ALL = 0x00, /* scan all FL ports */
+ FC_ELS_SRL_ONE = 0x01, /* scan specified loop */
+ FC_ELS_SRL_EN_PER = 0x02, /* enable periodic scanning (param) */
+ FC_ELS_SRL_DIS_PER = 0x03, /* disable periodic scanning */
+};
+
+/*
+ * ELS RLS - Read Link Error Status Block request.
+ */
+struct fc_els_rls {
+ __u8 rls_cmd; /* command */
+ __u8 rls_resv[4]; /* reserved - must be zero */
+ __u8 rls_port_id[3]; /* port ID */
+};
+
+/*
+ * ELS RLS LS_ACC Response.
+ */
+struct fc_els_rls_resp {
+ __u8 rls_cmd; /* ELS_LS_ACC */
+ __u8 rls_resv[3]; /* reserved - must be zero */
+ struct fc_els_lesb rls_lesb; /* link error status block */
+};
+
+/*
+ * ELS RLIR - Registered Link Incident Report.
+ * This is followed by the CLIR and the CLID, described below.
+ */
+struct fc_els_rlir {
+ __u8 rlir_cmd; /* command */
+ __u8 rlir_resv[3]; /* reserved - must be zero */
+ __u8 rlir_fmt; /* format (FC4-type if type specific) */
+ __u8 rlir_clr_len; /* common link incident record length */
+ __u8 rlir_cld_len; /* common link incident desc. length */
+ __u8 rlir_slr_len; /* spec. link incident record length */
+};
+
+/*
+ * CLIR - Common Link Incident Record Data. - Sent via RLIR.
+ */
+struct fc_els_clir {
+ __be64 clir_wwpn; /* incident port name */
+ __be64 clir_wwnn; /* incident port node name */
+ __u8 clir_port_type; /* incident port type */
+ __u8 clir_port_id[3]; /* incident port ID */
+
+ __be64 clir_conn_wwpn; /* connected port name */
+ __be64 clir_conn_wwnn; /* connected node name */
+ __be64 clir_fab_name; /* fabric name */
+ __be32 clir_phys_port; /* physical port number */
+ __be32 clir_trans_id; /* transaction ID */
+ __u8 clir_resv[3]; /* reserved */
+ __u8 clir_ts_fmt; /* time stamp format */
+ __be64 clir_timestamp; /* time stamp */
+};
+
+/*
+ * CLIR clir_ts_fmt - time stamp format values.
+ */
+enum fc_els_clir_ts_fmt {
+ ELS_CLIR_TS_UNKNOWN = 0, /* time stamp field unknown */
+ ELS_CLIR_TS_SEC_FRAC = 1, /* time in seconds and fractions */
+ ELS_CLIR_TS_CSU = 2, /* time in clock synch update format */
+};
+
+/*
+ * Common Link Incident Descriptor - sent via RLIR.
+ */
+struct fc_els_clid {
+ __u8 clid_iq; /* incident qualifier flags */
+ __u8 clid_ic; /* incident code */
+ __be16 clid_epai; /* domain/area of ISL */
+};
+
+/*
+ * CLID incident qualifier flags.
+ */
+enum fc_els_clid_iq {
+ ELS_CLID_SWITCH = 0x20, /* incident port is a switch node */
+ ELS_CLID_E_PORT = 0x10, /* incident is an ISL (E) port */
+ ELS_CLID_SEV_MASK = 0x0c, /* severity 2-bit field mask */
+ ELS_CLID_SEV_INFO = 0x00, /* report is informational */
+ ELS_CLID_SEV_INOP = 0x08, /* link not operational */
+ ELS_CLID_SEV_DEG = 0x04, /* link degraded but operational */
+ ELS_CLID_LASER = 0x02, /* subassembly is a laser */
+ ELS_CLID_FRU = 0x01, /* format can identify a FRU */
+};
+
+/*
+ * CLID incident code.
+ */
+enum fc_els_clid_ic {
+ ELS_CLID_IC_IMPL = 1, /* implicit incident */
+ ELS_CLID_IC_BER = 2, /* bit-error-rate threshold exceeded */
+ ELS_CLID_IC_LOS = 3, /* loss of synch or signal */
+ ELS_CLID_IC_NOS = 4, /* non-operational primitive sequence */
+ ELS_CLID_IC_PST = 5, /* primitive sequence timeout */
+ ELS_CLID_IC_INVAL = 6, /* invalid primitive sequence */
+ ELS_CLID_IC_LOOP_TO = 7, /* loop initialization time out */
+ ELS_CLID_IC_LIP = 8, /* receiving LIP */
+};
+
+#endif /* _FC_ELS_H_ */
diff --git a/include/scsi/fc/fc_encaps.h b/include/scsi/fc/fc_encaps.h
new file mode 100644
index 0000000..f180c3e
--- /dev/null
+++ b/include/scsi/fc/fc_encaps.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+#ifndef _FC_ENCAPS_H_
+#define _FC_ENCAPS_H_
+
+/*
+ * Protocol definitions from RFC 3643 - Fibre Channel Frame Encapsulation.
+ *
+ * Note: The frame length field is the number of 32-bit words in
+ * the encapsulation including the fcip_encaps_header, CRC and EOF words.
+ * The minimum frame length value in bytes is (32 + 24 + 4 + 4) = 64.
+ * The maximum frame length value in bytes is (32 + 24 + 2112 + 4 + 4) = 2176.
+ */
+#define FC_ENCAPS_MIN_FRAME_LEN 64 /* min frame len (bytes) (see above) */
+#define FC_ENCAPS_MAX_FRAME_LEN (FC_ENCAPS_MIN_FRAME_LEN + FC_MAX_PAYLOAD)
+
+#define FC_ENCAPS_VER 1 /* current version number */
+
+struct fc_encaps_hdr {
+ __u8 fc_proto; /* protocol number */
+ __u8 fc_ver; /* version of encapsulation */
+ __u8 fc_proto_n; /* ones complement of protocol */
+ __u8 fc_ver_n; /* ones complement of version */
+
+ unsigned char fc_proto_data[8]; /* protocol specific data */
+
+ __be16 fc_len_flags; /* 10-bit length/4 w/ 6 flag bits */
+ __be16 fc_len_flags_n; /* ones complement of length / flags */
+
+ /*
+ * Offset 0x10
+ */
+ __be32 fc_time[2]; /* time stamp: seconds and fraction */
+ __be32 fc_crc; /* CRC */
+ __be32 fc_sof; /* start of frame (see FC_SOF below) */
+
+ /* 0x20 - FC frame content followed by EOF word */
+};
+
+#define FCIP_ENCAPS_HDR_LEN 0x20 /* expected length for asserts */
+
+/*
+ * Macros for making redundant copies of EOF and SOF.
+ */
+#define FC_XY(x, y) ((((x) & 0xff) << 8) | ((y) & 0xff))
+#define FC_XYXY(x, y) ((FC_XY(x, y) << 16) | FC_XY(x, y))
+#define FC_XYNN(x, y) (FC_XYXY(x, y) ^ 0xffff)
+
+#define FC_SOF_ENCODE(n) FC_XYNN(n, n)
+#define FC_EOF_ENCODE(n) FC_XYNN(n, n)
+
+/*
+ * SOF / EOF bytes.
+ */
+enum fc_sof {
+ FC_SOF_F = 0x28, /* fabric */
+ FC_SOF_I4 = 0x29, /* initiate class 4 */
+ FC_SOF_I2 = 0x2d, /* initiate class 2 */
+ FC_SOF_I3 = 0x2e, /* initiate class 3 */
+ FC_SOF_N4 = 0x31, /* normal class 4 */
+ FC_SOF_N2 = 0x35, /* normal class 2 */
+ FC_SOF_N3 = 0x36, /* normal class 3 */
+ FC_SOF_C4 = 0x39, /* activate class 4 */
+} __attribute__((packed));
+
+enum fc_eof {
+ FC_EOF_N = 0x41, /* normal (not last frame of seq) */
+ FC_EOF_T = 0x42, /* terminate (last frame of sequence) */
+ FC_EOF_RT = 0x44, /* remove terminate */
+ FC_EOF_DT = 0x46, /* disconnect-terminate class-1 */
+ FC_EOF_NI = 0x49, /* normal-invalid */
+ FC_EOF_DTI = 0x4e, /* disconnect-terminate-invalid */
+ FC_EOF_RTI = 0x4f, /* remove terminate invalid */
+ FC_EOF_A = 0x50, /* abort */
+} __attribute__((packed));
+
+#define FC_SOF_CLASS_MASK 0x06 /* mask for class of service in SOF */
+
+/*
+ * Define classes in terms of the SOF code (initial).
+ */
+enum fc_class {
+ FC_CLASS_NONE = 0, /* software value indicating no class */
+ FC_CLASS_2 = FC_SOF_I2,
+ FC_CLASS_3 = FC_SOF_I3,
+ FC_CLASS_4 = FC_SOF_I4,
+ FC_CLASS_F = FC_SOF_F,
+};
+
+/*
+ * Determine whether SOF code indicates the need for a BLS ACK.
+ */
+static inline int fc_sof_needs_ack(enum fc_sof sof)
+{
+ return (~sof) & 0x02; /* true for class 1, 2, 4, 6, or F; class 3 SOFs have bit 1 set */
+}
+
+/*
+ * Given an fc_class, return the normal (non-initial) SOF value.
+ */
+static inline enum fc_sof fc_sof_normal(enum fc_class class)
+{
+ return class + FC_SOF_N3 - FC_SOF_I3; /* diff is always 8 */
+}
+
+/*
+ * Compute class from SOF value.
+ */
+static inline enum fc_class fc_sof_class(enum fc_sof sof)
+{
+ return (sof & 0x7) | FC_SOF_F;
+}
+
+/*
+ * Determine whether SOF is for the initial frame of a sequence.
+ */
+static inline int fc_sof_is_init(enum fc_sof sof)
+{
+ return sof < 0x30;
+}
+
+#endif /* _FC_ENCAPS_H_ */
diff --git a/include/scsi/fc/fc_fc2.h b/include/scsi/fc/fc_fc2.h
new file mode 100644
index 0000000..cff8a8c
--- /dev/null
+++ b/include/scsi/fc/fc_fc2.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FC2_H_
+#define _FC_FC2_H_
+
+/*
+ * Fibre Channel Exchanges and Sequences.
+ */
+#ifndef PACKED
+#define PACKED __attribute__ ((__packed__))
+#endif /* PACKED */
+
+
+/*
+ * Sequence Status Block.
+ * This format is set by the FC-FS standard and is sent over the wire.
+ * Note that the fields aren't all naturally aligned.
+ */
+struct fc_ssb {
+ __u8 ssb_seq_id; /* sequence ID */
+ __u8 _ssb_resvd;
+ __be16 ssb_low_seq_cnt; /* lowest SEQ_CNT */
+
+ __be16 ssb_high_seq_cnt; /* highest SEQ_CNT */
+ __be16 ssb_s_stat; /* sequence status flags */
+
+ __be16 ssb_err_seq_cnt; /* error SEQ_CNT */
+ __u8 ssb_fh_cs_ctl; /* frame header CS_CTL */
+ __be16 ssb_fh_ox_id; /* frame header OX_ID */
+ __be16 ssb_rx_id; /* responder's exchange ID */
+ __u8 _ssb_resvd2[2];
+} PACKED;
+
+/*
+ * The SSB should be 17 bytes.  Since its layout is somewhat strange,
+ * we define the size here so that code can ASSERT that the size comes out
+ * correct.
+ */
+#define FC_SSB_SIZE 17 /* length of fc_ssb for assert */
+
+/*
+ * ssb_s_stat - flags from FC-FS-2 T11/1619-D Rev 0.90.
+ */
+#define SSB_ST_RESP (1 << 15) /* sequence responder */
+#define SSB_ST_ACTIVE (1 << 14) /* sequence is active */
+#define SSB_ST_ABNORMAL (1 << 12) /* abnormal ending condition */
+
+#define SSB_ST_REQ_MASK (3 << 10) /* ACK, abort sequence condition */
+#define SSB_ST_REQ_CONT (0 << 10)
+#define SSB_ST_REQ_ABORT (1 << 10)
+#define SSB_ST_REQ_STOP (2 << 10)
+#define SSB_ST_REQ_RETRANS (3 << 10)
+
+#define SSB_ST_ABTS (1 << 9) /* ABTS protocol completed */
+#define SSB_ST_RETRANS (1 << 8) /* retransmission completed */
+#define SSB_ST_TIMEOUT (1 << 7) /* sequence timed out by recipient */
+#define SSB_ST_P_RJT (1 << 6) /* P_RJT transmitted */
+
+#define SSB_ST_CLASS_BIT 4 /* class of service field LSB */
+#define SSB_ST_CLASS_MASK 3 /* class of service mask */
+#define SSB_ST_ACK (1 << 3) /* ACK (EOFt or EOFdt) transmitted */
+
+/*
+ * Exchange Status Block.
+ * This format is set by the FC-FS standard and is sent over the wire.
+ * Note that the fields aren't all naturally aligned.
+ */
+struct fc_esb {
+ __u8 esb_cs_ctl; /* CS_CTL for frame header */
+ __be16 esb_ox_id; /* originator exchange ID */
+ __be16 esb_rx_id; /* responder exchange ID */
+ __be32 esb_orig_fid; /* fabric ID of originator */
+ __be32 esb_resp_fid; /* fabric ID of responder */
+ __be32 esb_e_stat; /* status */
+ __u8 _esb_resvd[4];
+ __u8 esb_service_params[112]; /* TBD */
+ __u8 esb_seq_status[8]; /* sequence statuses, 8 bytes each */
+} __attribute__((packed));
+
+
+/*
+ * Define expected size for ASSERTs.
+ * See comments on FC_SSB_SIZE.
+ */
+#define FC_ESB_SIZE (1 + 5*4 + 112 + 8) /* expected size */
+
+/*
+ * esb_e_stat - flags from FC-FS-2 T11/1619-D Rev 0.90.
+ */
+#define ESB_ST_RESP (1 << 31) /* responder to exchange */
+#define ESB_ST_SEQ_INIT     (1 << 30)   /* port holds sequence initiative */
+#define ESB_ST_COMPLETE (1 << 29) /* exchange is complete */
+#define ESB_ST_ABNORMAL (1 << 28) /* abnormal ending condition */
+#define ESB_ST_REC_QUAL (1 << 26) /* recovery qualifier active */
+
+#define ESB_ST_ERRP_BIT 24 /* LSB for error policy */
+#define ESB_ST_ERRP_MASK (3 << 24) /* mask for error policy */
+#define ESB_ST_ERRP_MULT (0 << 24) /* abort, discard multiple sequences */
+#define ESB_ST_ERRP_SING (1 << 24) /* abort, discard single sequence */
+#define ESB_ST_ERRP_INF (2 << 24) /* process with infinite buffers */
+#define ESB_ST_ERRP_IMM (3 << 24) /* discard mult. with immed. retran. */
+
+#define ESB_ST_OX_ID_INVL (1 << 23) /* originator XID invalid */
+#define ESB_ST_RX_ID_INVL (1 << 22) /* responder XID invalid */
+#define ESB_ST_PRI_INUSE (1 << 21) /* priority / preemption in use */
+
+#endif /* _FC_FC2_H_ */
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h
new file mode 100644
index 0000000..5d38f19
--- /dev/null
+++ b/include/scsi/fc/fc_fcp.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FCP_H_
+#define _FC_FCP_H_
+
+/*
+ * Fibre Channel Protocol for SCSI.
+ * From T10 FCP-3, T10 project 1560-D Rev 4, Sept. 13, 2005.
+ */
+
+/*
+ * fc/fs.h defines FC_TYPE_FCP.
+ */
+
+/*
+ * Service parameter page parameters (word 3 bits) for Process Login.
+ */
+#define FCP_SPPF_TASK_RETRY_ID 0x0200 /* task retry ID requested */
+#define FCP_SPPF_RETRY 0x0100 /* retry supported */
+#define FCP_SPPF_CONF_COMPL 0x0080 /* confirmed completion allowed */
+#define FCP_SPPF_OVLY_ALLOW 0x0040 /* data overlay allowed */
+#define FCP_SPPF_INIT_FCN 0x0020 /* initiator function */
+#define FCP_SPPF_TARG_FCN 0x0010 /* target function */
+#define FCP_SPPF_RD_XRDY_DIS 0x0002 /* disable XFER_RDY for reads */
+#define FCP_SPPF_WR_XRDY_DIS 0x0001 /* disable XFER_RDY for writes */
+
+/*
+ * FCP_CMND IU Payload.
+ */
+struct fcp_cmnd {
+ __u8 fc_lun[8]; /* logical unit number */
+ __u8 fc_cmdref; /* command reference number */
+ __u8 fc_pri_ta; /* priority and task attribute */
+ __u8 fc_tm_flags; /* task management flags */
+ __u8 fc_flags; /* additional len & flags */
+ __u8 fc_cdb[16]; /* base CDB */
+ __be32 fc_dl; /* data length (must follow fc_cdb) */
+};
+
+#define FCP_CMND_LEN 32 /* expected length of structure */
+
+struct fcp_cmnd32 {
+ __u8 fc_lun[8]; /* logical unit number */
+ __u8 fc_cmdref; /* command reference number */
+ __u8 fc_pri_ta; /* priority and task attribute */
+ __u8 fc_tm_flags; /* task management flags */
+ __u8 fc_flags; /* additional len & flags */
+ __u8 fc_cdb[32]; /* base CDB */
+ __be32 fc_dl; /* data length (must follow fc_cdb) */
+};
+
+#define FCP_CMND32_LEN 48 /* expected length of structure */
+#define FCP_CMND32_ADD_LEN (16 / 4) /* Additional cdb length */
+
+/*
+ * fc_pri_ta.
+ */
+#define FCP_PTA_SIMPLE 0 /* simple task attribute */
+#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
+#define FCP_PTA_ORDERED 2 /* ordered task attribute */
+#define FCP_PTA_ACA 4 /* auto. contingent allegiance */
+#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
+#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
+
+/*
+ * fc_tm_flags - task management flags field.
+ */
+#define FCP_TMF_CLR_ACA 0x40 /* clear ACA condition */
+#define FCP_TMF_LUN_RESET 0x10 /* logical unit reset task management */
+#define FCP_TMF_CLR_TASK_SET 0x04 /* clear task set */
+#define FCP_TMF_ABT_TASK_SET 0x02 /* abort task set */
+
+/*
+ * fc_flags.
+ * Bits 7:2 are the additional FCP_CDB length / 4.
+ */
+#define FCP_CFL_LEN_MASK 0xfc /* mask for additional length */
+#define FCP_CFL_LEN_SHIFT 2 /* shift bits for additional length */
+#define FCP_CFL_RDDATA 0x02 /* read data */
+#define FCP_CFL_WRDATA 0x01 /* write data */
+
+/*
+ * FCP_TXRDY IU - transfer ready payload.
+ */
+struct fcp_txrdy {
+ __be32 ft_data_ro; /* data relative offset */
+ __be32 ft_burst_len; /* burst length */
+ __u8 _ft_resvd[4]; /* reserved */
+};
+
+#define FCP_TXRDY_LEN 12 /* expected length of structure */
+
+/*
+ * FCP_RESP IU - response payload.
+ *
+ * The response payload comes in three parts: the flags/status, the
+ * sense/response lengths and the sense data/response info section.
+ *
+ * From FCP3r04, note 6 of section 9.5.13:
+ *
+ * Some early implementations presented the FCP_RSP IU without the FCP_RESID,
+ * FCP_SNS_LEN, and FCP_RSP_LEN fields if the FCP_RESID_UNDER, FCP_RESID_OVER,
+ * FCP_SNS_LEN_VALID, and FCP_RSP_LEN_VALID bits were all set to zero. This
+ * non-standard behavior should be tolerated.
+ *
+ * All response frames will always contain the fcp_resp template. Some
+ * will also include the fcp_resp_len template.
+ */
+struct fcp_resp {
+ __u8 _fr_resvd[8]; /* reserved */
+ __be16 fr_retry_delay; /* retry delay timer */
+ __u8 fr_flags; /* flags */
+ __u8 fr_status; /* SCSI status code */
+};
+
+#define FCP_RESP_LEN 12 /* expected length of structure */
+
+struct fcp_resp_ext {
+ __be32 fr_resid; /* Residual value */
+ __be32 fr_sns_len; /* SCSI Sense length */
+ __be32 fr_rsp_len; /* Response Info length */
+
+ /*
+ * Optionally followed by RSP info and/or SNS info and/or
+ * bidirectional read residual length, if any.
+ */
+};
+
+#define FCP_RESP_EXT_LEN 12 /* expected length of the structure */
+
+struct fcp_resp_rsp_info {
+ __u8 _fr_resvd[3]; /* reserved */
+ __u8 rsp_code; /* Response Info Code */
+ __u8 _fr_resvd2[4]; /* reserved */
+};
+
+struct fcp_resp_with_ext {
+ struct fcp_resp resp;
+ struct fcp_resp_ext ext;
+};
+
+#define FCP_RESP_WITH_EXT (FCP_RESP_LEN + FCP_RESP_EXT_LEN)
+
+/*
+ * fr_flags.
+ */
+#define FCP_BIDI_RSP 0x80 /* bidirectional read response */
+#define FCP_BIDI_READ_UNDER 0x40 /* bidir. read less than requested */
+#define FCP_BIDI_READ_OVER 0x20 /* DL insufficient for full transfer */
+#define FCP_CONF_REQ 0x10 /* confirmation requested */
+#define FCP_RESID_UNDER 0x08 /* transfer shorter than expected */
+#define FCP_RESID_OVER 0x04 /* DL insufficient for full transfer */
+#define FCP_SNS_LEN_VAL 0x02 /* SNS_LEN field is valid */
+#define FCP_RSP_LEN_VAL 0x01 /* RSP_LEN field is valid */
+
+/*
+ * rsp_codes
+ */
+enum fcp_resp_rsp_codes {
+ FCP_TMF_CMPL = 0,
+ FCP_DATA_LEN_INVALID = 1,
+ FCP_CMND_FIELDS_INVALID = 2,
+ FCP_DATA_PARAM_MISMATCH = 3,
+ FCP_TMF_REJECTED = 4,
+ FCP_TMF_FAILED = 5,
+ FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * FCP SRR Link Service request - Sequence Retransmission Request.
+ */
+struct fcp_srr {
+ __u8 srr_op; /* opcode ELS_SRR */
+ __u8 srr_resvd[3]; /* opcode / reserved - must be zero */
+ __be16 srr_ox_id; /* OX_ID of failed command */
+ __be16 srr_rx_id; /* RX_ID of failed command */
+ __be32 srr_rel_off; /* relative offset */
+ __u8 srr_r_ctl; /* r_ctl for the information unit */
+ __u8 srr_resvd2[3]; /* reserved */
+};
+
+#endif /* _FC_FCP_H_ */
diff --git a/include/scsi/fc/fc_fs.h b/include/scsi/fc/fc_fs.h
new file mode 100644
index 0000000..ba6df64
--- /dev/null
+++ b/include/scsi/fc/fc_fs.h
@@ -0,0 +1,344 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FS_H_
+#define _FC_FS_H_
+
+/*
+ * Fibre Channel Framing and Signalling definitions.
+ * From T11 FC-FS-2 Rev 0.90 - 9 August 2005.
+ */
+
+/*
+ * Frame header
+ */
+struct fc_frame_header {
+ __u8 fh_r_ctl; /* routing control */
+ __u8 fh_d_id[3]; /* Destination ID */
+
+ __u8 fh_cs_ctl; /* class of service control / pri */
+ __u8 fh_s_id[3]; /* Source ID */
+
+ __u8 fh_type; /* see enum fc_fh_type below */
+ __u8 fh_f_ctl[3]; /* frame control */
+
+ __u8 fh_seq_id; /* sequence ID */
+ __u8 fh_df_ctl; /* data field control */
+ __be16 fh_seq_cnt; /* sequence count */
+
+ __be16 fh_ox_id; /* originator exchange ID */
+ __be16 fh_rx_id; /* responder exchange ID */
+ __be32 fh_parm_offset; /* parameter or relative offset */
+};
+
+#define FC_FRAME_HEADER_LEN 24 /* expected length of structure */
+
+#define FC_MAX_PAYLOAD 2112U /* max payload length in bytes */
+#define FC_MIN_MAX_PAYLOAD 256U /* lower limit on max payload */
+
+#define FC_MAX_FRAME (FC_MAX_PAYLOAD + FC_FRAME_HEADER_LEN)
+#define FC_MIN_MAX_FRAME (FC_MIN_MAX_PAYLOAD + FC_FRAME_HEADER_LEN)
+
+/*
+ * fh_r_ctl - Routing control definitions.
+ */
+ /*
+ * FC-4 device_data.
+ */
+enum fc_rctl {
+ FC_RCTL_DD_UNCAT = 0x00, /* uncategorized information */
+ FC_RCTL_DD_SOL_DATA = 0x01, /* solicited data */
+ FC_RCTL_DD_UNSOL_CTL = 0x02, /* unsolicited control */
+ FC_RCTL_DD_SOL_CTL = 0x03, /* solicited control or reply */
+ FC_RCTL_DD_UNSOL_DATA = 0x04, /* unsolicited data */
+ FC_RCTL_DD_DATA_DESC = 0x05, /* data descriptor */
+ FC_RCTL_DD_UNSOL_CMD = 0x06, /* unsolicited command */
+ FC_RCTL_DD_CMD_STATUS = 0x07, /* command status */
+
+#define FC_RCTL_ILS_REQ FC_RCTL_DD_UNSOL_CTL /* ILS request */
+#define FC_RCTL_ILS_REP FC_RCTL_DD_SOL_CTL /* ILS reply */
+
+ /*
+ * Extended Link_Data
+ */
+ FC_RCTL_ELS_REQ = 0x22, /* extended link services request */
+ FC_RCTL_ELS_REP = 0x23, /* extended link services reply */
+ FC_RCTL_ELS4_REQ = 0x32, /* FC-4 ELS request */
+ FC_RCTL_ELS4_REP = 0x33, /* FC-4 ELS reply */
+ /*
+ * Basic Link Services fh_r_ctl values.
+ */
+ FC_RCTL_BA_NOP = 0x80, /* basic link service NOP */
+ FC_RCTL_BA_ABTS = 0x81, /* basic link service abort */
+ FC_RCTL_BA_RMC = 0x82, /* remove connection */
+ FC_RCTL_BA_ACC = 0x84, /* basic accept */
+ FC_RCTL_BA_RJT = 0x85, /* basic reject */
+ FC_RCTL_BA_PRMT = 0x86, /* dedicated connection preempted */
+ /*
+ * Link Control Information.
+ */
+ FC_RCTL_ACK_1 = 0xc0, /* acknowledge_1 */
+ FC_RCTL_ACK_0 = 0xc1, /* acknowledge_0 */
+ FC_RCTL_P_RJT = 0xc2, /* port reject */
+ FC_RCTL_F_RJT = 0xc3, /* fabric reject */
+ FC_RCTL_P_BSY = 0xc4, /* port busy */
+ FC_RCTL_F_BSY = 0xc5, /* fabric busy to data frame */
+ FC_RCTL_F_BSYL = 0xc6, /* fabric busy to link control frame */
+ FC_RCTL_LCR = 0xc7, /* link credit reset */
+ FC_RCTL_END = 0xc9, /* end */
+};
+ /* incomplete list of definitions */
+
+/*
+ * R_CTL names initializer.
+ * Please keep this matching the above definitions.
+ */
+#define FC_RCTL_NAMES_INIT { \
+ [FC_RCTL_DD_UNCAT] = "uncat", \
+ [FC_RCTL_DD_SOL_DATA] = "sol data", \
+ [FC_RCTL_DD_UNSOL_CTL] = "unsol ctl", \
+ [FC_RCTL_DD_SOL_CTL] = "sol ctl/reply", \
+ [FC_RCTL_DD_UNSOL_DATA] = "unsol data", \
+ [FC_RCTL_DD_DATA_DESC] = "data desc", \
+ [FC_RCTL_DD_UNSOL_CMD] = "unsol cmd", \
+ [FC_RCTL_DD_CMD_STATUS] = "cmd status", \
+ [FC_RCTL_ELS_REQ] = "ELS req", \
+ [FC_RCTL_ELS_REP] = "ELS rep", \
+ [FC_RCTL_ELS4_REQ] = "FC-4 ELS req", \
+ [FC_RCTL_ELS4_REP] = "FC-4 ELS rep", \
+ [FC_RCTL_BA_NOP] = "BLS NOP", \
+ [FC_RCTL_BA_ABTS] = "BLS abort", \
+ [FC_RCTL_BA_RMC] = "BLS remove connection", \
+ [FC_RCTL_BA_ACC] = "BLS accept", \
+ [FC_RCTL_BA_RJT] = "BLS reject", \
+ [FC_RCTL_BA_PRMT] = "BLS dedicated connection preempted", \
+ [FC_RCTL_ACK_1] = "LC ACK_1", \
+ [FC_RCTL_ACK_0] = "LC ACK_0", \
+ [FC_RCTL_P_RJT] = "LC port reject", \
+ [FC_RCTL_F_RJT] = "LC fabric reject", \
+ [FC_RCTL_P_BSY] = "LC port busy", \
+ [FC_RCTL_F_BSY] = "LC fabric busy to data frame", \
+ [FC_RCTL_F_BSYL] = "LC fabric busy to link control frame",\
+ [FC_RCTL_LCR] = "LC link credit reset", \
+ [FC_RCTL_END] = "LC end", \
+}
+
+/*
+ * Well-known fabric addresses.
+ */
+enum fc_well_known_fid {
+ FC_FID_BCAST = 0xffffff, /* broadcast */
+ FC_FID_FLOGI = 0xfffffe, /* fabric login */
+ FC_FID_FCTRL = 0xfffffd, /* fabric controller */
+ FC_FID_DIR_SERV = 0xfffffc, /* directory server */
+ FC_FID_TIME_SERV = 0xfffffb, /* time server */
+ FC_FID_MGMT_SERV = 0xfffffa, /* management server */
+ FC_FID_QOS = 0xfffff9, /* QoS Facilitator */
+ FC_FID_ALIASES = 0xfffff8, /* alias server (FC-PH2) */
+ FC_FID_SEC_KEY = 0xfffff7, /* Security key dist. server */
+ FC_FID_CLOCK = 0xfffff6, /* clock synch server */
+ FC_FID_MCAST_SERV = 0xfffff5, /* multicast server */
+};
+
+#define FC_FID_WELL_KNOWN_MAX 0xffffff /* highest well-known fabric ID */
+#define FC_FID_WELL_KNOWN_BASE 0xfffff5 /* start of well-known fabric ID */
+
+/*
+ * Other well-known addresses, outside the above contiguous range.
+ */
+#define FC_FID_DOM_MGR 0xfffc00 /* domain manager base */
+
+/*
+ * Fabric ID bytes.
+ */
+#define FC_FID_DOMAIN 0
+#define FC_FID_PORT 1
+#define FC_FID_LINK 2
+
+/*
+ * fh_type codes
+ */
+enum fc_fh_type {
+ FC_TYPE_BLS = 0x00, /* basic link service */
+ FC_TYPE_ELS = 0x01, /* extended link service */
+ FC_TYPE_IP = 0x05, /* IP over FC, RFC 4338 */
+ FC_TYPE_FCP = 0x08, /* SCSI FCP */
+ FC_TYPE_CT = 0x20, /* Fibre Channel Services (FC-CT) */
+ FC_TYPE_ILS = 0x22, /* internal link service */
+};
+
+/*
+ * FC_TYPE names initializer.
+ * Please keep this matching the above definitions.
+ */
+#define FC_TYPE_NAMES_INIT { \
+ [FC_TYPE_BLS] = "BLS", \
+ [FC_TYPE_ELS] = "ELS", \
+ [FC_TYPE_IP] = "IP", \
+ [FC_TYPE_FCP] = "FCP", \
+ [FC_TYPE_CT] = "CT", \
+ [FC_TYPE_ILS] = "ILS", \
+}
+
+/*
+ * Exchange IDs.
+ */
+#define FC_XID_UNKNOWN 0xffff /* unknown exchange ID */
+
+/*
+ * fh_f_ctl - Frame control flags.
+ */
+#define FC_FC_EX_CTX (1 << 23) /* sent by responder to exchange */
+#define FC_FC_SEQ_CTX (1 << 22) /* sent by responder to sequence */
+#define FC_FC_FIRST_SEQ (1 << 21) /* first sequence of this exchange */
+#define FC_FC_LAST_SEQ (1 << 20) /* last sequence of this exchange */
+#define FC_FC_END_SEQ (1 << 19) /* last frame of sequence */
+#define FC_FC_END_CONN (1 << 18) /* end of class 1 connection pending */
+#define FC_FC_RES_B17 (1 << 17) /* reserved */
+#define FC_FC_SEQ_INIT (1 << 16) /* transfer of sequence initiative */
+#define FC_FC_X_ID_REASS (1 << 15) /* exchange ID has been changed */
+#define FC_FC_X_ID_INVAL (1 << 14) /* exchange ID invalidated */
+
+#define FC_FC_ACK_1 (1 << 12) /* 13:12 = 1: ACK_1 expected */
+#define FC_FC_ACK_N (2 << 12) /* 13:12 = 2: ACK_N expected */
+#define FC_FC_ACK_0 (3 << 12) /* 13:12 = 3: ACK_0 expected */
+
+#define FC_FC_RES_B11 (1 << 11) /* reserved */
+#define FC_FC_RES_B10 (1 << 10) /* reserved */
+#define FC_FC_RETX_SEQ (1 << 9) /* retransmitted sequence */
+#define FC_FC_UNI_TX (1 << 8) /* unidirectional transmit (class 1) */
+#define FC_FC_CONT_SEQ(i) ((i) << 6)
+#define FC_FC_ABT_SEQ(i) ((i) << 4)
+#define FC_FC_REL_OFF (1 << 3) /* parameter is relative offset */
+#define FC_FC_RES2 (1 << 2) /* reserved */
+#define FC_FC_FILL(i) ((i) & 3) /* 1:0: bytes of trailing fill */
+
+/*
+ * BA_ACC payload.
+ */
+struct fc_ba_acc {
+ __u8 ba_seq_id_val; /* SEQ_ID validity */
+#define FC_BA_SEQ_ID_VAL 0x80
+ __u8 ba_seq_id; /* SEQ_ID of seq last deliverable */
+ __u8 ba_resvd[2]; /* reserved */
+ __be16 ba_ox_id; /* OX_ID for aborted seq or exch */
+ __be16 ba_rx_id; /* RX_ID for aborted seq or exch */
+ __be16 ba_low_seq_cnt; /* low SEQ_CNT of aborted seq */
+ __be16 ba_high_seq_cnt; /* high SEQ_CNT of aborted seq */
+};
+
+/*
+ * BA_RJT: Basic Reject payload.
+ */
+struct fc_ba_rjt {
+ __u8 br_resvd; /* reserved */
+ __u8 br_reason; /* reason code */
+ __u8 br_explan; /* reason explanation */
+ __u8 br_vendor; /* vendor unique code */
+};
+
+/*
+ * BA_RJT reason codes.
+ * From FS-2.
+ */
+enum fc_ba_rjt_reason {
+ FC_BA_RJT_NONE = 0, /* in software this means no reject */
+ FC_BA_RJT_INVL_CMD = 0x01, /* invalid command code */
+ FC_BA_RJT_LOG_ERR = 0x03, /* logical error */
+ FC_BA_RJT_LOG_BUSY = 0x05, /* logical busy */
+ FC_BA_RJT_PROTO_ERR = 0x07, /* protocol error */
+ FC_BA_RJT_UNABLE = 0x09, /* unable to perform request */
+ FC_BA_RJT_VENDOR = 0xff, /* vendor-specific (see br_vendor) */
+};
+
+/*
+ * BA_RJT reason code explanations.
+ */
+enum fc_ba_rjt_explan {
+ FC_BA_RJT_EXP_NONE = 0x00, /* no additional explanation */
+ FC_BA_RJT_INV_XID = 0x03, /* invalid OX_ID-RX_ID combination */
+ FC_BA_RJT_ABT = 0x05, /* sequence aborted, no seq info */
+};
+
+/*
+ * P_RJT or F_RJT: Port Reject or Fabric Reject parameter field.
+ */
+struct fc_pf_rjt {
+ __u8 rj_action; /* reserved */
+ __u8 rj_reason; /* reason code */
+ __u8 rj_resvd; /* reserved */
+ __u8 rj_vendor; /* vendor unique code */
+};
+
+/*
+ * P_RJT and F_RJT reject reason codes.
+ */
+enum fc_pf_rjt_reason {
+ FC_RJT_NONE = 0, /* non-reject (reserved by standard) */
+ FC_RJT_INVL_DID = 0x01, /* invalid destination ID */
+ FC_RJT_INVL_SID = 0x02, /* invalid source ID */
+ FC_RJT_P_UNAV_T = 0x03, /* port unavailable, temporary */
+ FC_RJT_P_UNAV = 0x04, /* port unavailable, permanent */
+ FC_RJT_CLS_UNSUP = 0x05, /* class not supported */
+ FC_RJT_DEL_USAGE = 0x06, /* delimiter usage error */
+ FC_RJT_TYPE_UNSUP = 0x07, /* type not supported */
+ FC_RJT_LINK_CTL = 0x08, /* invalid link control */
+ FC_RJT_R_CTL = 0x09, /* invalid R_CTL field */
+ FC_RJT_F_CTL = 0x0a, /* invalid F_CTL field */
+ FC_RJT_OX_ID = 0x0b, /* invalid originator exchange ID */
+ FC_RJT_RX_ID = 0x0c, /* invalid responder exchange ID */
+ FC_RJT_SEQ_ID = 0x0d, /* invalid sequence ID */
+ FC_RJT_DF_CTL = 0x0e, /* invalid DF_CTL field */
+ FC_RJT_SEQ_CNT = 0x0f, /* invalid SEQ_CNT field */
+ FC_RJT_PARAM = 0x10, /* invalid parameter field */
+ FC_RJT_EXCH_ERR = 0x11, /* exchange error */
+ FC_RJT_PROTO = 0x12, /* protocol error */
+ FC_RJT_LEN = 0x13, /* incorrect length */
+ FC_RJT_UNEXP_ACK = 0x14, /* unexpected ACK */
+ FC_RJT_FAB_CLASS = 0x15, /* class unsupported by fabric entity */
+ FC_RJT_LOGI_REQ = 0x16, /* login required */
+ FC_RJT_SEQ_XS = 0x17, /* excessive sequences attempted */
+ FC_RJT_EXCH_EST = 0x18, /* unable to establish exchange */
+ FC_RJT_FAB_UNAV = 0x1a, /* fabric unavailable */
+ FC_RJT_VC_ID = 0x1b, /* invalid VC_ID (class 4) */
+ FC_RJT_CS_CTL = 0x1c, /* invalid CS_CTL field */
+ FC_RJT_INSUF_RES = 0x1d, /* insuff. resources for VC (Class 4) */
+ FC_RJT_INVL_CLS = 0x1f, /* invalid class of service */
+ FC_RJT_PREEMT_RJT = 0x20, /* preemption request rejected */
+ FC_RJT_PREEMT_DIS = 0x21, /* preemption not enabled */
+ FC_RJT_MCAST_ERR = 0x22, /* multicast error */
+ FC_RJT_MCAST_ET = 0x23, /* multicast error terminate */
+ FC_RJT_PRLI_REQ = 0x24, /* process login required */
+ FC_RJT_INVL_ATT = 0x25, /* invalid attachment */
+ FC_RJT_VENDOR = 0xff, /* vendor specific reject */
+};
+
+/*
+ * Data descriptor format (R_CTL == FC_RCTL_DD_DATA_DESC).
+ * This is used for FCP SCSI transfer ready.
+ */
+struct fc_data_desc {
+ __be32 dd_offset; /* data relative offset in bytes */
+ __be32 dd_len; /* transfer buffer size in bytes */
+ __u8 _dd_resvd[4];
+};
+
+#define FC_DATA_DESC_LEN 12 /* expected length of structure */
+
+#endif /* _FC_FS_H_ */
diff --git a/include/scsi/fc/fc_gs.h b/include/scsi/fc/fc_gs.h
new file mode 100644
index 0000000..ffab027
--- /dev/null
+++ b/include/scsi/fc/fc_gs.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_GS_H_
+#define _FC_GS_H_
+
+/*
+ * Fibre Channel Services - Common Transport.
+ * From T11.org FC-GS-2 Rev 5.3 November 1998.
+ */
+
+struct fc_ct_hdr {
+ __u8 ct_rev; /* revision */
+ __u8 ct_in_id[3]; /* N_Port ID of original requestor */
+ __u8 ct_fs_type; /* type of fibre channel service */
+ __u8 ct_fs_subtype; /* subtype */
+ __u8 ct_options;
+ __u8 _ct_resvd1;
+ __be16 ct_cmd; /* command / response code */
+ __be16 ct_mr_size; /* maximum / residual size */
+ __u8 _ct_resvd2;
+ __u8 ct_reason; /* reject reason */
+ __u8 ct_explan; /* reason code explanation */
+ __u8 ct_vendor; /* vendor unique data */
+};
+
+#define FC_CT_HDR_LEN 16 /* expected sizeof (struct fc_ct_hdr) */
+
+enum fc_ct_rev {
+ FC_CT_REV = 1 /* common transport revision */
+};
+
+/*
+ * ct_fs_type values.
+ */
+enum fc_ct_fs_type {
+ FC_FST_ALIAS = 0xf8, /* alias service */
+ FC_FST_MGMT = 0xfa, /* management service */
+ FC_FST_TIME = 0xfb, /* time service */
+ FC_FST_DIR = 0xfc, /* directory service */
+};
+
+/*
+ * ct_cmd: Command / response codes
+ */
+enum fc_ct_cmd {
+ FC_FS_RJT = 0x8001, /* reject */
+ FC_FS_ACC = 0x8002, /* accept */
+};
+
+/*
+ * FS_RJT reason codes.
+ */
+enum fc_ct_reason {
+ FC_FS_RJT_CMD = 0x01, /* invalid command code */
+ FC_FS_RJT_VER = 0x02, /* invalid version level */
+ FC_FS_RJT_LOG = 0x03, /* logical error */
+ FC_FS_RJT_IUSIZ = 0x04, /* invalid IU size */
+ FC_FS_RJT_BSY = 0x05, /* logical busy */
+ FC_FS_RJT_PROTO = 0x07, /* protocol error */
+ FC_FS_RJT_UNABL = 0x09, /* unable to perform command request */
+ FC_FS_RJT_UNSUP = 0x0b, /* command not supported */
+};
+
+/*
+ * FS_RJT reason code explanations.
+ */
+enum fc_ct_explan {
+ FC_FS_EXP_NONE = 0x00, /* no additional explanation */
+ FC_FS_EXP_PID = 0x01, /* port ID not registered */
+ FC_FS_EXP_PNAM = 0x02, /* port name not registered */
+ FC_FS_EXP_NNAM = 0x03, /* node name not registered */
+ FC_FS_EXP_COS = 0x04, /* class of service not registered */
+ /* definitions not complete */
+};
+
+#endif /* _FC_GS_H_ */
diff --git a/include/scsi/fc/fc_ns.h b/include/scsi/fc/fc_ns.h
new file mode 100644
index 0000000..790d7b9
--- /dev/null
+++ b/include/scsi/fc/fc_ns.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_NS_H_
+#define _FC_NS_H_
+
+/*
+ * Fibre Channel Services - Name Service (dNS)
+ * From T11.org FC-GS-2 Rev 5.3 November 1998.
+ */
+
+/*
+ * Common-transport sub-type for Name Server.
+ */
+#define FC_NS_SUBTYPE 2 /* fs_ct_hdr.ct_fs_subtype */
+
+/*
+ * Name server Requests.
+ * Note: this is an incomplete list, some unused requests are omitted.
+ */
+enum fc_ns_req {
+ FC_NS_GA_NXT = 0x0100, /* get all next */
+ FC_NS_GI_A = 0x0101, /* get identifiers - scope */
+ FC_NS_GPN_ID = 0x0112, /* get port name by ID */
+ FC_NS_GNN_ID = 0x0113, /* get node name by ID */
+ FC_NS_GID_PN = 0x0121, /* get ID for port name */
+ FC_NS_GID_NN = 0x0131, /* get IDs for node name */
+ FC_NS_GID_FT = 0x0171, /* get IDs by FC4 type */
+ FC_NS_GPN_FT = 0x0172, /* get port names by FC4 type */
+ FC_NS_GID_PT = 0x01a1, /* get IDs by port type */
+ FC_NS_RFT_ID = 0x0217, /* reg FC4 type for ID */
+ FC_NS_RPN_ID = 0x0212, /* reg port name for ID */
+ FC_NS_RNN_ID = 0x0213, /* reg node name for ID */
+};
+
+/*
+ * Port type values.
+ */
+enum fc_ns_pt {
+ FC_NS_UNID_PORT = 0x00, /* unidentified */
+ FC_NS_N_PORT = 0x01, /* N port */
+ FC_NS_NL_PORT = 0x02, /* NL port */
+ FC_NS_FNL_PORT = 0x03, /* F/NL port */
+ FC_NS_NX_PORT = 0x7f, /* Nx port */
+ FC_NS_F_PORT = 0x81, /* F port */
+ FC_NS_FL_PORT = 0x82, /* FL port */
+ FC_NS_E_PORT = 0x84, /* E port */
+ FC_NS_B_PORT = 0x85, /* B port */
+};
+
+/*
+ * Port type object.
+ */
+struct fc_ns_pt_obj {
+ __u8 pt_type;
+};
+
+/*
+ * Port ID object
+ */
+struct fc_ns_fid {
+ __u8 fp_flags; /* flags for responses only */
+ __u8 fp_fid[3];
+};
+
+/*
+ * fp_flags in port ID object, for responses only.
+ */
+#define FC_NS_FID_LAST 0x80 /* last object */
+
+/*
+ * FC4-types object.
+ */
+#define FC_NS_TYPES 256 /* number of possible FC-4 types */
+#define FC_NS_BPW 32 /* bits per word in bitmap */
+
+struct fc_ns_fts {
+ __be32 ff_type_map[FC_NS_TYPES / FC_NS_BPW]; /* bitmap of FC-4 types */
+};
+
+/*
+ * GID_PT request.
+ */
+struct fc_ns_gid_pt {
+ __u8 fn_pt_type;
+ __u8 fn_domain_id_scope;
+ __u8 fn_area_id_scope;
+ __u8 fn_resvd;
+};
+
+/*
+ * GID_FT or GPN_FT request.
+ */
+struct fc_ns_gid_ft {
+ __u8 fn_resvd;
+ __u8 fn_domain_id_scope;
+ __u8 fn_area_id_scope;
+ __u8 fn_fc4_type;
+};
+
+/*
+ * GPN_FT response.
+ */
+struct fc_gpn_ft_resp {
+ __u8 fp_flags; /* see fp_flags definitions above */
+ __u8 fp_fid[3]; /* port ID */
+ __be32 fp_resvd;
+ __be64 fp_wwpn; /* port name */
+};
+
+/*
+ * GID_PN request
+ */
+struct fc_ns_gid_pn {
+ __be64 fn_wwpn; /* port name */
+};
+
+/*
+ * GID_PN response
+ */
+struct fc_gid_pn_resp {
+ __u8 fp_resvd;
+ __u8 fp_fid[3]; /* port ID */
+};
+
+/*
+ * RFT_ID request - register FC-4 types for ID.
+ */
+struct fc_ns_rft_id {
+ struct fc_ns_fid fr_fid; /* port ID object */
+ struct fc_ns_fts fr_fts; /* FC-4 types object */
+};
+
+/*
+ * RPN_ID request - register port name for ID.
+ * RNN_ID request - register node name for ID.
+ */
+struct fc_ns_rn_id {
+ struct fc_ns_fid fr_fid; /* port ID object */
+ __be64 fr_wwn; /* node name or port name */
+} __attribute__((__packed__));
+
+#endif /* _FC_NS_H_ */
^ permalink raw reply related [flat|nested] 4+ messages in thread* [PATCH 2/3] [RFC] libfc: a modular software Fibre Channel implementation
2008-08-08 23:26 [PATCH 0/3][RFC] libfc and fcoe Robert Love
2008-08-08 23:26 ` [PATCH 1/3] [RFC] FC protocol definition header files Robert Love
@ 2008-08-08 23:26 ` Robert Love
2008-08-08 23:26 ` [PATCH 3/3] [RFC] fcoe: Fibre Channel over Ethernet Robert Love
2 siblings, 0 replies; 4+ messages in thread
From: Robert Love @ 2008-08-08 23:26 UTC (permalink / raw)
To: linux-scsi
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Yi Zou <yi.zou@intel.com>
Signed-off-by: Steve Ma <steve.ma@intel.com>
---
drivers/scsi/Kconfig | 6
drivers/scsi/Makefile | 1
drivers/scsi/libfc/Makefile | 12
drivers/scsi/libfc/fc_attr.c | 129 ++
drivers/scsi/libfc/fc_exch.c | 1902 +++++++++++++++++++++++++++++++++++++
drivers/scsi/libfc/fc_fcp.c | 2121 +++++++++++++++++++++++++++++++++++++++++
drivers/scsi/libfc/fc_frame.c | 88 ++
drivers/scsi/libfc/fc_lport.c | 914 ++++++++++++++++++
drivers/scsi/libfc/fc_ns.c | 1229 ++++++++++++++++++++++++
drivers/scsi/libfc/fc_rport.c | 1265 ++++++++++++++++++++++++
include/scsi/libfc/fc_frame.h | 236 +++++
include/scsi/libfc/libfc.h | 737 ++++++++++++++
12 files changed, 8640 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c7f0629..ae5e574 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -328,6 +328,12 @@ menuconfig SCSI_LOWLEVEL
if SCSI_LOWLEVEL && SCSI
+config LIBFC
+ tristate "LibFC module"
+ depends on SCSI && SCSI_FC_ATTRS
+ ---help---
+ Fibre Channel library module
+
config ISCSI_TCP
tristate "iSCSI Initiator over TCP/IP"
depends on SCSI && INET
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 72fd504..9158dc6 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
obj-$(CONFIG_SCSI_DH) += device_handler/
+obj-$(CONFIG_LIBFC) += libfc/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
new file mode 100644
index 0000000..0a31ca2
--- /dev/null
+++ b/drivers/scsi/libfc/Makefile
@@ -0,0 +1,12 @@
+# Makefile for libfc, the modular Fibre Channel library.
+# (The old "$Id:" CVS keyword line is removed; version-control
+# keywords are not used in kernel sources.)
+
+obj-$(CONFIG_LIBFC) += libfc.o
+
+libfc-objs := \
+	fc_ns.o \
+	fc_exch.o \
+	fc_frame.o \
+	fc_lport.o \
+	fc_rport.o \
+	fc_attr.o \
+	fc_fcp.o
diff --git a/drivers/scsi/libfc/fc_attr.c b/drivers/scsi/libfc/fc_attr.c
new file mode 100644
index 0000000..d73f39e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_attr.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <scsi/scsi_host.h>
+
+#include <scsi/libfc/libfc.h>
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("libfc");
+MODULE_LICENSE("GPL");
+
+/*
+ * fc_get_host_port_id() - fc_host transport template callback.
+ * Refresh the cached FC_ID of the host from its lport.
+ */
+void fc_get_host_port_id(struct Scsi_Host *shost)
+{
+	fc_host_port_id(shost) = fc_lport_get_fid(shost_priv(shost));
+}
+EXPORT_SYMBOL(fc_get_host_port_id);
+
+/*
+ * fc_get_host_speed() - fc_host transport template callback.
+ * TODO: the link speed should be obtained from the transport or the
+ * underlying Ethernet driver; report 1 Gbit until that exists.
+ * Use the transport's symbolic constant rather than a bare 1.
+ */
+void fc_get_host_speed(struct Scsi_Host *shost)
+{
+	fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+}
+EXPORT_SYMBOL(fc_get_host_speed);
+
+/*
+ * fc_get_host_port_type() - fc_host transport template callback.
+ * This implementation always reports an N_Port.
+ */
+void fc_get_host_port_type(struct Scsi_Host *shost)
+{
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+}
+EXPORT_SYMBOL(fc_get_host_port_type);
+
+/*
+ * fc_get_host_fabric_name() - fc_host transport template callback.
+ * Reports the local node name as the fabric name.
+ */
+void fc_get_host_fabric_name(struct Scsi_Host *shost)
+{
+	struct fc_lport *lport = shost_priv(shost);
+
+	fc_host_fabric_name(shost) = lport->wwnn;
+}
+EXPORT_SYMBOL(fc_get_host_fabric_name);
+
+/*
+ * fc_attr_init() - set the static fc_host attributes for an lport.
+ * Node/port names, class of service, FC-4 type bitmaps and max frame
+ * size are filled in once; they do not change afterwards.
+ */
+void fc_attr_init(struct fc_lport *lp)
+{
+ fc_host_node_name(lp->host) = lp->wwnn;
+ fc_host_port_name(lp->host) = lp->wwpn;
+ fc_host_supported_classes(lp->host) = FC_COS_CLASS3;
+ memset(fc_host_supported_fc4s(lp->host), 0,
+ sizeof(fc_host_supported_fc4s(lp->host)));
+ /*
+ * NOTE(review): bytes 2 and 7 of the FC-4 bitmap are set below;
+ * presumably these mark the FCP and GS FC-4 types - confirm
+ * against the FC-GS FC-4 TYPEs bit layout.
+ */
+ fc_host_supported_fc4s(lp->host)[2] = 1;
+ fc_host_supported_fc4s(lp->host)[7] = 1;
+ /* This value is also unchanging */
+ memset(fc_host_active_fc4s(lp->host), 0,
+ sizeof(fc_host_active_fc4s(lp->host)));
+ fc_host_active_fc4s(lp->host)[2] = 1;
+ fc_host_active_fc4s(lp->host)[7] = 1;
+ fc_host_maxframe_size(lp->host) = lp->mfs;
+}
+EXPORT_SYMBOL(fc_attr_init);
+
+/*
+ * fc_set_rport_loss_tmo() - fc transport callback to set dev_loss_tmo.
+ * A zero timeout selects the 30-second default; any other value is
+ * padded by five seconds before being stored.
+ */
+void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+	rport->dev_loss_tmo = timeout ? timeout + 5 : 30;
+}
+EXPORT_SYMBOL(fc_set_rport_loss_tmo);
+
+/*
+ * fc_get_host_stats() - fc_host statistics callback.
+ * Zeroes and re-aggregates the per-CPU fcoe_dev_stats into the lport's
+ * host_stats. seconds_since_last_reset is derived from lp->boot_time.
+ * Counters not tracked by this stack (lip_count, nos_count, ...) are
+ * reported as -1.
+ */
+struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
+{
+ int i;
+ struct fc_host_statistics *fcoe_stats;
+ struct fc_lport *lp = shost_priv(shost);
+ struct timespec v0, v1;
+
+ fcoe_stats = &lp->host_stats;
+ memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
+
+ jiffies_to_timespec(jiffies, &v0);
+ jiffies_to_timespec(lp->boot_time, &v1);
+ fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
+
+ /* sum the per-CPU counters; CPUs without a stats block are skipped */
+ for_each_online_cpu(i) {
+ struct fcoe_dev_stats *stats = lp->dev_stats[i];
+ if (stats == NULL)
+ continue;
+ fcoe_stats->tx_frames += stats->TxFrames;
+ fcoe_stats->tx_words += stats->TxWords;
+ fcoe_stats->rx_frames += stats->RxFrames;
+ fcoe_stats->rx_words += stats->RxWords;
+ fcoe_stats->error_frames += stats->ErrorFrames;
+ fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
+ fcoe_stats->fcp_input_requests += stats->InputRequests;
+ fcoe_stats->fcp_output_requests += stats->OutputRequests;
+ fcoe_stats->fcp_control_requests += stats->ControlRequests;
+ fcoe_stats->fcp_input_megabytes += stats->InputMegabytes;
+ fcoe_stats->fcp_output_megabytes += stats->OutputMegabytes;
+ fcoe_stats->link_failure_count += stats->LinkFailureCount;
+ }
+ /* -1 marks counters this implementation does not maintain */
+ fcoe_stats->lip_count = -1;
+ fcoe_stats->nos_count = -1;
+ fcoe_stats->loss_of_sync_count = -1;
+ fcoe_stats->loss_of_signal_count = -1;
+ fcoe_stats->prim_seq_protocol_err_count = -1;
+ fcoe_stats->dumped_frames = -1;
+ return fcoe_stats;
+}
+EXPORT_SYMBOL(fc_get_host_stats);
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 0000000..4e552c0
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,1902 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Fibre Channel exchange and sequence handling.
+ */
+
+#include <linux/timer.h>
+#include <linux/gfp.h>
+#include <linux/err.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc/libfc.h>
+
+#define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */
+
+/*
+ * fc_exch_debug can be set in debugger or at compile time to get more logs.
+ */
+static int fc_exch_debug;
+
+/*
+ * Structure and function definitions for managing Fibre Channel Exchanges
+ * and Sequences.
+ *
+ * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
+ *
+ * fc_exch_mgr holds the exchange state for an N port
+ *
+ * fc_exch holds state for one exchange and links to its active sequence.
+ *
+ * fc_seq holds the state for an individual sequence.
+ */
+
+/*
+ * Sequence.
+ * One instance is embedded in each exchange; only a single active
+ * sequence per exchange is supported.
+ */
+struct fc_seq {
+ u8 id; /* seq ID */
+ u16 ssb_stat; /* status flags for sequence status block */
+ u16 cnt; /* frames sent so far on sequence */
+ u32 f_ctl; /* F_CTL flags for frames */
+ u32 rec_data; /* FC-4 value for REC */
+};
+
+/*
+ * Exchange.
+ *
+ * Locking notes: The ex_lock protects changes to the following fields:
+ * esb_stat, f_ctl, seq.ssb_stat, seq.f_ctl.
+ * seq_id
+ * sequence allocation
+ * ex_refcnt is an atomic_t and needs no lock.
+ */
+struct fc_exch {
+ struct fc_exch_mgr *em; /* exchange manager */
+ u16 xid; /* our exchange ID */
+ struct list_head ex_list; /* free or busy list linkage */
+ spinlock_t ex_lock; /* lock covering exchange state */
+ atomic_t ex_refcnt; /* reference counter */
+ struct timer_list ex_timer; /* timer for upper level protocols */
+ struct fc_lport *lp; /* fc device instance */
+ u16 oxid; /* originator's exchange ID */
+ u16 rxid; /* responder's exchange ID */
+ u32 oid; /* originator's FCID */
+ u32 sid; /* source FCID */
+ u32 did; /* destination FCID */
+ u32 esb_stat; /* exchange status for ESB */
+ u32 r_a_tov; /* r_a_tov from rport (msec) */
+ u8 seq_id; /* next sequence ID to use */
+ u32 f_ctl; /* F_CTL flags for sequences */
+ enum fc_class class; /* class of service */
+ struct fc_seq seq; /* single sequence */
+
+ /*
+ * Handler for responses to this current exchange.
+ */
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *);
+ void *resp_arg; /* 3rd arg for exchange resp handler */
+};
+
+/*
+ * Exchange manager.
+ *
+ * This structure is the center for creating exchanges and sequences.
+ * It manages the allocation of exchange IDs.
+ * XIDs are handed out from the circular range [min_xid, max_xid];
+ * the exches array is indexed by (xid - min_xid).
+ */
+struct fc_exch_mgr {
+ enum fc_class class; /* default class for sequences */
+ spinlock_t em_lock; /* exchange manager lock */
+ u16 last_xid; /* last allocated exchange ID */
+ u16 min_xid; /* min exchange ID */
+ u16 max_xid; /* max exchange ID */
+ char em_cache_name[20]; /* cache name string */
+ struct kmem_cache *em_cache; /* cache for exchanges */
+ u32 total_exches; /* total allocated exchanges */
+ struct list_head ex_list; /* allocated exchanges list */
+ struct fc_lport *lp; /* fc device instance */
+
+ /*
+ * currently exchange mgr stats are updated but not used.
+ * either stats can be expose via sysfs or remove them
+ * all together if not used XXX
+ */
+ struct {
+ atomic_t no_free_exch;
+ atomic_t no_free_exch_xid;
+ atomic_t xid_not_found;
+ atomic_t xid_busy;
+ atomic_t seq_not_found;
+ atomic_t non_bls_resp;
+ } stats;
+ struct fc_exch **exches; /* for exch pointers indexed by xid - min_xid */
+};
+
+/* Map a sequence pointer to its enclosing exchange. */
+#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
+
+/*
+ * Next XID in the manager's circular [min_xid, max_xid] range.
+ * Parameters are fully parenthesized so the macro stays correct when
+ * given non-trivial argument expressions (the original expanded `id`
+ * and `mp` unparenthesized).
+ */
+#define fc_exch_next_xid(mp, id) \
+	(((id) == (mp)->max_xid) ? (mp)->min_xid : (id) + 1)
+
+static void fc_exch_rrq(struct fc_exch *);
+static void fc_seq_ls_acc(struct fc_seq *);
+static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
+ enum fc_els_rjt_explan);
+static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
+static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
+
+/*
+ * Internal implementation notes.
+ *
+ * The exchange manager is one by default in libfc but LLD may choose
+ * to have one per CPU. The sequence manager is one per exchange manager
+ * and currently never separated.
+ *
+ * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
+ * assigned by the Sequence Initiator that shall be unique for a specific
+ * D_ID and S_ID pair while the Sequence is open." Note that it isn't
+ * qualified by exchange ID, which one might think it would be.
+ * In practice this limits the number of open sequences and exchanges to 256
+ * per session. For most targets we could treat this limit as per exchange.
+ *
+ * The exchange and its sequence are freed when the last sequence is received.
+ * It's possible for the remote port to leave an exchange open without
+ * sending any sequences.
+ *
+ * Notes on reference counts:
+ *
+ * Exchanges are reference counted and exchange gets freed when the reference
+ * count becomes zero.
+ *
+ * Timeouts:
+ * Sequences are timed out for E_D_TOV and R_A_TOV.
+ *
+ * Sequence event handling:
+ *
+ * The following events may occur on initiator sequences:
+ *
+ * Send.
+ * For now, the whole thing is sent.
+ * Receive ACK
+ * This applies only to class F.
+ * The sequence is marked complete.
+ * ULP completion.
+ * The upper layer calls fc_exch_done() when done
+ * with exchange and sequence tuple.
+ * RX-inferred completion.
+ * When we receive the next sequence on the same exchange, we can
+ * retire the previous sequence ID. (XXX not implemented).
+ * Timeout.
+ * R_A_TOV frees the sequence ID. If we're waiting for ACK,
+ * E_D_TOV causes abort and calls upper layer response handler
+ * with FC_EX_TIMEOUT error.
+ * Receive RJT
+ * XXX defer.
+ * Send ABTS
+ * On timeout.
+ *
+ * The following events may occur on recipient sequences:
+ *
+ * Receive
+ * Allocate sequence for first frame received.
+ * Hold during receive handler.
+ * Release when final frame received.
+ * Keep status of last N of these for the ELS RES command. XXX TBD.
+ * Receive ABTS
+ * Deallocate sequence
+ * Send RJT
+ * Deallocate
+ *
+ * For now, we neglect conditions where only part of a sequence was
+ * received or transmitted, or where out-of-order receipt is detected.
+ */
+
+/*
+ * Locking notes:
+ *
+ * The EM code run in a per-CPU worker thread.
+ *
+ * To protect against concurrency between a worker thread code and timers,
+ * sequence allocation and deallocation must be locked.
+ * - exchange refcnt can be done atomicly without locks.
+ * - sequence allocation must be locked by exch lock.
+ */
+
+/*
+ * R_CTL opcode names for debugging.
+ */
+static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
+
+/*
+ * Element count of a true array. The argument is parenthesized before
+ * indexing (the original `x[0]` would misparse for expressions such as
+ * a cast). Equivalent to the kernel's ARRAY_SIZE().
+ */
+#define FC_TABLE_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+/*
+ * Translate an opcode into a printable name via a lookup table.
+ * Out-of-range opcodes and NULL table entries both yield "unknown".
+ */
+static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
+					      unsigned int max_index)
+{
+	const char *entry = (op < max_index) ? table[op] : NULL;
+
+	return entry ? entry : "unknown";
+}
+
+/*
+ * Return the debug name for an R_CTL value.
+ */
+static const char *fc_exch_rctl_name(unsigned int op)
+{
+ return fc_exch_name_lookup(op, fc_exch_rctl_names,
+ FC_TABLE_SIZE(fc_exch_rctl_names));
+}
+
+/*
+ * Hold an exchange - keep it from being freed.
+ * Paired with fc_exch_release(), which frees the exchange when the
+ * count drops to zero.
+ */
+static void fc_exch_hold(struct fc_exch *ep)
+{
+ atomic_inc(&ep->ex_refcnt);
+}
+
+/*
+ * Fill in the frame-header fields owned by the exchange layer.
+ *
+ * This routine is responsible for: d_id, s_id, ox_id, rx_id, seq_id,
+ * cs_ctl and df_ctl. The caller still owns: r_ctl, type, f_ctl,
+ * seq_cnt and parm_offset. Works whether we are the originator or the
+ * responder of the sequence.
+ */
+static void fc_seq_fill_hdr(struct fc_seq *sp, struct fc_frame *fp)
+{
+	struct fc_exch *ep = fc_seq_exch(sp);
+	struct fc_frame_header *hdr = fc_frame_header_get(fp);
+
+	hton24(hdr->fh_s_id, ep->sid);
+	hton24(hdr->fh_d_id, ep->did);
+	hdr->fh_ox_id = htons(ep->oxid);
+	hdr->fh_rx_id = htons(ep->rxid);
+	hdr->fh_seq_id = sp->id;
+	hdr->fh_cs_ctl = 0;
+	hdr->fh_df_ctl = 0;
+}
+
+/*
+ * Release a reference to an exchange.
+ * When the last reference is dropped the exchange must already be
+ * complete; it is then unhooked from the manager and freed.
+ */
+static void fc_exch_release(struct fc_exch *ep)
+{
+	struct fc_exch_mgr *mp;
+
+	if (atomic_dec_and_test(&ep->ex_refcnt)) {
+		/*
+		 * Fix: the original wrote `!ep->esb_stat & ESB_ST_COMPLETE`,
+		 * which negates esb_stat before masking. Parenthesize so we
+		 * actually warn when the COMPLETE bit is clear.
+		 */
+		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
+		del_timer(&ep->ex_timer);
+		mp = ep->em;
+		if (ep->lp->tt.exch_put)
+			ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
+		spin_lock_bh(&mp->em_lock);
+		WARN_ON(mp->total_exches <= 0);
+		mp->total_exches--;
+		mp->exches[ep->xid - mp->min_xid] = NULL;
+		list_del(&ep->ex_list);
+		spin_unlock_bh(&mp->em_lock);
+		kmem_cache_free(mp->em_cache, ep);
+	}
+}
+
+/*
+ * Internal version of fc_exch_timer_set - used with lock held.
+ * Caller must hold ep->ex_lock. A reference is taken the first time
+ * the timer is armed; it is dropped when the timer fires or is
+ * cancelled.
+ */
+static void fc_exch_timer_set_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
+{
+ if (!timer_pending(&ep->ex_timer))
+ fc_exch_hold(ep); /* hold for timer */
+ mod_timer(&ep->ex_timer, jiffies + msecs_to_jiffies(timer_msec));
+}
+
+/*
+ * Set timer for an exchange.
+ * The time is a minimum delay in milliseconds until the timer fires.
+ * Used for upper level protocols to time out the exchange.
+ * The timer is cancelled when it fires or when the exchange completes.
+ * (Stale "returns non-zero" note removed: this function is void.)
+ */
+static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
+{
+ spin_lock_bh(&ep->ex_lock);
+ fc_exch_timer_set_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+/*
+ * Abort the exchange for a sequence due to timeout or an upper-level abort.
+ * Called without the exchange manager em_lock held.
+ * Returns 0 on success; otherwise a positive errno-style value
+ * (ENOMEM if a new sequence could not be started, ENOBUFS if no frame
+ * could be allocated for the ABTS).
+ * NOTE(review): positive error returns - kernel convention is negative
+ * errno; confirm what callers expect before changing this.
+ */
+int fc_seq_exch_abort(const struct fc_seq *req_sp)
+{
+ struct fc_seq *sp;
+ struct fc_exch *ep;
+ struct fc_frame *fp;
+ int error;
+
+ ep = fc_seq_exch(req_sp);
+
+ /*
+ * Send the abort on a new sequence if possible.
+ */
+ error = ENOMEM;
+ sp = fc_seq_start_next(&ep->seq);
+ if (sp) {
+ spin_lock_bh(&ep->ex_lock);
+ sp->f_ctl |= FC_FC_SEQ_INIT;
+ ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
+ /* re-arm so an unanswered ABTS is retried after R_A_TOV */
+ fc_exch_timer_set_locked(ep, ep->r_a_tov);
+ spin_unlock_bh(&ep->ex_lock);
+
+ /*
+ * If not logged into the fabric, don't send ABTS but leave
+ * sequence active until next timeout.
+ */
+ if (!ep->sid)
+ return 0;
+
+ /*
+ * Send an abort for the sequence that timed out.
+ */
+ fp = fc_frame_alloc(ep->lp, 0);
+ if (fp) {
+ fc_frame_setup(fp, FC_RCTL_BA_ABTS, FC_TYPE_BLS);
+ error = fc_seq_send(ep->lp, sp, fp, FC_FC_END_SEQ);
+ } else {
+ error = ENOBUFS;
+ }
+ }
+ return error;
+}
+EXPORT_SYMBOL(fc_seq_exch_abort);
+
+/*
+ * Exchange timeout - handle exchange timer expiration.
+ * The timer will have been cancelled before this is called.
+ * Three cases:
+ * - already COMPLETE: clear REC_QUAL and, if it was set, issue an RRQ;
+ * - ABNORMAL (abort in progress): mark the exchange complete;
+ * - otherwise: abort the exchange and notify the upper layer's
+ * response handler with ERR_PTR(-FC_EX_TIMEOUT).
+ * The final fc_exch_release() matches the hold taken when the timer
+ * was armed.
+ */
+static void fc_exch_timeout(unsigned long ep_arg)
+{
+ struct fc_exch *ep = (struct fc_exch *)ep_arg;
+ struct fc_seq *sp = &ep->seq;
+ void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+ void *arg;
+ u32 e_stat;
+
+ spin_lock_bh(&ep->ex_lock);
+ e_stat = ep->esb_stat;
+ if (e_stat & ESB_ST_COMPLETE) {
+ ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
+ spin_unlock_bh(&ep->ex_lock);
+ if (e_stat & ESB_ST_REC_QUAL)
+ fc_exch_rrq(ep);
+ } else if (e_stat & ESB_ST_ABNORMAL) {
+ ep->esb_stat |= ESB_ST_COMPLETE;
+ spin_unlock_bh(&ep->ex_lock);
+ } else {
+ /* snapshot the handler under the lock, then call it unlocked */
+ fc_exch_hold(ep);
+ resp = ep->resp;
+ ep->resp = NULL;
+ arg = ep->resp_arg;
+ spin_unlock_bh(&ep->ex_lock);
+ fc_seq_exch_abort(sp);
+ fc_exch_release(ep);
+
+ if (resp)
+ resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+ }
+
+ /*
+ * This release matches the hold taken when the timer was set.
+ */
+ fc_exch_release(ep);
+}
+
+/*
+ * (Re)initialize the exchange's embedded sequence with a new sequence ID.
+ *
+ * Multiple concurrently originated sequences on one exchange are not
+ * supported; any previously originated sequence is implicitly complete
+ * and its state is simply reused.
+ */
+static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
+{
+	struct fc_seq *sp = &ep->seq;
+
+	sp->ssb_stat = 0;
+	sp->f_ctl = 0;
+	sp->cnt = 0;
+	sp->id = seq_id;
+	return sp;
+}
+
+/*
+ * Allocate an exchange.
+ *
+ * if xid is supplied zero then assign next free exchange ID
+ * from exchange manager, otherwise use supplied xid.
+ * Returns the exchange held for the caller, or NULL if the xid is
+ * out of range or busy, or no memory / free XID is available.
+ */
+struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 xid)
+{
+ struct fc_exch *ep = NULL;
+ u16 min_xid, max_xid;
+
+ min_xid = mp->min_xid;
+ max_xid = mp->max_xid;
+ /*
+ * if xid is supplied then verify its xid range
+ */
+ if (xid) {
+ if (unlikely((xid < min_xid) || (xid > max_xid))) {
+ FC_DBG("Invalid xid 0x:%x\n", xid);
+ goto out;
+ }
+ if (unlikely(mp->exches[xid - min_xid] != NULL)) {
+ FC_DBG("xid 0x:%x is already in use\n", xid);
+ goto out;
+ }
+ }
+
+ /*
+ * Allocate new exchange
+ */
+ ep = kmem_cache_zalloc(mp->em_cache, GFP_ATOMIC);
+ if (!ep) {
+ atomic_inc(&mp->stats.no_free_exch);
+ goto out;
+ }
+
+ spin_lock_bh(&mp->em_lock);
+
+ /*
+ * if xid is zero then assign next free exchange ID
+ */
+ if (!xid) {
+ xid = fc_exch_next_xid(mp, mp->last_xid);
+ /*
+ * find next free xid using linear search
+ */
+ while (mp->exches[xid - min_xid] != NULL) {
+ if (xid == mp->last_xid)
+ break;
+ xid = fc_exch_next_xid(mp, xid);
+ }
+
+ if (likely(mp->exches[xid - min_xid] == NULL)) {
+ mp->exches[xid - min_xid] = ep;
+ mp->last_xid = xid;
+ } else {
+ /* wrapped all the way around: no free XID */
+ spin_unlock_bh(&mp->em_lock);
+ atomic_inc(&mp->stats.no_free_exch_xid);
+ kmem_cache_free(mp->em_cache, ep);
+ goto out;
+ }
+ }
+
+ list_add_tail(&ep->ex_list, &mp->ex_list);
+ /* initialize the embedded sequence with the first sequence ID */
+ fc_seq_alloc(ep, ep->seq_id++);
+ mp->total_exches++;
+ spin_unlock_bh(&mp->em_lock);
+
+ /*
+ * update exchange
+ */
+ ep->oxid = ep->xid = xid;
+ ep->em = mp;
+ ep->lp = mp->lp;
+ ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
+ ep->rxid = FC_XID_UNKNOWN;
+ ep->class = mp->class;
+
+ spin_lock_init(&ep->ex_lock);
+ setup_timer(&ep->ex_timer, fc_exch_timeout, (unsigned long)ep);
+
+ fc_exch_hold(ep); /* hold for caller */
+out:
+ return ep;
+}
+EXPORT_SYMBOL(fc_exch_alloc);
+
+/*
+ * Look up an exchange by XID and take a hold on it.
+ * Returns NULL when the XID is outside this manager's range or unused.
+ */
+static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
+{
+	struct fc_exch *found;
+
+	if (xid < mp->min_xid || xid > mp->max_xid)
+		return NULL;
+
+	spin_lock_bh(&mp->em_lock);
+	found = mp->exches[xid - mp->min_xid];
+	if (found) {
+		fc_exch_hold(found);
+		WARN_ON(found->xid != xid);
+	}
+	spin_unlock_bh(&mp->em_lock);
+	return found;
+}
+
+/*
+ * Mark exchange complete - internal version called with ex_lock held.
+ * If a REC qualifier is outstanding, re-arm a short timer so the RRQ
+ * can be issued once the lock is dropped; otherwise drop the timer's
+ * hold (if a timer was pending) plus one additional reference.
+ * NOTE(review): the trailing unconditional atomic_dec() drops a
+ * reference without going through fc_exch_release() - confirm the
+ * refcount pairing against the callers.
+ */
+static void fc_exch_complete_locked(struct fc_exch *ep)
+{
+ ep->esb_stat |= ESB_ST_COMPLETE;
+ ep->resp = NULL;
+
+ /*
+ * Assuming in-order delivery, the timeout for RRQ is 0, not R_A_TOV.
+ * Here, we allow a short time for frames which may have been
+ * re-ordered in various kernel queues or due to interrupt balancing.
+ * Also, using a timer here allows us to issue the RRQ after the
+ * exchange lock is dropped.
+ */
+ if (unlikely(ep->esb_stat & ESB_ST_REC_QUAL)) {
+ fc_exch_timer_set_locked(ep, 10);
+ } else {
+ if (timer_pending(&ep->ex_timer)) {
+ del_timer(&ep->ex_timer);
+ /*
+ * drop hold for timer
+ */
+ atomic_dec(&ep->ex_refcnt);
+ }
+ atomic_dec(&ep->ex_refcnt);
+ }
+}
+
+/*
+ * Mark exchange complete.
+ * The state may be available for ILS Read Exchange Status (RES) for a time.
+ * The caller doesn't necessarily hold the exchange.
+ * Simple ex_lock wrapper around fc_exch_complete_locked().
+ */
+static void fc_exch_complete(struct fc_exch *ep)
+{
+ spin_lock_bh(&ep->ex_lock);
+ fc_exch_complete_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+/*
+ * Allocate a new exchange as responder.
+ * Sets the responder ID in the frame header.
+ * Our s_id/d_id are taken swapped from the received frame (we reply
+ * toward the originator); our allocated XID becomes the RX_ID and the
+ * frame's OX_ID is recorded. SEQ_INIT tracking follows the incoming
+ * F_CTL bits.
+ */
+static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_exch *ep;
+ struct fc_frame_header *fh;
+ u16 rxid;
+
+ ep = mp->lp->tt.exch_get(mp->lp, fp);
+ if (ep) {
+ ep->class = fc_frame_class(fp);
+
+ /*
+ * Set EX_CTX indicating we're responding on this exchange.
+ */
+ ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
+ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
+ fh = fc_frame_header_get(fp);
+ ep->sid = ntoh24(fh->fh_d_id);
+ ep->did = ntoh24(fh->fh_s_id);
+ ep->oid = ep->did;
+
+ /*
+ * Allocated exchange has placed the XID in the
+ * originator field. Move it to the responder field,
+ * and set the originator XID from the frame.
+ */
+ ep->rxid = ep->xid;
+ ep->oxid = ntohs(fh->fh_ox_id);
+ ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
+ if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+
+ /*
+ * Set the responder ID in the frame header.
+ * The old one should've been 0xffff.
+ * If it isn't, don't assign one.
+ * Incoming basic link service frames may specify
+ * a referenced RX_ID.
+ */
+ if (fh->fh_type != FC_TYPE_BLS) {
+ rxid = ntohs(fh->fh_rx_id);
+ WARN_ON(rxid != FC_XID_UNKNOWN);
+ fh->fh_rx_id = htons(ep->rxid);
+ }
+ }
+ return ep;
+}
+
+/*
+ * Find a sequence for receive where the other end is originating the sequence.
+ *
+ * Looks up (or, for the first frame of a new sequence, creates) the
+ * exchange and sequence for an incoming frame and stores the sequence
+ * in fr_seq(fp). Returns FC_RJT_NONE on success, or the P_RJT reason
+ * to send back to the originator.
+ */
+static enum fc_pf_rjt_reason
+fc_seq_lookup_recip(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	struct fc_exch *ep = NULL;
+	struct fc_seq *sp = NULL;
+	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
+	u32 f_ctl;
+	u16 xid;
+
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
+
+	/*
+	 * Lookup or create the exchange if we will be creating the sequence.
+	 */
+	if (f_ctl & FC_FC_EX_CTX) {
+		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
+		ep = fc_exch_find(mp, xid);
+		if (!ep) {
+			atomic_inc(&mp->stats.xid_not_found);
+			reject = FC_RJT_OX_ID;
+			goto out;
+		}
+		if (ep->rxid == FC_XID_UNKNOWN)
+			ep->rxid = ntohs(fh->fh_rx_id);
+		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
+			fc_exch_release(ep);
+			reject = FC_RJT_OX_ID;
+			goto out;
+		}
+	} else {
+		xid = ntohs(fh->fh_rx_id);	/* we are the responder */
+
+		/*
+		 * Special case for MDS issuing an ELS TEST with a
+		 * bad rxid of 0.
+		 * XXX take this out once we do the proper reject.
+		 */
+		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+		    fc_frame_payload_op(fp) == ELS_TEST) {
+			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
+			xid = FC_XID_UNKNOWN;
+		}
+
+		/*
+		 * new sequence - find the exchange
+		 */
+		ep = fc_exch_find(mp, xid);
+		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+			if (ep) {
+				fc_exch_release(ep);
+				atomic_inc(&mp->stats.xid_busy);
+				reject = FC_RJT_RX_ID;
+				goto out;
+			}
+			ep = fc_exch_resp(mp, fp);
+			if (!ep) {
+				reject = FC_RJT_EXCH_EST;	/* XXX */
+				goto out;
+			}
+			xid = ep->xid;	/* get our XID */
+		} else if (!ep) {
+			atomic_inc(&mp->stats.xid_not_found);
+			reject = FC_RJT_RX_ID;	/* XID not found */
+			goto out;
+		}
+	}
+
+	/*
+	 * At this point, we have the exchange held.
+	 * Find or create the sequence.
+	 */
+	if (fc_sof_is_init(fr_sof(fp))) {
+		sp = fc_seq_start_next(&ep->seq);
+		/*
+		 * Fix: check for failure before touching sp; the original
+		 * assigned sp->id ahead of the NULL test.
+		 */
+		if (!sp) {
+			reject = FC_RJT_SEQ_XS;	/* exchange shortage */
+			goto out;
+		}
+		sp->id = fh->fh_seq_id;
+		sp->ssb_stat |= SSB_ST_RESP;
+	} else {
+		sp = &ep->seq;
+		if (sp->id != fh->fh_seq_id) {
+			atomic_inc(&mp->stats.seq_not_found);
+			reject = FC_RJT_SEQ_ID;	/* sequence/exch should exist */
+			goto out;
+		}
+	}
+	WARN_ON(ep != fc_seq_exch(sp));
+
+	if (f_ctl & FC_FC_SEQ_INIT)
+		ep->esb_stat |= ESB_ST_SEQ_INIT;
+
+	fr_seq(fp) = sp;
+out:
+	return reject;
+}
+
+/*
+ * Find the sequence for a frame being received.
+ * We originated the sequence, so it should be found.
+ * We may or may not have originated the exchange.
+ * Does not hold the sequence for the caller.
+ * NOTE(review): the exchange hold is released before returning while
+ * the returned sp points into the exchange - confirm callers cannot
+ * race with the final fc_exch_release().
+ */
+static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_exch *ep;
+ struct fc_seq *sp = NULL;
+ u32 f_ctl;
+ u16 xid;
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
+ /* EX_CTX set means we originated the exchange: our XID is OX_ID */
+ xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
+ ep = fc_exch_find(mp, xid);
+ if (!ep)
+ return NULL;
+ if (ep->seq.id == fh->fh_seq_id) {
+ /*
+ * Save the RX_ID if we didn't previously know it.
+ */
+ sp = &ep->seq;
+ if ((f_ctl & FC_FC_EX_CTX) != 0 &&
+ ep->rxid == FC_XID_UNKNOWN) {
+ ep->rxid = ntohs(fh->fh_rx_id);
+ }
+ }
+ fc_exch_release(ep);
+ return sp;
+}
+
+/*
+ * Record the originator/responder FC_IDs on an exchange and derive the
+ * s_id/d_id pair from our role (ESB_ST_RESP). Must be called before
+ * the first sequence of the exchange is sent.
+ */
+static void fc_exch_set_addr(struct fc_exch *ep,
+			     u32 orig_id, u32 resp_id)
+{
+	int we_respond = ep->esb_stat & ESB_ST_RESP;
+
+	ep->oid = orig_id;
+	ep->sid = we_respond ? resp_id : orig_id;
+	ep->did = we_respond ? orig_id : resp_id;
+}
+
+/*
+ * Allocate a new sequence on the same exchange as the supplied sequence.
+ * This will never return NULL: fc_seq_alloc() just reinitializes the
+ * exchange's embedded sequence with the next sequence ID.
+ * Takes ep->ex_lock around the (re)allocation.
+ */
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ spin_lock_bh(&ep->ex_lock);
+ WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
+
+ sp = fc_seq_alloc(ep, ep->seq_id++);
+
+ if (fc_exch_debug)
+ FC_DBG("exch %4x f_ctl %6x seq %2x f_ctl %6x\n",
+ ep->xid, ep->f_ctl, sp->id, sp->f_ctl);
+ spin_unlock_bh(&ep->ex_lock);
+ return sp;
+}
+EXPORT_SYMBOL(fc_seq_start_next);
+
+/*
+ * Send a frame on a sequence.
+ * Fills in the exchange-owned header fields, derives SOF/EOF from the
+ * class and position in the sequence, pads the final frame of a
+ * sequence to a 4-byte multiple (the fill count is carried in the low
+ * two bits of F_CTL), transmits via lp->tt.frame_send(), and then
+ * updates the saved sequence/exchange F_CTL state under ex_lock.
+ * May only be called once per sequence; returns frame_send()'s result.
+ */
+int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
+ struct fc_frame *fp, u32 f_ctl)
+{
+ struct fc_exch *ep;
+ struct fc_frame_header *fh;
+ enum fc_class class;
+ u16 fill = 0;
+ int error;
+
+ ep = fc_seq_exch(sp);
+ WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
+
+ fc_seq_fill_hdr(sp, fp);
+ fh = fc_frame_header_get(fp);
+ class = ep->class;
+ fr_sof(fp) = class;
+ if (sp->cnt)
+ fr_sof(fp) = fc_sof_normal(class);
+
+ if (f_ctl & FC_FC_END_SEQ) {
+ fr_eof(fp) = FC_EOF_T;
+ if (fc_sof_needs_ack(class))
+ fr_eof(fp) = FC_EOF_N;
+ /*
+ * Form f_ctl.
+ * The number of fill bytes to make the length a 4-byte
+ * multiple is the low order 2-bits of the f_ctl.
+ * The fill itself will have been cleared by the frame
+ * allocation.
+ * After this, the length will be even, as expected by
+ * the transport. Don't include the fill in the f_ctl
+ * saved in the sequence.
+ */
+ fill = fr_len(fp) & 3;
+ if (fill) {
+ fill = 4 - fill;
+ /* TODO, this may be a problem with fragmented skb */
+ skb_put(fp_skb(fp), fill);
+ }
+ f_ctl |= sp->f_ctl | ep->f_ctl;
+ } else {
+ WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
+ f_ctl |= sp->f_ctl | ep->f_ctl;
+ f_ctl &= ~FC_FC_SEQ_INIT;
+ fr_eof(fp) = FC_EOF_N;
+ }
+
+ hton24(fh->fh_f_ctl, f_ctl | fill);
+ fh->fh_seq_cnt = htons(sp->cnt++);
+
+ /*
+ * Send the frame.
+ */
+ error = lp->tt.frame_send(lp, fp);
+
+ /*
+ * Update the exchange and sequence flags,
+ * assuming all frames for the sequence have been sent.
+ * We can only be called to send once for each sequence.
+ */
+ spin_lock_bh(&ep->ex_lock);
+ sp->f_ctl = f_ctl; /* save for possible abort */
+ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
+ if (f_ctl & FC_FC_END_SEQ) {
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+ }
+ spin_unlock_bh(&ep->ex_lock);
+ return error;
+}
+EXPORT_SYMBOL(fc_seq_send);
+
+/*
+ * Send an ELS response on the given sequence.
+ * Dispatches on the ELS command: LS_RJT, LS_ACC, RRQ and REC are
+ * handled; any other command is logged via FC_DBG and dropped.
+ */
+void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data)
+{
+ switch (els_cmd) {
+ case ELS_LS_RJT:
+ fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
+ break;
+ case ELS_LS_ACC:
+ fc_seq_ls_acc(sp);
+ break;
+ case ELS_RRQ:
+ fc_exch_els_rrq(sp, els_data->fp);
+ break;
+ case ELS_REC:
+ fc_exch_els_rec(sp, els_data->fp);
+ break;
+ default:
+ FC_DBG("Invalid ELS CMD:%x\n", els_cmd);
+ }
+}
+EXPORT_SYMBOL(fc_seq_els_rsp_send);
+
+/*
+ * Transmit a frame as the last sequence of its exchange.
+ * Sets up R_CTL/type on the frame, then sends it with SEQ_INIT,
+ * LAST_SEQ and END_SEQ in F_CTL.
+ */
+static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
+			     enum fc_rctl rctl, enum fc_fh_type fh_type)
+{
+	const u32 last_f_ctl =
+		FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+
+	fc_frame_setup(fp, rctl, fh_type);
+	fc_seq_send(fc_seq_exch(sp)->lp, sp, fp, last_f_ctl);
+}
+
+/*
+ * Send ACK_1 (or equiv.) indicating we received something.
+ * The frame we're acking is supplied.
+ * ACKs are only sent for classes whose SOF requires them (not class 3).
+ * If the ACK frame cannot be allocated the ACK is silently skipped:
+ * the original BUG_ON() on allocation failure crashed on a recoverable
+ * OOM and made the graceful return below unreachable.
+ */
+static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
+{
+	struct fc_frame *fp;
+	struct fc_frame_header *rx_fh;
+	struct fc_frame_header *fh;
+	struct fc_lport *lp = fc_seq_exch(sp)->lp;
+	unsigned int f_ctl;
+
+	/*
+	 * Don't send ACKs for class 3.
+	 */
+	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
+		fp = fc_frame_alloc(lp, 0);
+		if (!fp)
+			return;
+
+		fc_seq_fill_hdr(sp, fp);
+		fh = fc_frame_header_get(fp);
+		fh->fh_r_ctl = FC_RCTL_ACK_1;
+		fh->fh_type = FC_TYPE_BLS;
+
+		/*
+		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
+		 * Last ACK uses bits 7-6 (continue sequence),
+		 * bits 5-4 are meaningful (what kind of ACK to use).
+		 */
+		rx_fh = fc_frame_header_get(rx_fp);
+		f_ctl = ntoh24(rx_fh->fh_f_ctl);
+		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
+			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
+			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+		hton24(fh->fh_f_ctl, f_ctl);
+
+		fh->fh_seq_id = rx_fh->fh_seq_id;
+		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+		fh->fh_parm_offset = htonl(1);	/* ack single frame */
+
+		fr_sof(fp) = fr_sof(rx_fp);
+		if (f_ctl & FC_FC_END_SEQ)
+			fr_eof(fp) = FC_EOF_T;
+		else
+			fr_eof(fp) = FC_EOF_N;
+
+		(void) lp->tt.frame_send(lp, fp);
+	}
+}
+
+/*
+ * Send BLS Reject.
+ * This is for rejecting BA_ABTS only.
+ * The reply header is built by swapping s_id/d_id and ox_id/rx_id
+ * from the received frame; frame allocation failure silently drops
+ * the reject.
+ */
+static void
+fc_exch_send_ba_rjt(struct fc_frame *rx_fp, enum fc_ba_rjt_reason reason,
+ enum fc_ba_rjt_explan explan)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *rx_fh;
+ struct fc_frame_header *fh;
+ struct fc_ba_rjt *rp;
+ struct fc_lport *lp;
+ unsigned int f_ctl;
+
+ lp = fr_dev(rx_fp);
+ fp = fc_frame_alloc(lp, sizeof(*rp));
+ if (!fp)
+ return;
+ fh = fc_frame_header_get(fp);
+ rx_fh = fc_frame_header_get(rx_fp);
+
+ /* zero the header and the BA_RJT payload that follows it */
+ memset(fh, 0, sizeof(*fh) + sizeof(*rp));
+
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ rp->br_reason = reason;
+ rp->br_explan = explan;
+
+ /*
+ * seq_id, cs_ctl, df_ctl and param/offset are zero.
+ */
+ memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
+ memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
+ fh->fh_ox_id = rx_fh->fh_rx_id;
+ fh->fh_rx_id = rx_fh->fh_ox_id;
+ fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+ fh->fh_r_ctl = FC_RCTL_BA_RJT;
+ fh->fh_type = FC_TYPE_BLS;
+
+ /*
+ * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+ * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+ * Bits 9-8 are meaningful (retransmitted or unidirectional).
+ * Last ACK uses bits 7-6 (continue sequence),
+ * bits 5-4 are meaningful (what kind of ACK to use).
+ * Always set LAST_SEQ, END_SEQ.
+ */
+ f_ctl = ntoh24(rx_fh->fh_f_ctl);
+ f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+ FC_FC_END_CONN | FC_FC_SEQ_INIT |
+ FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+ f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+ f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ f_ctl &= ~FC_FC_FIRST_SEQ;
+ hton24(fh->fh_f_ctl, f_ctl);
+
+ fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
+ fr_eof(fp) = FC_EOF_T;
+ if (fc_sof_needs_ack(fr_sof(fp)))
+ fr_eof(fp) = FC_EOF_N;
+
+ (void) lp->tt.frame_send(lp, fp);
+}
+
+/*
+ * Handle an incoming ABTS. This would be for target mode usually,
+ * but could be due to lost FCP transfer ready, confirm or RRQ.
+ * We always handle this as an exchange abort, ignoring the parameter.
+ * Replies with BA_ACC on success, BA_RJT if the exchange is unknown or
+ * already complete.  Consumes rx_fp on all paths.
+ */
+static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
+{
+ struct fc_frame *fp;
+ struct fc_ba_acc *ap;
+ struct fc_frame_header *fh;
+ struct fc_seq *sp;
+
+ if (!ep)
+ goto reject;
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+ spin_unlock_bh(&ep->ex_lock);
+ goto reject;
+ }
+ /* Take one extra hold to keep the exchange around for the
+ * recovery qualifier; released when REC_QUAL is cleared. */
+ if (!(ep->esb_stat & ESB_ST_REC_QUAL))
+ fc_exch_hold(ep); /* hold for REC_QUAL */
+ ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
+ fc_exch_timer_set_locked(ep, ep->r_a_tov);
+
+ fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+ if (!fp) {
+ spin_unlock_bh(&ep->ex_lock);
+ goto free;
+ }
+ fh = fc_frame_header_get(fp);
+ ap = fc_frame_payload_get(fp, sizeof(*ap));
+ memset(ap, 0, sizeof(*ap));
+ sp = &ep->seq;
+ ap->ba_high_seq_cnt = htons(0xffff);
+ if (sp->ssb_stat & SSB_ST_RESP) {
+ ap->ba_seq_id = sp->id;
+ ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
+ /* NOTE(review): fh here is the header of the newly allocated
+ * reply frame, not rx_fp's -- confirm this seq_cnt source is
+ * intended rather than fc_frame_header_get(rx_fp). */
+ ap->ba_high_seq_cnt = fh->fh_seq_cnt;
+ ap->ba_low_seq_cnt = htons(sp->cnt);
+ }
+ sp = fc_seq_start_next(sp);
+ spin_unlock_bh(&ep->ex_lock);
+ fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+ fc_frame_free(rx_fp);
+ return;
+
+reject:
+ fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
+free:
+ fc_frame_free(rx_fp);
+}
+
+/*
+ * Handle receive where the other end is originating the sequence.
+ * Looks up (or implicitly creates, via fc_seq_lookup_recip) the recipient
+ * side exchange, ACKs the frame if the class needs it, and hands the frame
+ * to either the exchange's resp handler or the lport receive hook.
+ */
+static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = NULL;
+ struct fc_exch *ep = NULL;
+ enum fc_sof sof;
+ enum fc_eof eof;
+ u32 f_ctl;
+ enum fc_pf_rjt_reason reject;
+
+ fr_seq(fp) = NULL;
+ reject = fc_seq_lookup_recip(mp, fp);
+ if (reject == FC_RJT_NONE) {
+ sp = fr_seq(fp); /* sequence will be held */
+ ep = fc_seq_exch(sp);
+ sof = fr_sof(fp);
+ eof = fr_eof(fp);
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fc_seq_send_ack(sp, fp);
+
+ /*
+ * Call the receive function.
+ *
+ * The receive function may allocate a new sequence
+ * over the old one, so we shouldn't change the
+ * sequence after this.
+ *
+ * The frame will be freed by the receive function.
+ * If new exch resp handler is valid then call that
+ * first.
+ */
+ if (ep->resp)
+ ep->resp(sp, fp, ep->resp_arg);
+ else
+ lp->tt.lport_recv(lp, sp, fp);
+ } else {
+ if (fc_exch_debug)
+ FC_DBG("exch/seq lookup failed: reject %x\n", reject);
+ fc_frame_free(fp);
+ }
+}
+
+/*
+ * Handle receive where the other end is originating the sequence in
+ * response to our exchange.
+ * Validates the frame's addressing against the exchange found by OX_ID,
+ * tracks sequence initiative, ACKs if needed, completes the exchange on a
+ * final non-FCP frame, then delivers the frame to the resp handler.
+ * Consumes fp on all paths (directly or via the resp handler).
+ */
+static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp;
+ struct fc_exch *ep;
+ enum fc_sof sof;
+ u32 f_ctl;
+ void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+ void *ex_resp_arg;
+
+ /* fc_exch_find takes a hold; released before return below. */
+ ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+ if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto out;
+ }
+ /* Learn the responder exchange ID on the first response frame. */
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ /* Frame's D_ID must match our source; S_ID must match the
+ * destination (except during FLOGI, when we don't know it yet). */
+ if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->did != ntoh24(fh->fh_s_id) &&
+ ep->did != FC_FID_FLOGI) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ sof = fr_sof(fp);
+ if (fc_sof_is_init(sof)) {
+ /* First frame of a new sequence from the responder. */
+ sp = fc_seq_start_next(&ep->seq);
+ sp->id = fh->fh_seq_id;
+ sp->ssb_stat |= SSB_ST_RESP;
+ } else {
+ /* Continuation frame: must belong to the current sequence. */
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+ atomic_inc(&mp->stats.seq_not_found);
+ goto rel;
+ }
+ }
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fr_seq(fp) = sp;
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+
+ if (fc_sof_needs_ack(sof))
+ fc_seq_send_ack(sp, fp);
+ /* Snapshot the handler before completion may clear it. */
+ resp = ep->resp;
+ ex_resp_arg = ep->resp_arg;
+
+ /* Final frame of the last sequence (non-FCP): complete the exchange
+ * before delivering, so the handler sees a finished exchange. */
+ if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
+ (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
+ (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
+ spin_lock_bh(&ep->ex_lock);
+ fc_exch_complete_locked(ep);
+ WARN_ON(fc_seq_exch(sp) != ep);
+ spin_unlock_bh(&ep->ex_lock);
+ }
+
+ /*
+ * Call the receive function.
+ * The sequence is held (has a refcnt) for us,
+ * but not for the receive function.
+ *
+ * The receive function may allocate a new sequence
+ * over the old one, so we shouldn't change the
+ * sequence after this.
+ *
+ * The frame will be freed by the receive function.
+ * If new exch resp handler is valid then call that
+ * first.
+ */
+ if (resp)
+ resp(sp, fp, ex_resp_arg);
+ else
+ fc_frame_free(fp);
+ fc_exch_release(ep);
+ return;
+rel:
+ fc_exch_release(ep);
+out:
+ fc_frame_free(fp);
+}
+
+/*
+ * Handle receive for a sequence where other end is responding to our
+ * sequence.  The frame is only counted in the EM statistics and dropped.
+ *
+ * Fix vs. original: the second debug message was missing its trailing
+ * newline, unlike every other FC_DBG call site in this file.
+ */
+static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+	struct fc_seq *sp;
+
+	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
+	if (!sp) {
+		atomic_inc(&mp->stats.xid_not_found);
+		if (fc_exch_debug)
+			FC_DBG("seq lookup failed\n");
+	} else {
+		atomic_inc(&mp->stats.non_bls_resp);
+		if (fc_exch_debug)
+			FC_DBG("non-BLS response to sequence\n");
+	}
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle the response to an ABTS for exchange or sequence.
+ * This can be BA_ACC or BA_RJT.
+ * On BA_ACC, may establish a Recovery Qualifier (extra hold plus a
+ * 2*R_A_TOV timer).  Completes the exchange when LAST_SEQ is set.
+ * Consumes fp.
+ */
+static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fc_ba_acc *ap;
+ u16 low;
+ u16 high;
+
+ fh = fc_frame_header_get(fp);
+ if (fc_exch_debug)
+ FC_DBG("exch: BLS rctl %x - %s\n",
+ fh->fh_r_ctl, fc_exch_rctl_name(fh->fh_r_ctl));
+ /* Local hold so the exchange survives fc_exch_complete_locked. */
+ fc_exch_hold(ep);
+ spin_lock_bh(&ep->ex_lock);
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_ACC:
+ ap = fc_frame_payload_get(fp, sizeof(*ap));
+ if (!ap)
+ break;
+
+ /*
+ * Decide whether to establish a Recovery Qualifier.
+ * We do this if there is a non-empty SEQ_CNT range and
+ * SEQ_ID is the same as the one we aborted.
+ */
+ low = ntohs(ap->ba_low_seq_cnt);
+ high = ntohs(ap->ba_high_seq_cnt);
+ if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
+ (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
+ ap->ba_seq_id == ep->seq_id) && low != high) {
+ ep->esb_stat |= ESB_ST_REC_QUAL;
+ fc_exch_hold(ep); /* hold for recovery qualifier */
+ fc_exch_timer_set_locked(ep, 2 * ep->r_a_tov);
+ }
+ break;
+ case FC_RCTL_BA_RJT:
+ break;
+ default:
+ break;
+ }
+ if (ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
+ fc_exch_complete_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ fc_exch_release(ep);
+ fc_frame_free(fp);
+}
+
+/*
+ * Receive BLS sequence.
+ * This is always a sequence initiated by the remote side.
+ * We may be either the originator or recipient of the exchange.
+ * SEQ_CTX set means the BLS frame responds to a sequence we initiated
+ * (expected only for class 2/F ACKs); otherwise it is a BA_ACC/BA_RJT
+ * (response to our ABTS) or an incoming BA_ABTS.
+ */
+static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fc_exch *ep;
+ u32 f_ctl;
+
+ fh = fc_frame_header_get(fp);
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fr_seq(fp) = NULL;
+
+ /* EX_CTX tells us which XID field names our exchange. */
+ ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
+ if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
+ spin_lock_bh(&ep->ex_lock);
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+ }
+ if (f_ctl & FC_FC_SEQ_CTX) {
+ /*
+ * A response to a sequence we initiated.
+ * This should only be ACKs for class 2 or F.
+ */
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_ACK_1:
+ case FC_RCTL_ACK_0:
+ break;
+ default:
+ if (fc_exch_debug)
+ FC_DBG("BLS rctl %x - %s received",
+ fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
+ break;
+ }
+ fc_frame_free(fp);
+ } else {
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_RJT:
+ case FC_RCTL_BA_ACC:
+ if (ep)
+ fc_exch_abts_resp(ep, fp);
+ else
+ fc_frame_free(fp);
+ break;
+ case FC_RCTL_BA_ABTS:
+ /* fc_exch_recv_abts handles a NULL ep (rejects). */
+ fc_exch_recv_abts(ep, fp);
+ break;
+ default: /* ignore junk */
+ fc_frame_free(fp);
+ break;
+ }
+ }
+ if (ep)
+ fc_exch_release(ep); /* release hold taken by fc_exch_find */
+}
+
+/*
+ * Accept a request sequence with an ELS LS_ACC on the next sequence.
+ * If the frame cannot be allocated or sent, the originator is expected
+ * to repeat the request, so failure is silently ignored.
+ */
+static void fc_seq_ls_acc(struct fc_seq *req_sp)
+{
+	struct fc_seq *rsp_sp;
+	struct fc_frame *rsp_fp;
+	struct fc_els_ls_acc *acc;
+
+	rsp_sp = fc_seq_start_next(req_sp);
+	rsp_fp = fc_frame_alloc(fc_seq_exch(rsp_sp)->lp, sizeof(*acc));
+	if (!rsp_fp)
+		return;
+
+	acc = fc_frame_payload_get(rsp_fp, sizeof(*acc));
+	memset(acc, 0, sizeof(*acc));
+	acc->la_cmd = ELS_LS_ACC;
+	fc_seq_send_last(rsp_sp, rsp_fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+}
+
+/*
+ * Reject a request sequence with an ELS LS_RJT carrying the given reason
+ * and explanation codes.  If the frame cannot be allocated or sent, the
+ * originator is expected to repeat the request, so failure is ignored.
+ */
+static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
+			  enum fc_els_rjt_explan explan)
+{
+	struct fc_seq *rsp_sp;
+	struct fc_frame *rsp_fp;
+	struct fc_els_ls_rjt *rjt;
+
+	rsp_sp = fc_seq_start_next(req_sp);
+	rsp_fp = fc_frame_alloc(fc_seq_exch(rsp_sp)->lp, sizeof(*rjt));
+	if (!rsp_fp)
+		return;
+
+	rjt = fc_frame_payload_get(rsp_fp, sizeof(*rjt));
+	memset(rjt, 0, sizeof(*rjt));
+	rjt->er_cmd = ELS_LS_RJT;
+	rjt->er_reason = reason;
+	rjt->er_explan = explan;
+	fc_seq_send_last(rsp_sp, rsp_fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+}
+
+/*
+ * Reset a single exchange: mark it complete, drop its recovery-qualifier
+ * and timer holds, and notify the resp handler (unless the exchange had
+ * already completed) with -FC_EX_CLOSED.
+ */
+static void fc_exch_reset(struct fc_exch *ep)
+{
+ struct fc_seq *sp;
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *);
+ void *arg;
+
+ /* Local hold so the exchange outlives the holds dropped below. */
+ fc_exch_hold(ep);
+ spin_lock_bh(&ep->ex_lock);
+ /* Snapshot and clear the handler so it is called at most once. */
+ resp = ep->resp;
+ ep->resp = NULL;
+ if (ep->esb_stat & ESB_ST_REC_QUAL)
+ atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
+ ep->esb_stat &= ~ESB_ST_REC_QUAL;
+ /* An already-complete exchange gets no closure callback. */
+ if (ep->esb_stat & ESB_ST_COMPLETE)
+ resp = NULL;
+ arg = ep->resp_arg;
+ if (timer_pending(&ep->ex_timer)) {
+ del_timer(&ep->ex_timer);
+ atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+ }
+ sp = &ep->seq;
+ ep->esb_stat |= ESB_ST_COMPLETE;
+ spin_unlock_bh(&ep->ex_lock);
+ /* Callback runs without the lock; frame arg is an error pointer. */
+ if (resp)
+ resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
+ fc_exch_release(ep);
+}
+
+/*
+ * Reset an exchange manager, releasing all sequences and exchanges.
+ * If sid is non-zero, reset only exchanges we source from that FID.
+ * If did is non-zero, reset only exchanges destined to that FID.
+ *
+ * NOTE(review): ex_list is walked here without taking mp->em_lock;
+ * confirm that callers serialize this against exchange add/remove.
+ */
+void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
+{
+ struct fc_exch *ep;
+ struct fc_exch *next;
+
+ list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
+ if ((sid == 0 || sid == ep->sid) &&
+ (did == 0 || did == ep->did))
+ fc_exch_reset(ep);
+ }
+}
+EXPORT_SYMBOL(fc_exch_mgr_reset);
+
+/*
+ * Report the originator and responder exchange IDs of the exchange that
+ * owns the given sequence through the caller-supplied out-parameters.
+ */
+void fc_seq_get_xids(struct fc_seq *sp, u16 *oxid, u16 *rxid)
+{
+	const struct fc_exch *xchg = fc_seq_exch(sp);
+
+	*oxid = xchg->oxid;
+	*rxid = xchg->rxid;
+}
+EXPORT_SYMBOL(fc_seq_get_xids);
+
+/*
+ * Stash caller-supplied REC (Read Exchange Concise) FC-4 data on the
+ * sequence; it is echoed back in reca_fc4value by fc_exch_els_rec().
+ */
+void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data)
+{
+ sp->rec_data = rec_data;
+}
+EXPORT_SYMBOL(fc_seq_set_rec_data);
+
+/*
+ * Handle incoming ELS REC - Read Exchange Concise.
+ * Note that the requesting port may be different than the S_ID in the request.
+ * Replies with an LS_ACC describing the exchange's state, or LS_RJT if the
+ * subject exchange cannot be found or the XIDs don't match.
+ * Consumes rfp on all paths.
+ */
+static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
+{
+ struct fc_frame *fp;
+ struct fc_exch *ep;
+ struct fc_exch_mgr *em;
+ struct fc_els_rec *rp;
+ struct fc_els_rec_acc *acc;
+ enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
+ enum fc_els_rjt_explan explan;
+ u32 sid;
+ u16 rxid;
+ u16 oxid;
+
+ rp = fc_frame_payload_get(rfp, sizeof(*rp));
+ explan = ELS_EXPL_INV_LEN;
+ if (!rp)
+ goto reject;
+ sid = ntoh24(rp->rec_s_id);
+ rxid = ntohs(rp->rec_rx_id);
+ oxid = ntohs(rp->rec_ox_id);
+
+ /*
+ * Currently it's hard to find the local S_ID from the exchange
+ * manager. This will eventually be fixed, but for now it's easier
+ * to lookup the subject exchange twice, once as if we were
+ * the initiator, and then again if we weren't.
+ */
+ em = fc_seq_exch(sp)->em;
+ ep = fc_exch_find(em, oxid);
+ explan = ELS_EXPL_OXID_RXID;
+ if (ep && ep->oid == sid) {
+ /* We are the responder: the RX_ID must agree if both known. */
+ if (ep->rxid != FC_XID_UNKNOWN &&
+ rxid != FC_XID_UNKNOWN &&
+ ep->rxid != rxid)
+ goto rel;
+ } else {
+ /* Retry the lookup as if we were the originator, by RX_ID. */
+ if (ep)
+ fc_exch_release(ep);
+ ep = NULL;
+ if (rxid != FC_XID_UNKNOWN)
+ ep = fc_exch_find(em, rxid);
+ if (!ep)
+ goto reject;
+ }
+
+ fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
+ if (!fp) {
+ fc_exch_done(sp);
+ goto out;
+ }
+ sp = fc_seq_start_next(sp);
+ acc = fc_frame_payload_get(fp, sizeof(*acc));
+ memset(acc, 0, sizeof(*acc));
+ acc->reca_cmd = ELS_LS_ACC;
+ acc->reca_ox_id = rp->rec_ox_id;
+ memcpy(acc->reca_ofid, rp->rec_s_id, 3);
+ acc->reca_rx_id = htons(ep->rxid);
+ if (ep->sid == ep->oid)
+ hton24(acc->reca_rfid, ep->did);
+ else
+ hton24(acc->reca_rfid, ep->sid);
+ acc->reca_fc4value = htonl(ep->seq.rec_data);
+ acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
+ ESB_ST_SEQ_INIT |
+ ESB_ST_COMPLETE));
+ /* NOTE(review): fc_seq_start_next() was already called above --
+ * this second advance looks redundant; confirm it is intended. */
+ sp = fc_seq_start_next(sp);
+ fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+out:
+ fc_exch_release(ep);
+ fc_frame_free(rfp);
+ return;
+
+rel:
+ fc_exch_release(ep);
+reject:
+ fc_seq_ls_rjt(sp, reason, explan);
+ fc_frame_free(rfp);
+}
+
+/*
+ * Completion handler for the RRQ we sent.
+ * There is no recovery action to take here; an unexpected reply is
+ * merely logged.  Error-pointer "frames" (exchange closed/timed out)
+ * are ignored entirely.
+ */
+static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	unsigned int op;
+
+	if (IS_ERR(fp))
+		return;
+
+	op = fc_frame_payload_op(fp);
+	switch (op) {
+	case ELS_LS_ACC:
+		break;
+	case ELS_LS_RJT:
+		FC_DBG("LS_RJT for RRQ");
+		break;
+	default:
+		FC_DBG("unexpected response op %x for RRQ", op);
+		break;
+	}
+	fc_frame_free(fp);
+}
+
+/*
+ * Send ELS RRQ - Reinstate Recovery Qualifier.
+ * This tells the remote port to stop blocking the use of
+ * the exchange and the seq_cnt range.
+ * On failure to start the request sequence, re-arm the exchange timer
+ * so the RRQ is retried after R_A_TOV.
+ */
+static void fc_exch_rrq(struct fc_exch *ep)
+{
+ struct fc_lport *lp;
+ struct fc_els_rrq *rrq;
+ struct fc_frame *fp;
+ u32 did;
+
+ lp = ep->lp;
+
+ fp = fc_frame_alloc(lp, sizeof(*rrq));
+ if (!fp)
+ return;
+ fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+ rrq = fc_frame_payload_get(fp, sizeof(*rrq));
+ memset(rrq, 0, sizeof(*rrq));
+ rrq->rrq_cmd = ELS_RRQ;
+ hton24(rrq->rrq_s_id, ep->sid);
+ rrq->rrq_ox_id = htons(ep->oxid);
+ rrq->rrq_rx_id = htons(ep->rxid);
+
+ /* If we were the responder on the aborted exchange, the RRQ goes
+ * to the exchange's source FID rather than its destination. */
+ did = ep->did;
+ if (ep->esb_stat & ESB_ST_RESP)
+ did = ep->sid;
+ if (!fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, lp->e_d_tov,
+ lp->fid, did, FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+ fc_exch_timer_set(ep, ep->r_a_tov);
+}
+
+/*
+ * Handle incoming ELS RRQ - Reset Recovery Qualifier.
+ *
+ * Look up the subject exchange named by the RRQ, validate its XIDs and
+ * S_ID, clear its recovery-qualifier state (and timer, if the exchange is
+ * complete), then answer with LS_ACC.  Invalid requests get LS_RJT.
+ * Consumes fp on all paths.
+ *
+ * Fix vs. original: the success path returned without dropping the hold
+ * taken by fc_exch_find(), leaking a reference on the subject exchange.
+ * The reject path already dropped it (see unlock_reject below); now the
+ * accept path does too.
+ */
+static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
+{
+	struct fc_exch *ep;	/* request or subject exchange */
+	struct fc_els_rrq *rp;
+	u32 sid;
+	u16 xid;
+	enum fc_els_rjt_explan explan;
+
+	rp = fc_frame_payload_get(fp, sizeof(*rp));
+	explan = ELS_EXPL_INV_LEN;
+	if (!rp)
+		goto reject;
+
+	/*
+	 * lookup subject exchange.  If the RRQ source is our destination,
+	 * we were the responder on the subject exchange, so it is keyed by
+	 * its OX_ID; otherwise by its RX_ID.
+	 */
+	ep = fc_seq_exch(sp);
+	sid = ntoh24(rp->rrq_s_id);	/* subject source */
+	xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
+	ep = fc_exch_find(ep->em, xid);
+
+	explan = ELS_EXPL_OXID_RXID;
+	if (!ep)
+		goto reject;
+	spin_lock_bh(&ep->ex_lock);
+	if (ep->oxid != ntohs(rp->rrq_ox_id))
+		goto unlock_reject;
+	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
+	    ep->rxid != FC_XID_UNKNOWN)
+		goto unlock_reject;
+	explan = ELS_EXPL_SID;
+	if (ep->sid != sid)
+		goto unlock_reject;
+
+	/*
+	 * Clear Recovery Qualifier state, and cancel timer if complete.
+	 */
+	if (ep->esb_stat & ESB_ST_REC_QUAL) {
+		ep->esb_stat &= ~ESB_ST_REC_QUAL;
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
+	}
+	if ((ep->esb_stat & ESB_ST_COMPLETE) && timer_pending(&ep->ex_timer)) {
+		del_timer(&ep->ex_timer);
+		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
+	}
+	spin_unlock_bh(&ep->ex_lock);
+	fc_exch_release(ep);	/* drop hold from fc_exch_find */
+
+	/*
+	 * Send LS_ACC.
+	 */
+	fc_seq_ls_acc(sp);
+	fc_frame_free(fp);
+	return;
+
+unlock_reject:
+	spin_unlock_bh(&ep->ex_lock);
+	fc_exch_release(ep);	/* drop hold from fc_exch_find */
+reject:
+	fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
+	fc_frame_free(fp);
+}
+
+/*
+ * Allocate an exchange manager handling XIDs in [min_xid, max_xid],
+ * with the per-exchange pointer table tacked onto the same allocation.
+ * Returns NULL on bad XID range or allocation failure.
+ *
+ * Fixes vs. original: the debug format string read "0x:%x" instead of
+ * "0x%x", and em_idx (a u32) was printed with %d instead of %u.
+ */
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
+				      enum fc_class class,
+				      u16 min_xid,
+				      u16 max_xid,
+				      u32 em_idx)
+{
+	struct fc_exch_mgr *mp;
+	size_t len;
+
+	if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
+		FC_DBG("Invalid min_xid 0x%x and max_xid 0x%x\n",
+		       min_xid, max_xid);
+		return NULL;
+	}
+
+	/*
+	 * Memory need for EM: the manager struct followed by one
+	 * exchange pointer per XID in the range.
+	 */
+	len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
+	len += sizeof(struct fc_exch_mgr);
+
+	mp = kzalloc(len, GFP_ATOMIC);
+	if (mp) {
+		mp->class = class;
+		mp->total_exches = 0;
+		mp->exches = (struct fc_exch **)(mp + 1);
+		mp->last_xid = min_xid - 1;
+		mp->min_xid = min_xid;
+		mp->max_xid = max_xid;
+		mp->lp = lp;
+		INIT_LIST_HEAD(&mp->ex_list);
+
+		spin_lock_init(&mp->em_lock);
+
+		/* NOTE(review): consider snprintf with the size of
+		 * em_cache_name to guard against name truncation. */
+		sprintf(mp->em_cache_name,
+			"libfc-host%d-EM%u",
+			lp->host->host_no, em_idx);
+		mp->em_cache = kmem_cache_create(mp->em_cache_name,
+						 sizeof(struct fc_exch),
+						 0, SLAB_HWCACHE_ALIGN,
+						 NULL);
+
+		if (!mp->em_cache) {
+			kfree(mp);
+			mp = NULL;
+		}
+	}
+	return mp;
+}
+EXPORT_SYMBOL(fc_exch_mgr_alloc);
+
+/*
+ * Free an exchange manager allocated by fc_exch_mgr_alloc().
+ * All exchanges must have been released first (total_exches == 0).
+ *
+ * Fix vs. original: WARN_ON(!mp) was immediately followed by an
+ * unconditional dereference of mp; now we warn and bail out instead
+ * of crashing on a NULL argument.
+ */
+void fc_exch_mgr_free(struct fc_exch_mgr *mp)
+{
+	if (WARN_ON(!mp))
+		return;
+	/*
+	 * The total exch count must be zero
+	 * before freeing exchange manager.
+	 */
+	WARN_ON(mp->total_exches != 0);
+	kmem_cache_destroy(mp->em_cache);
+	kfree(mp);
+}
+EXPORT_SYMBOL(fc_exch_mgr_free);
+
+/*
+ * Mark the exchange owning this sequence complete: clear the resp
+ * handler, cancel any pending timer (dropping its hold), and release
+ * the exchange.
+ */
+void fc_exch_done(struct fc_seq *sp)
+{
+ struct fc_exch *ep;
+
+ ep = fc_seq_exch(sp);
+ spin_lock_bh(&ep->ex_lock);
+ ep->esb_stat |= ESB_ST_COMPLETE;
+ ep->resp = NULL;
+ if (timer_pending(&ep->ex_timer)) {
+ del_timer(&ep->ex_timer);
+ atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+ }
+ spin_unlock_bh(&ep->ex_lock);
+ fc_exch_release(fc_seq_exch(sp));
+}
+EXPORT_SYMBOL(fc_exch_done);
+
+/*
+ * Default exch_get template operation: allocate a new exchange from the
+ * lport's exchange manager.  Returns NULL if the lport or its EM is
+ * missing.  The frame argument is unused by this default implementation.
+ */
+struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
+{
+	if (lp && lp->emp)
+		return fc_exch_alloc(lp->emp, 0);
+	return NULL;
+}
+EXPORT_SYMBOL(fc_exch_get);
+
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void *resp_arg, u32 timer_msec,
+ u32 sid, u32 did, u32 f_ctl)
+{
+ struct fc_exch *ep;
+ struct fc_seq *sp = NULL;
+ struct fc_frame_header *fh;
+ u16 fill;
+
+ ep = lp->tt.exch_get(lp, fp);
+ if (!ep) {
+ fc_frame_free(fp);
+ return NULL;
+ }
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ fc_exch_set_addr(ep, sid, did);
+ ep->resp = resp;
+ ep->resp_arg = resp_arg;
+ ep->r_a_tov = FC_DEF_R_A_TOV;
+ ep->lp = lp;
+ sp = &ep->seq;
+ WARN_ON((sp->f_ctl & FC_FC_END_SEQ) != 0);
+
+ fr_sof(fp) = ep->class;
+ if (sp->cnt)
+ fr_sof(fp) = fc_sof_normal(ep->class);
+ fr_eof(fp) = FC_EOF_T;
+ if (fc_sof_needs_ack(ep->class))
+ fr_eof(fp) = FC_EOF_N;
+
+ fc_seq_fill_hdr(sp, fp);
+ /*
+ * Form f_ctl.
+ * The number of fill bytes to make the length a 4-byte multiple is
+ * the low order 2-bits of the f_ctl. The fill itself will have been
+ * cleared by the frame allocation.
+ * After this, the length will be even, as expected by the transport.
+ * Don't include the fill in the f_ctl saved in the sequence.
+ */
+ fill = fr_len(fp) & 3;
+ if (fill) {
+ fill = 4 - fill;
+ /* TODO, this may be a problem with fragmented skb */
+ skb_put(fp_skb(fp), fill);
+ }
+ f_ctl |= ep->f_ctl;
+ fh = fc_frame_header_get(fp);
+ hton24(fh->fh_f_ctl, f_ctl | fill);
+ fh->fh_seq_cnt = htons(sp->cnt++);
+
+ if (unlikely(lp->tt.frame_send(lp, fp)))
+ goto err;
+
+ spin_lock_bh(&ep->ex_lock);
+ if (timer_msec)
+ fc_exch_timer_set_locked(ep, timer_msec);
+ sp->f_ctl = f_ctl; /* save for possible abort */
+ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+ return sp;
+err:
+ fc_exch_complete(ep);
+ return NULL;
+}
+EXPORT_SYMBOL(fc_exch_seq_send);
+
+/*
+ * Receive a frame.
+ * Top-level demultiplexer: BLS frames, responses to our exchange, junk
+ * responses to our sequences, or new incoming requests.  Consumes fp on
+ * all paths.
+ */
+void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ u32 f_ctl;
+
+ if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
+ FC_DBG("fc_lport or EM is not allocated and configured");
+ fc_frame_free(fp);
+ return;
+ }
+
+ /*
+ * If frame is marked invalid, just drop it.
+ */
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ switch (fr_eof(fp)) {
+ case FC_EOF_T:
+ /* Strip the pad bytes recorded in F_CTL's fill field. */
+ if (f_ctl & FC_FC_END_SEQ)
+ skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
+ /* fall through */
+ case FC_EOF_N:
+ if (fh->fh_type == FC_TYPE_BLS)
+ fc_exch_recv_bls(mp, fp);
+ else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
+ FC_FC_EX_CTX)
+ fc_exch_recv_seq_resp(mp, fp);
+ else if (f_ctl & FC_FC_SEQ_CTX)
+ fc_exch_recv_resp(mp, fp);
+ else
+ fc_exch_recv_req(lp, mp, fp);
+ break;
+ default:
+ FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
+ fc_frame_free(fp);
+ break;
+ }
+}
+EXPORT_SYMBOL(fc_exch_recv);
+
+/*
+ * Fill in any exchange-layer template operations the low-level driver
+ * did not override with the default implementations from this file.
+ * Always succeeds.
+ */
+int fc_exch_init(struct fc_lport *lp)
+{
+	/* exch_get/exch_put must be provided as a pair. */
+	if (!lp->tt.exch_get) {
+		WARN_ON(lp->tt.exch_put);
+		lp->tt.exch_get = fc_exch_get;
+	}
+
+	/* Exchange-level operations. */
+	if (!lp->tt.exch_seq_send)
+		lp->tt.exch_seq_send = fc_exch_seq_send;
+	if (!lp->tt.exch_done)
+		lp->tt.exch_done = fc_exch_done;
+	if (!lp->tt.exch_mgr_reset)
+		lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
+
+	/* Sequence-level operations. */
+	if (!lp->tt.seq_start_next)
+		lp->tt.seq_start_next = fc_seq_start_next;
+	if (!lp->tt.seq_send)
+		lp->tt.seq_send = fc_seq_send;
+	if (!lp->tt.seq_els_rsp_send)
+		lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
+	if (!lp->tt.seq_exch_abort)
+		lp->tt.seq_exch_abort = fc_seq_exch_abort;
+	if (!lp->tt.seq_get_xids)
+		lp->tt.seq_get_xids = fc_seq_get_xids;
+	if (!lp->tt.seq_set_rec_data)
+		lp->tt.seq_set_rec_data = fc_seq_set_rec_data;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_exch_init);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 0000000..a5f7aba
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2121 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/crc32.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc/libfc.h>
+
+int fc_fcp_debug; /* module-wide debug-print enable flag */
+static struct kmem_cache *scsi_pkt_cachep;
+
+/* SRB state definitions (bits in fc_fcp_pkt.state) */
+#define FC_SRB_FREE 0 /* cmd is free */
+#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
+#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
+#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
+#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
+#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
+#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
+#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
+
+/* Data-direction bits for fc_fcp_pkt.req_flags (not .state) */
+#define FC_SRB_READ (1 << 1)
+#define FC_SRB_WRITE (1 << 0)
+
+/*
+ * scsi request structure, one for each scsi request
+ */
+struct fc_fcp_pkt {
+ /*
+ * housekeeping stuff
+ */
+ struct fc_lport *lp; /* handle to hba struct */
+ u16 state; /* scsi_pkt state (FC_SRB_* flags) */
+ u16 tgt_flags; /* target flags */
+ atomic_t ref_cnt; /* only used by REC ELS */
+ spinlock_t scsi_pkt_lock; /* Must be taken before the host lock
+ * if both are held at the same time */
+ /*
+ * SCSI I/O related stuff
+ */
+ struct scsi_cmnd *cmd; /* scsi command pointer. set/clear
+ * under host lock */
+ struct list_head list; /* tracks queued commands. access under
+ * host lock */
+ /*
+ * timeout related stuff
+ */
+ struct timer_list timer; /* command timer */
+ struct completion tm_done;
+ int wait_for_comp;
+ unsigned long start_time; /* start jiffies */
+ unsigned long end_time; /* end jiffies */
+ unsigned long last_pkt_time; /* jiffies of last frame received */
+
+ /*
+ * scsi cmd and data transfer information
+ */
+ u32 data_len;
+ /*
+ * transport related variables
+ */
+ struct fcp_cmnd cdb_cmd;
+ size_t xfer_len;
+ u32 xfer_contig_end; /* offset of end of contiguous xfer */
+ u16 max_payload; /* max payload size in bytes */
+
+ /*
+ * scsi/fcp return status
+ */
+ u32 io_status; /* SCSI result upper 24 bits */
+ u8 cdb_status;
+ u8 status_code; /* FCP I/O status */
+ /* bit 3 Underrun bit 2: overrun */
+ u8 scsi_comp_flags;
+ u32 req_flags; /* bit 0: write, bit 1: read
+ * (FC_SRB_WRITE/FC_SRB_READ) */
+ u32 scsi_resid; /* residual length */
+
+ struct fc_rport *rport; /* remote port pointer */
+ struct fc_seq *seq_ptr; /* current sequence pointer */
+ /*
+ * Error Processing
+ */
+ u8 recov_retry; /* count of recovery retries */
+ struct fc_seq *recov_seq; /* sequence for REC or SRR */
+};
+
+/*
+ * Accessors for per-command state stashed in the scsi_cmnd's SCp area.
+ * The SCp.ptr should be tested and set under the host lock. NULL indicates
+ * that the command has been returned to the scsi layer.
+ */
+#define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
+#define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
+#define CMD_COMPL_STATUS(Cmnd) ((Cmnd)->SCp.this_residual)
+#define CMD_SCSI_STATUS(Cmnd) ((Cmnd)->SCp.Status)
+#define CMD_RESID_LEN(Cmnd) ((Cmnd)->SCp.buffers_residual)
+
+/* Per-lport FCP private data, reached via lport->scsi_priv. */
+struct fc_fcp_internal {
+ mempool_t *scsi_pkt_pool; /* backs fc_fcp_pkt allocations */
+ struct list_head scsi_pkt_queue; /* outstanding commands */
+};
+
+#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
+
+/*
+ * function prototypes
+ * FC scsi I/O related functions
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_complete(struct fc_fcp_pkt *);
+static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp);
+static void fc_abort_internal(struct fc_fcp_pkt *);
+static void fc_timeout_error(struct fc_fcp_pkt *);
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *);
+static int fc_fcp_send_cmd(struct fc_fcp_pkt *);
+static void fc_fcp_timeout(unsigned long data);
+static void fc_fcp_rec(struct fc_fcp_pkt *);
+static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_io_compl(struct fc_fcp_pkt *);
+
+static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
+static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
+
+/*
+ * command status codes (fc_fcp_pkt.status_code values)
+ */
+#define FC_COMPLETE 0
+#define FC_CMD_ABORTED 1
+#define FC_CMD_RESET 2
+#define FC_CMD_PLOGO 3
+#define FC_SNS_RCV 4
+#define FC_TRANS_ERR 5
+#define FC_DATA_OVRRUN 6
+#define FC_DATA_UNDRUN 7
+#define FC_ERROR 8
+#define FC_HRD_ERROR 9
+#define FC_CMD_TIME_OUT 10
+
+/*
+ * Error recovery timeout values (in jiffies; HZ multiples == seconds).
+ */
+#define FC_SCSI_ER_TIMEOUT (10 * HZ)
+#define FC_SCSI_TM_TOV (10 * HZ)
+#define FC_SCSI_REC_TOV (2 * HZ)
+
+#define FC_MAX_ERROR_CNT 5
+#define FC_MAX_RECOV_RETRY 3
+
+#define FC_FCP_DFLT_QUEUE_DEPTH 32
+
+/**
+ * fc_fcp_pkt_alloc - allocation routine for scsi_pkt packet
+ * @lp: fc lport struct
+ *
+ * This is used by upper layer scsi driver.
+ * Return Value : zeroed scsi_pkt structure with refcount 1 and timer/list
+ * initialized, or NULL on allocation failure.
+ * Context : call from process context. no locking required.
+ */
+static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lp)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+	struct fc_fcp_pkt *pkt;
+
+	pkt = mempool_alloc(si->scsi_pkt_pool, GFP_ATOMIC);
+	if (!pkt)
+		return NULL;
+
+	memset(pkt, 0, sizeof(*pkt));
+	pkt->lp = lp;
+	atomic_set(&pkt->ref_cnt, 1);
+	init_timer(&pkt->timer);
+	INIT_LIST_HEAD(&pkt->list);
+	return pkt;
+}
+
+/**
+ * fc_fcp_pkt_free - free routine for scsi_pkt packet
+ * @sp: fcp packet struct
+ *
+ * Drops one reference; returns the packet to its mempool when the
+ * last reference goes away.
+ * Context : call from process and interrupt context.
+ * no locking required
+ */
+static void fc_fcp_pkt_free(struct fc_fcp_pkt *sp)
+{
+	struct fc_fcp_internal *si;
+
+	if (!atomic_dec_and_test(&sp->ref_cnt))
+		return;
+
+	si = fc_get_scsi_internal(sp->lp);
+	mempool_free(sp, si->scsi_pkt_pool);
+}
+
+/* Take an additional reference on the packet. */
+static void fc_fcp_pkt_hold(struct fc_fcp_pkt *sp)
+{
+ atomic_inc(&sp->ref_cnt);
+}
+
+/* Drop a reference taken with fc_fcp_pkt_hold() (frees on last put). */
+static void fc_fcp_pkt_release(struct fc_fcp_pkt *sp)
+{
+ fc_fcp_pkt_free(sp);
+}
+
+/**
+ * fc_fcp_lock_pkt - lock a packet and get a ref to it.
+ * @fsp: fcp packet
+ *
+ * Returns 0 with the lock held and a reference taken, or -EINVAL (lock
+ * released) if the command has already gone back to scsi-ml.
+ *
+ * We should only return error if we return a command to scsi-ml before
+ * getting a response. This can happen in cases where we send a abort, but
+ * do not wait for the response and the abort and command can be passing
+ * each other on the wire/network-layer.
+ *
+ * Note: this function locks the packet and gets a reference to allow
+ * callers to call the completion function while the lock is held and
+ * not have to worry about the packets refcount.
+ *
+ * TODO: Maybe we should just have callers grab/release the lock and
+ * have a function that they call to verify the fsp and grab a ref if
+ * needed.
+ */
+static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
+{
+ /*
+ * TODO mnc: locking is not right. This can be called
+ * from a timer context so we need to stop bottom halves from the
+ * thread caller.
+ *
+ * It can also be called while sending packets, which can result
+ * in bh's being enabled and disabled.
+ */
+ spin_lock(&fsp->scsi_pkt_lock);
+ if (!fsp->cmd) {
+ spin_unlock(&fsp->scsi_pkt_lock);
+ FC_DBG("Invalid scsi cmd pointer on fcp packet.\n");
+ return -EINVAL;
+ }
+
+ fc_fcp_pkt_hold(fsp);
+ return 0;
+}
+
+/* Counterpart of fc_fcp_lock_pkt(): drop the lock and the reference. */
+static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
+{
+ spin_unlock(&fsp->scsi_pkt_lock);
+ fc_fcp_pkt_release(fsp);
+}
+
+/* (Re)arm the command timer unless the I/O has already completed. */
+static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
+{
+ if (!(fsp->state & FC_SRB_COMPL))
+ mod_timer(&fsp->timer, jiffies + delay);
+}
+
+/*
+ * End a request with a retry suggestion.
+ * SUGGEST_RETRY goes in the upper byte of the SCSI result (io_status
+ * holds the upper 24 bits -- see struct fc_fcp_pkt).
+ */
+static void fc_fcp_retry(struct fc_fcp_pkt *fsp)
+{
+ fsp->status_code = FC_ERROR;
+ fsp->io_status = SUGGEST_RETRY << 24;
+ fc_fcp_complete(fsp);
+}
+
+/*
+ * Receive SCSI data from target.
+ * Called after receiving solicited data.
+ *
+ * Copies the frame payload into the command's scatterlist at the
+ * relative offset carried in the frame header, verifying the CRC
+ * ourselves when the interface did not (FCPHF_CRC_UNCHECKED).
+ * Caller holds the packet lock (see fc_fcp_recv()).
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct scsi_cmnd *sc = fsp->cmd;
+ struct fc_lport *lp = fsp->lp;
+ struct fcoe_dev_stats *sp;
+ struct fc_frame_header *fh;
+ size_t start_offset;
+ size_t offset;
+ u32 crc;
+ u32 copy_len = 0;
+ size_t len;
+ void *buf;
+ struct scatterlist *sg;
+ size_t remaining;
+
+ fh = fc_frame_header_get(fp);
+ offset = ntohl(fh->fh_parm_offset);
+ start_offset = offset;
+ len = fr_len(fp) - sizeof(*fh);
+ buf = fc_frame_payload_get(fp, 0);
+
+ if (offset + len > fsp->data_len) {
+ /*
+ * this should never happen
+ */
+ if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
+ fc_frame_crc_check(fp))
+ goto crc_err;
+ if (fc_fcp_debug) {
+ FC_DBG("data received past end. "
+ "len %zx offset %zx "
+ "data_len %x\n", len, offset, fsp->data_len);
+ }
+ fc_fcp_retry(fsp);
+ return;
+ }
+ /* data not arriving at the next expected offset => transfer has a gap */
+ if (offset != fsp->xfer_len)
+ fsp->state |= FC_SRB_DISCONTIG;
+
+ /* seed the running CRC with the frame header if we must check it */
+ crc = 0;
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+ crc = crc32(~0, (u8 *) fh, sizeof(*fh));
+
+ sg = scsi_sglist(sc);
+ remaining = len;
+
+ /*
+ * Walk the scatterlist to the sg entry containing 'offset', then
+ * copy the payload page by page (kmap_atomic maps one page at a time).
+ */
+ while (remaining > 0 && sg) {
+ size_t off;
+ void *page_addr;
+ size_t sg_bytes;
+
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ sg_bytes = min(remaining, sg->length - offset);
+
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we are limited to mapping PAGE_SIZE at a time.
+ */
+ off = offset + sg->offset;
+ sg_bytes = min(sg_bytes, (size_t)
+ (PAGE_SIZE - (off & ~PAGE_MASK)));
+ page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
+ KM_SOFTIRQ0);
+ if (!page_addr)
+ break; /* XXX panic? */
+
+ /*
+ * Skip the copy when an abort is pending so we don't scribble
+ * on a buffer scsi-ml may be reclaiming.
+ * NOTE(review): the CRC is also skipped here, so a pending
+ * abort likely makes the trailer check below fail - confirm.
+ */
+ if (!(fsp->state & FC_SRB_ABORT_PENDING)) {
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED)
+ crc = crc32(crc, buf, sg_bytes);
+ memcpy((char *)page_addr + (off & ~PAGE_MASK), buf,
+ sg_bytes);
+ }
+ kunmap_atomic(page_addr, KM_SOFTIRQ0);
+ buf += sg_bytes;
+ offset += sg_bytes;
+ remaining -= sg_bytes;
+ copy_len += sg_bytes;
+ }
+
+ /*
+ * Finish the CRC over any word-alignment pad bytes and compare
+ * against the trailing CRC in the payload.
+ */
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+ buf = fc_frame_payload_get(fp, 0);
+ if (len % 4) {
+ crc = crc32(crc, buf + len, 4 - (len % 4));
+ len += 4 - (len % 4);
+ }
+
+ if (~crc != le32_to_cpu(*(__le32 *)(buf + len))) {
+crc_err:
+ sp = lp->dev_stats[smp_processor_id()];
+ sp->ErrorFrames++;
+ if (sp->InvalidCRCCount++ < 5)
+ FC_DBG("CRC error on data frame\n");
+ /*
+ * Assume the frame is total garbage.
+ * We may have copied it over the good part
+ * of the buffer.
+ * If so, we need to retry the entire operation.
+ * Otherwise, ignore it.
+ */
+ if (fsp->state & FC_SRB_DISCONTIG)
+ fc_fcp_retry(fsp);
+ return;
+ }
+ }
+
+ /* extend the contiguous region only if this frame abutted it */
+ if (fsp->xfer_contig_end == start_offset)
+ fsp->xfer_contig_end += copy_len;
+ fsp->xfer_len += copy_len;
+
+ /*
+ * In the very rare event that this data arrived after the response
+ * and completes the transfer, call the completion handler.
+ */
+ if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
+ fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
+ fc_fcp_complete(fsp);
+}
+
+/*
+ * Send SCSI data to target.
+ * Called after receiving a Transfer Ready data descriptor.
+ *
+ * Splits the requested (offset, len) window of the command's scatterlist
+ * into frames of at most max-payload size and sends them on sequence @sp.
+ * When the transport supports it (@sg_supp) pages are attached to the
+ * frame skb by reference instead of being copied.
+ */
+static void fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *sp,
+ size_t offset, size_t len,
+ struct fc_frame *oldfp, int sg_supp)
+{
+ struct scsi_cmnd *sc;
+ struct scatterlist *sg;
+ struct fc_frame *fp = NULL;
+ struct fc_lport *lp = fsp->lp;
+ size_t remaining;
+ size_t mfs;
+ size_t tlen;
+ size_t sg_bytes;
+ size_t frame_offset;
+ int error;
+ void *data = NULL;
+ void *page_addr;
+ int using_sg = sg_supp;
+ u32 f_ctl;
+
+ if (unlikely(offset + len > fsp->data_len)) {
+ /*
+ * this should never happen
+ */
+ if (fc_fcp_debug) {
+ FC_DBG("xfer-ready past end. len %zx offset %zx\n",
+ len, offset);
+ }
+ fc_abort_internal(fsp);
+ return;
+ } else if (offset != fsp->xfer_len) {
+ /*
+ * Out of Order Data Request - no problem, but unexpected.
+ */
+ if (fc_fcp_debug) {
+ FC_DBG("xfer-ready non-contiguous. "
+ "len %zx offset %zx\n", len, offset);
+ }
+ }
+ mfs = fsp->max_payload;
+ WARN_ON(mfs > FC_MAX_PAYLOAD);
+ WARN_ON(mfs < FC_MIN_MAX_PAYLOAD);
+ if (mfs > 512)
+ mfs &= ~(512 - 1); /* round down to block size */
+ WARN_ON(mfs < FC_MIN_MAX_PAYLOAD); /* won't go below 256 */
+ WARN_ON(len <= 0);
+ sc = fsp->cmd;
+
+ remaining = len;
+ frame_offset = offset;
+ tlen = 0;
+ sp = lp->tt.seq_start_next(sp);
+ f_ctl = FC_FC_REL_OFF;
+ WARN_ON(!sp);
+
+ /*
+ * If a get_page()/put_page() will fail, don't use sg lists
+ * in the fc_frame structure.
+ *
+ * The put_page() may be long after the I/O has completed
+ * in the case of FCoE, since the network driver does it
+ * via free_skb(). See the test in free_pages_check().
+ *
+ * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
+ */
+ if (using_sg) {
+ for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
+ if (page_count(sg_page(sg)) == 0 ||
+ (sg_page(sg)->flags & (1 << PG_lru |
+ 1 << PG_private |
+ 1 << PG_locked |
+ 1 << PG_active |
+ 1 << PG_slab |
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+ 1 << PG_reserved |
+ 1 << PG_buddy))) {
+ using_sg = 0;
+ break;
+ }
+ }
+ }
+ sg = scsi_sglist(sc);
+
+ /* walk the scatterlist, building and sending one frame at a time */
+ while (remaining > 0 && sg) {
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ /* start a new frame when the previous one has been sent */
+ if (!fp) {
+ tlen = min(mfs, remaining);
+
+ /*
+ * TODO. Temporary workaround. fc_seq_send() can't
+ * handle odd lengths in non-linear skbs.
+ * This will be the final fragment only.
+ */
+ if (tlen % 4)
+ using_sg = 0;
+ if (using_sg) {
+ fp = _fc_frame_alloc(lp, 0);
+ } else {
+ fp = fc_frame_alloc(lp, tlen);
+ data = (void *)(fr_hdr(fp)) +
+ sizeof(struct fc_frame_header);
+ }
+ BUG_ON(!fp);
+ fc_frame_setup(fp, FC_RCTL_DD_SOL_DATA, FC_TYPE_FCP);
+ fc_frame_set_offset(fp, frame_offset);
+ }
+ sg_bytes = min(tlen, sg->length - offset);
+ if (using_sg) {
+ /* attach the page by reference; no data copy */
+ WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
+ FC_FRAME_SG_LEN);
+ get_page(sg_page(sg));
+ skb_fill_page_desc(fp_skb(fp),
+ skb_shinfo(fp_skb(fp))->nr_frags,
+ sg_page(sg), sg->offset + offset,
+ sg_bytes);
+ fp_skb(fp)->data_len += sg_bytes;
+ fr_len(fp) += sg_bytes;
+ fp_skb(fp)->truesize += PAGE_SIZE;
+ } else {
+ size_t off = offset + sg->offset;
+
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we must not cross pages inside the kmap.
+ */
+ sg_bytes = min(sg_bytes, (size_t) (PAGE_SIZE -
+ (off & ~PAGE_MASK)));
+ page_addr = kmap_atomic(sg_page(sg) +
+ (off >> PAGE_SHIFT),
+ KM_SOFTIRQ0);
+ memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
+ sg_bytes);
+ kunmap_atomic(page_addr, KM_SOFTIRQ0);
+ data += sg_bytes;
+ }
+ offset += sg_bytes;
+ frame_offset += sg_bytes;
+ tlen -= sg_bytes;
+ remaining -= sg_bytes;
+
+ if (remaining == 0) {
+ /*
+ * Send a request sequence with
+ * transfer sequence initiative.
+ */
+ f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
+ error = lp->tt.seq_send(lp, sp, fp, f_ctl);
+ } else if (tlen == 0) {
+ /*
+ * send fragment using for a sequence.
+ */
+ error = lp->tt.seq_send(lp, sp, fp, f_ctl);
+ } else {
+ continue;
+ }
+ fp = NULL;
+
+ if (error) {
+ WARN_ON(1); /* send error should be rare */
+ fc_fcp_retry(fsp);
+ return;
+ }
+ }
+ fsp->xfer_len += len; /* premature count? */
+}
+
+/*
+ * exch mgr calls this routine to process scsi
+ * exchanges.
+ *
+ * Dispatches on R_CTL: XFER_RDY triggers sending data, solicited data
+ * is copied to the command buffer, and CMD_STATUS is the FCP response.
+ * An ERR_PTR-encoded @fp signals a transport error instead of a frame.
+ *
+ * Return : None
+ * Context : called from Soft IRQ context
+ * can not called holding list lock
+ */
+static void fc_fcp_recv(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+ struct fc_lport *lp;
+ struct fc_frame_header *fh;
+ struct fc_data_desc *dd;
+ u8 r_ctl;
+
+ /* error pseudo-frame: no header/payload to touch, and never freed */
+ if (IS_ERR(fp))
+ goto errout;
+
+ fh = fc_frame_header_get(fp);
+ r_ctl = fh->fh_r_ctl;
+ lp = fsp->lp;
+
+ if (!(lp->state & LPORT_ST_READY))
+ goto out;
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+ fsp->last_pkt_time = jiffies;
+
+ if (r_ctl == FC_RCTL_DD_DATA_DESC) {
+ /*
+ * received XFER RDY from the target
+ * need to send data to the target
+ */
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+ dd = fc_frame_payload_get(fp, sizeof(*dd));
+ WARN_ON(!dd);
+
+ fc_fcp_send_data(fsp, sp,
+ (size_t) ntohl(dd->dd_offset),
+ (size_t) ntohl(dd->dd_len), fp,
+ lp->capabilities & TRANS_C_SG);
+ lp->tt.seq_set_rec_data(sp, fsp->xfer_len);
+ } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
+ /*
+ * received a DATA frame
+ * next we will copy the data to the system buffer
+ */
+ WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
+ fc_fcp_recv_data(fsp, fp);
+ lp->tt.seq_set_rec_data(sp, fsp->xfer_contig_end);
+ } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+
+ fc_fcp_fcp_resp(fsp, fp);
+ } else {
+ FC_DBG("unexpected frame. r_ctl %x\n", r_ctl);
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_frame_free(fp);
+errout:
+ /* only reached with IS_ERR(fp) via the goto above; real frames
+ * fall through here after being freed and the test is false. */
+ if (IS_ERR(fp))
+ fc_fcp_error(fsp, fp);
+}
+
+/**
+ * fc_fcp_fcp_resp - handle a received FCP response frame
+ * @fsp: fcp packet
+ * @fp: the response frame
+ *
+ * Validates the response length, records the SCSI status and flags,
+ * copies response-info / sense data if present, applies residual
+ * (under/overrun) accounting, and completes the command.  A short
+ * transfer waits briefly for late data instead of completing.
+ * Caller holds the packet lock.
+ */
+static void fc_fcp_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fcp_resp *fc_rp;
+ struct fcp_resp_ext *rp_ex;
+ struct fcp_resp_rsp_info *fc_rp_info;
+ u32 plen;
+ u32 expected_len;
+ u32 respl = 0;
+ u32 snsl = 0;
+ u8 flags = 0;
+
+ plen = fr_len(fp);
+ fh = (struct fc_frame_header *)fr_hdr(fp);
+ if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
+ goto len_err;
+ plen -= sizeof(*fh);
+ fc_rp = (struct fcp_resp *)(fh + 1);
+ fsp->cdb_status = fc_rp->fr_status;
+ flags = fc_rp->fr_flags;
+ fsp->scsi_comp_flags = flags;
+ expected_len = fsp->data_len;
+
+ /* any flag other than CONF_REQ, or a non-good status, needs the
+ * extended response fields */
+ if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
+ rp_ex = (void *)(fc_rp + 1);
+ if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
+ if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
+ goto len_err;
+ fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
+ if (flags & FCP_RSP_LEN_VAL) {
+ respl = ntohl(rp_ex->fr_rsp_len);
+ if (respl != sizeof(*fc_rp_info))
+ goto len_err;
+ if (fsp->wait_for_comp) {
+ /* Abuse cdb_status for rsp code */
+ fsp->cdb_status = fc_rp_info->rsp_code;
+ complete(&fsp->tm_done);
+ /*
+ * tmfs will not have any scsi cmd so
+ * exit here
+ */
+ return;
+ } else
+ goto err;
+ }
+ if (flags & FCP_SNS_LEN_VAL) {
+ snsl = ntohl(rp_ex->fr_sns_len);
+ /* clamp sense data to the midlayer buffer */
+ if (snsl > SCSI_SENSE_BUFFERSIZE)
+ snsl = SCSI_SENSE_BUFFERSIZE;
+ memcpy(fsp->cmd->sense_buffer,
+ (char *)fc_rp_info + respl, snsl);
+ }
+ }
+ if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
+ if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
+ goto len_err;
+ if (flags & FCP_RESID_UNDER) {
+ fsp->scsi_resid = ntohl(rp_ex->fr_resid);
+ /*
+ * The cmnd->underflow is the minimum number of
+ * bytes that must be transfered for this
+ * command. Provided a sense condition is not
+ * present, make sure the actual amount
+ * transferred is at least the underflow value
+ * or fail.
+ */
+ if (!(flags & FCP_SNS_LEN_VAL) &&
+ (fc_rp->fr_status == 0) &&
+ (scsi_bufflen(fsp->cmd) -
+ fsp->scsi_resid) < fsp->cmd->underflow)
+ goto err;
+ expected_len -= fsp->scsi_resid;
+ } else {
+ fsp->status_code = FC_ERROR;
+ }
+ }
+ }
+ fsp->state |= FC_SRB_RCV_STATUS;
+
+ /*
+ * Check for missing or extra data frames.
+ */
+ if (unlikely(fsp->xfer_len != expected_len)) {
+ if (fsp->xfer_len < expected_len) {
+ /*
+ * Some data may be queued locally,
+ * Wait a at least one jiffy to see if it is delivered.
+ * If this expires without data, we may do SRR.
+ */
+ fc_fcp_timer_set(fsp, 2);
+ return;
+ }
+ fsp->status_code = FC_DATA_OVRRUN;
+ FC_DBG("tgt %6x xfer len %zx greater than expected len %x. "
+ "data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ }
+ fc_fcp_complete(fsp);
+ return;
+
+len_err:
+ FC_DBG("short FCP response. flags 0x%x len %u respl %u snsl %u\n",
+ flags, fr_len(fp), respl, snsl);
+err:
+ fsp->status_code = FC_ERROR;
+ fc_fcp_complete(fsp);
+}
+
+/**
+ * fc_fcp_complete - complete processing of a fcp packet
+ * @fsp: fcp packet
+ *
+ * Detects transport underruns, sends the confirmation frame if the
+ * target requested one (FCP_CONF_REQ), finishes the exchange, and
+ * returns the command to scsi-ml via fc_io_compl().
+ *
+ * This function may sleep if a timer is pending. The packet lock must be
+ * held, and the host lock must not be held.
+ */
+static void fc_fcp_complete(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lp = fsp->lp;
+ struct fc_seq *sp;
+ u32 f_ctl;
+
+ /*
+ * Test for transport underrun, independent of response underrun status.
+ */
+ if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+ (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+ fsp->status_code = FC_DATA_UNDRUN;
+ fsp->io_status = SUGGEST_RETRY << 24;
+ }
+
+ sp = fsp->seq_ptr;
+ if (sp) {
+ fsp->seq_ptr = NULL;
+ if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
+ struct fc_frame *conf_frame;
+ struct fc_seq *csp;
+
+ /* target asked for a confirmation frame; send it on a
+ * new sequence of the same exchange */
+ csp = lp->tt.seq_start_next(sp);
+ conf_frame = fc_frame_alloc(fsp->lp, 0);
+ if (conf_frame) {
+ fc_frame_setup(conf_frame,
+ FC_RCTL_DD_SOL_CTL, FC_TYPE_FCP);
+ f_ctl = FC_FC_SEQ_INIT;
+ f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ lp->tt.seq_send(lp, csp, conf_frame, f_ctl);
+ } else
+ lp->tt.exch_done(csp);
+ } else
+ lp->tt.exch_done(sp);
+ }
+ fc_io_compl(fsp);
+}
+
+/**
+ * fc_fcp_cleanup_each_cmd - run fn on each active command
+ * @lp: logical port
+ * @id: target id
+ * @lun: lun
+ * @fn: actor function
+ *
+ * If lun or id is -1, they are ignored.
+ *
+ * @fn must not call fc_io_compl on the fsp; this loop completes
+ * each matched command itself after @fn runs.
+ */
+static void fc_fcp_cleanup_each_cmd(struct fc_lport *lp, unsigned int id,
+ unsigned int lun,
+ void (*fn)(struct fc_fcp_pkt *))
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ struct fc_fcp_pkt *fsp;
+ struct scsi_cmnd *sc_cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(lp->host->host_lock, flags);
+restart:
+ list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
+ sc_cmd = fsp->cmd;
+ /* id/lun are unsigned; -1 wraps to UINT_MAX as the wildcard */
+ if (id != -1 && scmd_id(sc_cmd) != id)
+ continue;
+
+ if (lun != -1 && sc_cmd->device->lun != lun)
+ continue;
+
+ /* hold the pkt so it survives the host-lock drop below */
+ fc_fcp_pkt_hold(fsp);
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+ if (!fc_fcp_lock_pkt(fsp)) {
+ fn(fsp);
+ fc_io_compl(fsp);
+ fc_fcp_unlock_pkt(fsp);
+ }
+
+ fc_fcp_pkt_release(fsp);
+ spin_lock_irqsave(lp->host->host_lock, flags);
+ /*
+ * while we dropped the lock multiple pkts could
+ * have been released, so we have to start over.
+ */
+ goto restart;
+ }
+ spin_unlock_irqrestore(lp->host->host_lock, flags);
+}
+
+/*
+ * Per-command actor for fc_fcp_abort_io(): finish the exchange if no
+ * status has been received yet, then mark the command so scsi-ml will
+ * see an error with a retry suggestion.
+ */
+static void fc_fcp_cleanup_aborted_io(struct fc_fcp_pkt *fsp)
+{
+ if (!(fsp->state & FC_SRB_RCV_STATUS) && fsp->seq_ptr) {
+ fsp->lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->status_code = FC_ERROR;
+ fsp->io_status = SUGGEST_RETRY << 24;
+}
+
+/*
+ * Abort all outstanding commands on the lport (id/lun wildcards),
+ * running fc_fcp_cleanup_aborted_io() on each one.
+ */
+static void fc_fcp_abort_io(struct fc_lport *lp)
+{
+ fc_fcp_cleanup_each_cmd(lp, -1, -1, fc_fcp_cleanup_aborted_io);
+}
+
+/**
+ * fc_fcp_pkt_send - send a fcp packet to the lower level.
+ * @lp: fc lport
+ * @fsp: fc packet.
+ *
+ * Fills in the FCP_CMND payload from the scsi command, queues the
+ * packet on the lport's active list, and hands it to fc_fcp_send_cmd().
+ * The host lock is dropped around the actual send.
+ *
+ * This is called by upper layer protocol.
+ * Return : zero for success and -1 for failure
+ * Context : called from queuecommand which can be called from process
+ * or scsi soft irq.
+ * Locks : called with the host lock and irqs disabled.
+ */
+static int fc_fcp_pkt_send(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+ int rc;
+
+ /* back-pointer so the scsi_cmnd can be mapped back to its fsp */
+ fsp->cmd->SCp.ptr = (char *)fsp;
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
+
+ int_to_scsilun(fsp->cmd->device->lun,
+ (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+ memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
+ list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+
+ /* drop the host lock across the send; it may block/allocate */
+ spin_unlock_irq(lp->host->host_lock);
+ rc = fc_fcp_send_cmd(fsp);
+ spin_lock_irq(lp->host->host_lock);
+ if (rc)
+ list_del(&fsp->list);
+
+ return rc;
+}
+/* Timer thunk: retry sending the FCP command (see the retry path in
+ * fc_fcp_send_cmd()). @data is the fc_fcp_pkt pointer. */
+static void fc_fcp_retry_send_cmd(unsigned long data)
+{
+ fc_fcp_send_cmd((struct fc_fcp_pkt *)data);
+}
+
+/*
+ * Build and send the FCP_CMND frame for @fsp on a new exchange, with
+ * fc_fcp_recv() as the response handler, and arm the command timer.
+ * On frame/exchange allocation failure the send is retried later via
+ * fc_fcp_retry_send_cmd().  Returns 0, or -1 if the packet could not
+ * be locked (already completed back to scsi-ml).
+ */
+static int fc_fcp_send_cmd(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lp;
+ struct fc_frame *fp;
+ struct fc_seq *sp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return -1;
+
+ if (fsp->state & FC_SRB_COMPL)
+ goto unlock;
+
+ lp = fsp->lp;
+ fp = fc_frame_alloc(lp, sizeof(fsp->cdb_cmd));
+ if (!fp)
+ goto retry;
+ memcpy(fc_frame_payload_get(fp, sizeof(fsp->cdb_cmd)),
+ &fsp->cdb_cmd, sizeof(fsp->cdb_cmd));
+ fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP);
+ fc_frame_set_offset(fp, 0);
+ rport = fsp->rport;
+ fsp->max_payload = rport->maxframe_size;
+ rp = rport->dd_data;
+ sp = lp->tt.exch_seq_send(lp, fp,
+ fc_fcp_recv,
+ fsp, 0,
+ rp->local_port->fid,
+ rport->port_id,
+ FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+ if (!sp) {
+ fc_frame_free(fp);
+ goto retry;
+ }
+ fsp->seq_ptr = sp;
+
+ /* command is on the wire: arm the error-recovery timer */
+ setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp,
+ (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
+ FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+ return 0;
+retry:
+ /* allocation failed: re-run this function from the timer */
+ setup_timer(&fsp->timer, fc_fcp_retry_send_cmd, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+ fc_fcp_unlock_pkt(fsp);
+ return 0;
+}
+
+/*
+ * transport error handler
+ *
+ * Called from fc_fcp_recv() when the exchange manager delivers an
+ * ERR_PTR-encoded frame.  Fails the command back to scsi-ml unless
+ * the lport is logging out (cleanup will handle it then).
+ */
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct fc_lport *lp = fsp->lp;
+
+ if (lp->state == LPORT_ST_LOGO)
+ return;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ FC_DBG("unknown error %ld\n", PTR_ERR(fp));
+ fsp->status_code = FC_CMD_PLOGO;
+ fc_fcp_complete(fsp);
+ fc_fcp_unlock_pkt(fsp);
+}
+
+/*
+ * Internally-initiated abort: mark the packet abort-pending and ask the
+ * exchange layer to abort the sequence.  If the abort was issued, drop
+ * our sequence pointer (the exchange is now aborting); if it could not
+ * be sent, complete the command immediately.
+ */
+static void fc_abort_internal(struct fc_fcp_pkt *fsp)
+{
+ fsp->cdb_status = -1;
+ fsp->state |= FC_SRB_ABORT_PENDING;
+ if (!fsp->lp->tt.seq_exch_abort(fsp->seq_ptr))
+ fsp->seq_ptr = NULL;
+ else
+ fc_fcp_complete(fsp); /* abort couldn't be sent */
+}
+
+/*
+ * Scsi abort handler- calls to send an abort
+ * and then wait for abort completion.
+ *
+ * Issues an abort on the command's exchange and waits (packet lock
+ * dropped) up to FC_SCSI_TM_TOV ms for fc_fcp_fcp_resp() to signal
+ * tm_done.  Returns SUCCESS if the target acknowledged the abort,
+ * FAILED otherwise.  Called with the packet lock held.
+ */
+static int fc_fcp_pkt_abort(struct fc_lport *lp, struct fc_fcp_pkt *fsp)
+{
+ int rc = FAILED;
+
+ if (!fsp->seq_ptr)
+ return rc;
+ if (lp->tt.seq_exch_abort(fsp->seq_ptr))
+ return rc;
+
+ fsp->state |= FC_SRB_ABORT_PENDING;
+
+ init_completion(&fsp->tm_done);
+ fsp->wait_for_comp = 1;
+
+ spin_unlock(&fsp->scsi_pkt_lock);
+ rc = wait_for_completion_timeout(&fsp->tm_done,
+ msecs_to_jiffies(FC_SCSI_TM_TOV));
+ spin_lock(&fsp->scsi_pkt_lock);
+ /*
+ * Clear wait_for_comp under the lock so a response arriving after
+ * the timeout cannot complete() a completion nobody is waiting on
+ * (fc_fcp_fcp_resp() checks this flag before calling complete();
+ * fc_lun_reset() clears it the same way).
+ */
+ fsp->wait_for_comp = 0;
+
+ if (fsp->seq_ptr) {
+ lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+
+ if (!rc) {
+ /* wait timed out with no response to the abort */
+ FC_DBG("target abort cmd failed\n");
+ rc = FAILED;
+ } else if (fsp->state & FC_SRB_ABORTED) {
+ FC_DBG("target abort cmd passed\n");
+ rc = SUCCESS;
+
+ fsp->status_code = FC_CMD_ABORTED;
+ fc_io_compl(fsp);
+ }
+
+ return rc;
+}
+
+/*
+ * Retry LUN reset after resource allocation failed.
+ *
+ * Also the initial sender: fc_lun_reset() calls this directly, and on
+ * frame/exchange allocation failure it re-arms itself via the packet
+ * timer.  The response is handled by fc_tm_done().  @data is the
+ * fc_fcp_pkt pointer (timer-callback signature).
+ */
+static void fc_lun_reset_send(unsigned long data)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+ const size_t len = sizeof(fsp->cdb_cmd);
+ struct fc_lport *lp = fsp->lp;
+ struct fc_frame *fp;
+ struct fc_seq *sp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
+
+ spin_lock(&fsp->scsi_pkt_lock);
+ if (fsp->state & FC_SRB_COMPL)
+ goto unlock;
+
+ fp = fc_frame_alloc(lp, len);
+ if (!fp)
+ goto retry;
+ memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
+ fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CMD, FC_TYPE_FCP);
+ fc_frame_set_offset(fp, 0);
+ rport = fsp->rport;
+ rp = rport->dd_data;
+ sp = lp->tt.exch_seq_send(lp, fp,
+ fc_tm_done,
+ fsp, 0,
+ rp->local_port->fid,
+ rport->port_id,
+ FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+
+ if (sp) {
+ fsp->seq_ptr = sp;
+ goto unlock;
+ }
+ /*
+ * Exchange or frame allocation failed. Set timer and retry.
+ */
+ fc_frame_free(fp);
+retry:
+ setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+unlock:
+ spin_unlock(&fsp->scsi_pkt_lock);
+}
+
+/*
+ * Per-command actor for fc_lun_reset(): the LUN reset terminated this
+ * command on the target, so finish any open exchange and report the
+ * command back to scsi-ml as aborted.
+ */
+static void fc_fcp_cleanup_lun_reset(struct fc_fcp_pkt *fsp)
+{
+ if (fsp->seq_ptr) {
+ fsp->lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->status_code = FC_CMD_ABORTED;
+}
+
+/*
+ * Scsi device reset handler- send a LUN RESET to the device
+ * and wait for reset reply.
+ *
+ * Builds a LUN RESET task-management command in @fsp, sends it via
+ * fc_lun_reset_send(), and waits up to FC_SCSI_TM_TOV ms for
+ * fc_tm_done() to signal tm_done.  On success all outstanding
+ * commands for (@id, @lun) are cleaned up.  Returns SUCCESS/FAILED.
+ */
+static int fc_lun_reset(struct fc_lport *lp, struct fc_fcp_pkt *fsp,
+ unsigned int id, unsigned int lun)
+{
+ int rc;
+
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
+ int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
+
+ fsp->wait_for_comp = 1;
+ init_completion(&fsp->tm_done);
+
+ fc_lun_reset_send((unsigned long)fsp);
+
+ /*
+ * wait for completion of reset
+ * after that make sure all commands are terminated
+ *
+ * FC_SCSI_TM_TOV is in milliseconds, so convert it to jiffies
+ * like fc_fcp_pkt_abort() does; passing it raw would make the
+ * timeout HZ-dependent.
+ */
+ rc = wait_for_completion_timeout(&fsp->tm_done,
+ msecs_to_jiffies(FC_SCSI_TM_TOV));
+
+ /* mark complete first so a pending timer won't re-arm itself */
+ spin_lock(&fsp->scsi_pkt_lock);
+ fsp->state |= FC_SRB_COMPL;
+ spin_unlock(&fsp->scsi_pkt_lock);
+
+ del_timer_sync(&fsp->timer);
+
+ spin_lock(&fsp->scsi_pkt_lock);
+ if (fsp->seq_ptr) {
+ /* TODO:
+ * if the exch resp function is running and trying to grab
+ * the scsi_pkt_lock, this could free the exch from under
+ * it and it could allow the fsp to be freed from under
+ * fc_tm_done.
+ */
+ lp->tt.exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->wait_for_comp = 0;
+ spin_unlock(&fsp->scsi_pkt_lock);
+
+ if (!rc) {
+ FC_DBG("lun reset failed\n");
+ return FAILED;
+ }
+
+ /* cdb_status holds the tmf's rsp code */
+ if (fsp->cdb_status != FCP_TMF_CMPL)
+ return FAILED;
+
+ FC_DBG("lun reset to lun %u completed\n", lun);
+ fc_fcp_cleanup_each_cmd(lp, id, lun, fc_fcp_cleanup_lun_reset);
+ return SUCCESS;
+}
+
+/*
+ * Task Managment response handler
+ *
+ * Exchange-manager callback for the LUN RESET exchange started by
+ * fc_lun_reset_send().  Parses the FCP response (which completes
+ * tm_done via fc_fcp_fcp_resp()) unless the initiator side has
+ * already timed out or errored.
+ */
+static void fc_tm_done(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = arg;
+
+ spin_lock(&fsp->scsi_pkt_lock);
+ /*
+ * raced with eh timeout handler.
+ *
+ * TODO: If this happens we could be freeing the fsp right now and
+ * would oops. Next patches will fix this race.
+ */
+ if ((fsp->state & FC_SRB_COMPL) || !fsp->seq_ptr ||
+ !fsp->wait_for_comp) {
+ spin_unlock(&fsp->scsi_pkt_lock);
+ return;
+ }
+
+ if (IS_ERR(fp)) {
+ /*
+ * If there is an error just let it timeout.
+ * scsi-eh will escalate for us.
+ */
+ spin_unlock(&fsp->scsi_pkt_lock);
+ return;
+ }
+
+ /* fc_fcp_fcp_resp() sees wait_for_comp set and completes tm_done */
+ fc_fcp_fcp_resp(fsp, fp);
+ fsp->seq_ptr = NULL;
+ fsp->lp->tt.exch_done(sp);
+ fc_frame_free(fp);
+ spin_unlock(&fsp->scsi_pkt_lock);
+}
+
+/* Per-command actor for fc_fcp_cleanup(): fail the command with a
+ * hard error (no retry suggestion). */
+static void fc_fcp_cleanup_io(struct fc_fcp_pkt *fsp)
+{
+ fsp->status_code = FC_HRD_ERROR;
+}
+
+/* Terminate every outstanding command on the lport with a hard error
+ * (id/lun wildcards select all commands). */
+static void fc_fcp_cleanup(struct fc_lport *lp)
+{
+ fc_fcp_cleanup_each_cmd(lp, -1, -1, fc_fcp_cleanup_io);
+}
+
+/*
+ * fc_fcp_timeout: called by OS timer function.
+ *
+ * The timer has been inactivated and must be reactivated if desired
+ * using fc_fcp_timer_set().
+ *
+ * Algorithm:
+ *
+ * If REC is supported, just issue it, and return. The REC exchange will
+ * complete or time out, and recovery can continue at that point.
+ *
+ * Otherwise, if the response has been received without all the data,
+ * it has been ER_TIMEOUT since the response was received.
+ *
+ * If the response has not been received,
+ * we see if data was received recently. If it has been, we continue waiting,
+ * otherwise, we abort the command.
+ */
+static void fc_fcp_timeout(unsigned long data)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
+ struct fc_rport *rport = fsp->rport;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ if (fsp->state & FC_SRB_COMPL)
+ goto unlock;
+ fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
+
+ if (rp->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ fc_fcp_rec(fsp);
+ /* use the wrap-safe jiffies comparison helper (was an open-coded
+ * subtraction with a TODO asking for time_before/after) */
+ else if (time_before(jiffies,
+ fsp->last_pkt_time + FC_SCSI_ER_TIMEOUT / 2))
+ fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
+ else if (fsp->state & FC_SRB_RCV_STATUS)
+ fc_fcp_complete(fsp);
+ else
+ fc_timeout_error(fsp);
+
+ fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+}
+
+/*
+ * Send a REC ELS request
+ *
+ * Queries the target for the state of the command's exchange so
+ * recovery can continue (response handled by fc_fcp_rec_resp()).
+ * On allocation failure the REC is retried from the packet timer up
+ * to FC_MAX_RECOV_RETRY times, after which the command is failed.
+ * Called with the packet lock held.
+ */
+static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lp;
+ struct fc_seq *sp;
+ struct fc_frame *fp;
+ struct fc_els_rec *rec;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
+ u16 ox_id;
+ u16 rx_id;
+
+ lp = fsp->lp;
+ rport = fsp->rport;
+ rp = rport->dd_data;
+ sp = fsp->seq_ptr;
+ /* no exchange to recover, or rport not logged in: give up */
+ if (!sp || rp->rp_state != RPORT_ST_READY) {
+ fsp->status_code = FC_HRD_ERROR;
+ fsp->io_status = SUGGEST_RETRY << 24;
+ fc_fcp_complete(fsp);
+ return;
+ }
+ /* the REC payload identifies the exchange by its OX_ID/RX_ID */
+ lp->tt.seq_get_xids(sp, &ox_id, &rx_id);
+ fp = fc_frame_alloc(lp, sizeof(*rec));
+ if (!fp)
+ goto retry;
+
+ rec = fc_frame_payload_get(fp, sizeof(*rec));
+ memset(rec, 0, sizeof(*rec));
+ rec->rec_cmd = ELS_REC;
+ hton24(rec->rec_s_id, lp->fid);
+ rec->rec_ox_id = htons(ox_id);
+ rec->rec_rx_id = htons(rx_id);
+
+ fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+ fc_frame_set_offset(fp, 0);
+ sp = lp->tt.exch_seq_send(lp, fp,
+ fc_fcp_rec_resp,
+ fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
+ rp->local_port->fid,
+ rport->port_id,
+ FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+
+ if (sp) {
+ fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
+ return;
+ } else
+ fc_frame_free(fp);
+retry:
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+ else
+ fc_timeout_error(fsp);
+}
+
+/*
+ * Receive handler for REC ELS frame
+ * if it is a reject then let the scsi layer to handle
+ * the timeout. if it is a LS_ACC then if the io was not completed
+ * then set the timeout and return otherwise complete the exchange
+ * and tell the scsi layer to restart the I/O.
+ *
+ * On LS_ACC the exchange-status bits (e_stat) decide whether to wait,
+ * or to recover lost frames with an SRR at the appropriate offset.
+ * Drops the reference held since the REC was sent.
+ */
+static void fc_fcp_rec_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+ struct fc_els_rec_acc *recp;
+ struct fc_els_ls_rjt *rjt;
+ u32 e_stat;
+ u8 opcode;
+ u32 offset;
+ enum dma_data_direction data_dir;
+ enum fc_rctl r_ctl;
+ struct fc_rport_libfc_priv *rp;
+
+ if (IS_ERR(fp)) {
+ fc_fcp_rec_error(fsp, fp);
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ fsp->recov_retry = 0;
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ switch (rjt->er_reason) {
+ default:
+ if (fc_fcp_debug)
+ FC_DBG("device %x unexpected REC reject "
+ "reason %d expl %d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
+ /* fall through */
+
+ case ELS_RJT_UNSUP:
+ if (fc_fcp_debug)
+ FC_DBG("device does not support REC\n");
+ rp = fsp->rport->dd_data;
+ /* remember this so future timeouts skip REC */
+ rp->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
+ /* fall through */
+
+ case ELS_RJT_LOGIC:
+ case ELS_RJT_UNAB:
+ /*
+ * If no data transfer, the command frame got dropped
+ * so we just retry. If data was transferred, we
+ * lost the response but the target has no record,
+ * so we abort and retry.
+ */
+ if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
+ fsp->xfer_len == 0) {
+ fc_fcp_retry_cmd(fsp);
+ break;
+ }
+ fc_timeout_error(fsp);
+ break;
+ }
+ } else if (opcode == ELS_LS_ACC) {
+ if (fsp->state & FC_SRB_ABORTED)
+ goto unlock_out;
+
+ data_dir = fsp->cmd->sc_data_direction;
+ recp = fc_frame_payload_get(fp, sizeof(*recp));
+ offset = ntohl(recp->reca_fc4value);
+ e_stat = ntohl(recp->reca_e_stat);
+
+ if (e_stat & ESB_ST_COMPLETE) {
+
+ /*
+ * The exchange is complete.
+ *
+ * For output, we must've lost the response.
+ * For input, all data must've been sent.
+ * We lost may have lost the response
+ * (and a confirmation was requested) and maybe
+ * some data.
+ *
+ * If all data received, send SRR
+ * asking for response. If partial data received,
+ * or gaps, SRR requests data at start of gap.
+ * Recovery via SRR relies on in-order-delivery.
+ */
+ if (data_dir == DMA_TO_DEVICE) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else if (fsp->xfer_contig_end == offset) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else {
+ offset = fsp->xfer_contig_end;
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ }
+ fc_fcp_srr(fsp, r_ctl, offset);
+ } else if (e_stat & ESB_ST_SEQ_INIT) {
+
+ /*
+ * The remote port has the initiative, so just
+ * keep waiting for it to complete.
+ */
+ fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+ } else {
+
+ /*
+ * The exchange is incomplete, we have seq. initiative.
+ * Lost response with requested confirmation,
+ * lost confirmation, lost transfer ready or
+ * lost write data.
+ *
+ * For output, if not all data was received, ask
+ * for transfer ready to be repeated.
+ *
+ * If we received or sent all the data, send SRR to
+ * request response.
+ *
+ * If we lost a response, we may have lost some read
+ * data as well.
+ */
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ if (data_dir == DMA_TO_DEVICE) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ if (offset < fsp->data_len)
+ r_ctl = FC_RCTL_DD_DATA_DESC;
+ } else if (offset == fsp->xfer_contig_end) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else if (fsp->xfer_contig_end < offset) {
+ offset = fsp->xfer_contig_end;
+ }
+ fc_fcp_srr(fsp, r_ctl, offset);
+ }
+ }
+unlock_out:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
+ fc_frame_free(fp);
+}
+
+/*
+ * Handle error response or timeout for REC exchange.
+ *
+ * A timeout means the REC (or its LS_ACC) was lost, so the REC is
+ * retried up to FC_MAX_RECOV_RETRY times; other errors fail the
+ * command.  Always drops the reference held for the outstanding REC.
+ */
+static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct fc_lport *lp = fsp->lp;
+ int error = PTR_ERR(fp);
+
+ if (lp->state == LPORT_ST_LOGO)
+ return;
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ switch (error) {
+ case -FC_EX_CLOSED:
+ fc_timeout_error(fsp);
+ break;
+
+ default:
+ FC_DBG("REC %p fid %x error unexpected error %d\n",
+ fsp, fsp->rport->port_id, error);
+ fsp->status_code = FC_CMD_PLOGO;
+ /* fall through */
+
+ case -FC_EX_TIMEOUT:
+ /*
+ * Assume REC or LS_ACC was lost.
+ * The exchange manager will have aborted REC, so retry.
+ */
+ FC_DBG("REC fid %x error error %d retry %d/%d\n",
+ fsp->rport->port_id, error, fsp->recov_retry,
+ FC_MAX_RECOV_RETRY);
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_rec(fsp);
+ else
+ fc_timeout_error(fsp);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
+}
+
+/*
+ * Time out error routine:
+ * abort's the I/O close the exchange and
+ * send completion notification to scsi layer
+ *
+ * Caller holds the packet lock.
+ */
+static void fc_timeout_error(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lp = fsp->lp;
+
+ fsp->state |= FC_SRB_ABORT_PENDING;
+ /* best-effort abort; the return value is deliberately ignored */
+ if (fsp->seq_ptr)
+ lp->tt.seq_exch_abort(fsp->seq_ptr);
+
+ fsp->seq_ptr = NULL;
+ fsp->status_code = FC_CMD_TIME_OUT;
+ fsp->cdb_status = 0;
+ fsp->io_status = 0;
+
+ fc_io_compl(fsp);
+}
+
+/*
+ * Retry command.
+ * An abort isn't needed.
+ *
+ * The command never completed - presumably the command frame was lost -
+ * so we treat it like a timeout: finish the exchange, fail the command,
+ * and let scsi-ml decide whether to retry it.
+ *
+ * TODO: Instead we could continue to retry the command until the scsi
+ * command fires, or add port level counters to determine
+ * when to mark it as failed (the latter would be useful in the class eh
+ * for lpfc and qla2xxx).
+ */
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
+{
+ struct fc_seq *seq = fsp->seq_ptr;
+
+ if (seq) {
+ fsp->seq_ptr = NULL;
+ fsp->lp->tt.exch_done(seq);
+ }
+ fsp->status_code = FC_CMD_TIME_OUT;
+ fc_fcp_complete(fsp);
+}
+
+/*
+ * Sequence retransmission request.
+ * This is called after receiving status but insufficient data, or
+ * when expecting status but the request has timed out.
+ *
+ * Asks the target to resend starting at @offset with the given @r_ctl
+ * (data, transfer-ready, or status).  The SRR exchange is tracked in
+ * fsp->recov_seq and answered in fc_fcp_srr_resp().  Any failure falls
+ * back to fc_fcp_retry().  Caller holds the packet lock.
+ */
+static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
+{
+ struct fc_lport *lp = fsp->lp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
+ struct fc_seq *sp;
+ struct fcp_srr *srr;
+ struct fc_frame *fp;
+ u8 cdb_op;
+ u16 ox_id;
+ u16 rx_id;
+
+ rport = fsp->rport;
+ rp = rport->dd_data;
+ cdb_op = fsp->cdb_cmd.fc_cdb[0];
+ /* SRR names the original exchange by its OX_ID/RX_ID */
+ lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id);
+
+ if (!(rp->flags & FC_RP_FLAGS_RETRY) || rp->rp_state != RPORT_ST_READY)
+ goto retry; /* shouldn't happen */
+ fp = fc_frame_alloc(lp, sizeof(*srr));
+ if (!fp)
+ goto retry;
+
+ srr = fc_frame_payload_get(fp, sizeof(*srr));
+ memset(srr, 0, sizeof(*srr));
+ srr->srr_op = ELS_SRR;
+ srr->srr_ox_id = htons(ox_id);
+ srr->srr_rx_id = htons(rx_id);
+ srr->srr_r_ctl = r_ctl;
+ srr->srr_rel_off = htonl(offset);
+
+ fc_frame_setup(fp, FC_RCTL_ELS4_REQ, FC_TYPE_FCP);
+ fc_frame_set_offset(fp, 0);
+ sp = lp->tt.exch_seq_send(lp, fp,
+ fc_fcp_srr_resp,
+ fsp, jiffies_to_msecs(FC_SCSI_REC_TOV),
+ rp->local_port->fid,
+ rport->port_id,
+ FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+ if (!sp) {
+ fc_frame_free(fp);
+ goto retry;
+ }
+ /* rewind transfer accounting to the retransmission point */
+ fsp->recov_seq = sp;
+ fsp->xfer_len = offset;
+ fsp->xfer_contig_end = offset;
+ fsp->state &= ~FC_SRB_RCV_STATUS;
+ fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
+ return;
+retry:
+ fc_fcp_retry(fsp);
+}
+
+/*
+ * Handle response from SRR.
+ *
+ * LS_ACC re-arms the command timer and clears the recovery retry count;
+ * LS_RJT (or anything else) fails the command via fc_timeout_error().
+ * Frame errors are routed to fc_fcp_srr_error().  Always drops the
+ * reference taken when the SRR was sent.
+ */
+static void fc_fcp_srr_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+	struct fc_fcp_pkt *fsp = arg;
+	u16 ox_id;
+	u16 rx_id;
+
+	if (IS_ERR(fp)) {
+		fc_fcp_srr_error(fsp, fp);
+		return;
+	}
+
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+
+	fsp->recov_seq = NULL;
+
+	/* NOTE(review): ox_id/rx_id are fetched but never used here */
+	fsp->lp->tt.seq_get_xids(fsp->seq_ptr, &ox_id, &rx_id);
+	switch (fc_frame_payload_op(fp)) {
+	case ELS_LS_ACC:
+		fsp->recov_retry = 0;
+		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
+		break;
+	case ELS_LS_RJT:
+	default:
+		fc_timeout_error(fsp);
+		break;
+	}
+	fc_fcp_unlock_pkt(fsp);
+	fsp->lp->tt.exch_done(sp);
+out:
+	fc_frame_free(fp);
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
+}
+
+/*
+ * Handle an errored SRR exchange (fp is an ERR_PTR).
+ * CLOSED (e.g. link failure) fails the command; TIMEOUT retries recovery
+ * via REC up to FC_MAX_RECOV_RETRY times; anything else retries the
+ * command itself.  Drops the reference taken when the SRR was sent.
+ */
+static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+	if (fc_fcp_lock_pkt(fsp))
+		goto out;
+	fsp->lp->tt.exch_done(fsp->recov_seq);
+	fsp->recov_seq = NULL;
+	switch (PTR_ERR(fp)) {
+	case -FC_EX_CLOSED:		/* e.g., link failure */
+		fc_timeout_error(fsp);
+		break;
+	case -FC_EX_TIMEOUT:
+		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+			fc_fcp_rec(fsp);
+		else
+			fc_timeout_error(fsp);
+		break;
+	default:
+		fc_fcp_retry(fsp);
+		break;
+	}
+	fc_fcp_unlock_pkt(fsp);
+out:
+	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
+}
+
+/**
+ * fc_queuecommand - The queuecommand function of the scsi template
+ * @cmd: struct scsi_cmnd to be executed
+ * @done: Callback function to be called when cmd is completed
+ *
+ * this is the i/o strategy routine, called by the scsi layer
+ * this routine is called with holding the host_lock.
+ *
+ * Returns 0 on acceptance (including immediate completion via @done)
+ * or SCSI_MLQUEUE_HOST_BUSY when the command should be requeued.
+ */
+int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
+{
+	struct fc_lport *lp;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	struct fc_fcp_pkt *sp;
+	struct fc_rport_libfc_priv *rp;
+	int rval;
+	int rc = 0;
+	struct fcoe_dev_stats *stats;
+
+	lp = shost_priv(sc_cmd->device->host);
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval) {
+		sc_cmd->result = rval;
+		done(sc_cmd);
+		goto out;
+	}
+
+	if (!*(struct fc_remote_port **)rport->dd_data) {
+		/*
+		 * rport is transitioning from blocked/deleted to
+		 * online
+		 */
+		sc_cmd->result = DID_IMM_RETRY << 16;
+		done(sc_cmd);
+		goto out;
+	}
+
+	rp = rport->dd_data;
+
+	if (lp->state != LPORT_ST_READY) {
+		if (lp->link_status & FC_PAUSE) {
+			rc = SCSI_MLQUEUE_HOST_BUSY;
+			goto out;
+		} else {
+			sc_cmd->result = DID_NO_CONNECT << 16;
+			done(sc_cmd);
+			goto out;
+		}
+	} else {
+		if (!(lp->link_status & FC_LINK_UP)) {
+			sc_cmd->result = DID_NO_CONNECT << 16;
+			done(sc_cmd);
+			goto out;
+		}
+	}
+
+	sp = fc_fcp_pkt_alloc(lp);
+	if (sp == NULL) {
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+		goto out;
+	}
+
+	/*
+	 * build the libfc request pkt
+	 */
+	sp->cmd = sc_cmd;	/* save the cmd */
+	sp->lp = lp;		/* save the softc ptr */
+	sp->rport = rport;	/* set the remote port ptr */
+	sc_cmd->scsi_done = done;
+
+	/*
+	 * set up the transfer length
+	 */
+	sp->data_len = scsi_bufflen(sc_cmd);
+	sp->xfer_len = 0;
+
+	/*
+	 * setup the data direction
+	 */
+	stats = lp->dev_stats[smp_processor_id()];
+	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		sp->req_flags = FC_SRB_READ;
+		stats->InputRequests++;
+		/*
+		 * Accumulate, don't overwrite: these are running per-CPU
+		 * counters, like the Requests counters above.
+		 * NOTE(review): the fields are named *Megabytes but the
+		 * value added is in bytes — confirm the reader's scaling.
+		 */
+		stats->InputMegabytes += sp->data_len;
+	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		sp->req_flags = FC_SRB_WRITE;
+		stats->OutputRequests++;
+		stats->OutputMegabytes += sp->data_len;
+	} else {
+		sp->req_flags = 0;
+		stats->ControlRequests++;
+	}
+
+	sp->tgt_flags = rp->flags;
+
+	init_timer(&sp->timer);
+	sp->timer.data = (unsigned long)sp;
+
+	/*
+	 * send it to the lower layer
+	 * if we get -1 return then put the request in the pending
+	 * queue.
+	 */
+	rval = fc_fcp_pkt_send(lp, sp);
+	if (rval != 0) {
+		sp->state = FC_SRB_FREE;
+		fc_fcp_pkt_free(sp);
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+	}
+out:
+	return rc;
+}
+EXPORT_SYMBOL(fc_queuecommand);
+
+/**
+ * fc_io_compl - Handle responses for completed commands
+ * @sp: scsi packet
+ *
+ * Translates a libfc status_code into a Linux SCSI result, detaches the
+ * scsi_cmnd from the packet, completes it via scsi_done, and drops the
+ * packet reference taken at queuecommand time.
+ *
+ * The fcp packet lock must be held when calling.
+ */
+static void fc_io_compl(struct fc_fcp_pkt *sp)
+{
+	struct scsi_cmnd *sc_cmd;
+	struct fc_lport *lp;
+	unsigned long flags;
+
+	sp->state |= FC_SRB_COMPL;
+	if (!(sp->state & FC_SRB_FCP_PROCESSING_TMO)) {
+		/*
+		 * Drop the packet lock around del_timer_sync(),
+		 * presumably so the timer handler can take the lock
+		 * without deadlocking — confirm against fc_fcp_timeout.
+		 */
+		spin_unlock(&sp->scsi_pkt_lock);
+		del_timer_sync(&sp->timer);
+		spin_lock(&sp->scsi_pkt_lock);
+	}
+
+	lp = sp->lp;
+	spin_lock_irqsave(lp->host->host_lock, flags);
+	if (!sp->cmd) {
+		/* command already completed elsewhere */
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		return;
+	}
+
+	sc_cmd = sp->cmd;
+	sp->cmd = NULL;
+
+	if (!sc_cmd->SCp.ptr) {
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		return;
+	}
+
+	CMD_SCSI_STATUS(sc_cmd) = sp->cdb_status;
+	switch (sp->status_code) {
+	case FC_COMPLETE:
+		if (sp->cdb_status == 0) {
+			/*
+			 * good I/O status
+			 */
+			sc_cmd->result = DID_OK << 16;
+			if (sp->scsi_resid)
+				CMD_RESID_LEN(sc_cmd) = sp->scsi_resid;
+		} else if (sp->cdb_status == QUEUE_FULL) {
+			struct scsi_device *tmp_sdev;
+			struct scsi_device *sdev = sc_cmd->device;
+
+			/* throttle every LUN on this target */
+			shost_for_each_device(tmp_sdev, sdev->host) {
+				if (tmp_sdev->id != sdev->id)
+					continue;
+
+				if (tmp_sdev->queue_depth > 1) {
+					scsi_track_queue_full(tmp_sdev,
+							      tmp_sdev->
+							      queue_depth - 1);
+				}
+			}
+			sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
+		} else {
+			/*
+			 * transport level I/O was ok but scsi
+			 * has non zero status
+			 */
+			sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
+		}
+		break;
+	case FC_ERROR:
+		if (sp->io_status & (SUGGEST_RETRY << 24))
+			sc_cmd->result = DID_IMM_RETRY << 16;
+		else
+			sc_cmd->result = (DID_ERROR << 16) | sp->io_status;
+		break;
+	case FC_DATA_UNDRUN:
+		if (sp->cdb_status == 0) {
+			/*
+			 * scsi status is good but transport level
+			 * underrun. for read it should be an error??
+			 */
+			sc_cmd->result = (DID_OK << 16) | sp->cdb_status;
+		} else {
+			/*
+			 * scsi got underrun, this is an error
+			 */
+			CMD_RESID_LEN(sc_cmd) = sp->scsi_resid;
+			sc_cmd->result = (DID_ERROR << 16) | sp->cdb_status;
+		}
+		break;
+	case FC_DATA_OVRRUN:
+		/*
+		 * overrun is an error
+		 */
+		sc_cmd->result = (DID_ERROR << 16) | sp->cdb_status;
+		break;
+	case FC_CMD_ABORTED:
+		sc_cmd->result = (DID_ABORT << 16) | sp->io_status;
+		break;
+	case FC_CMD_TIME_OUT:
+		sc_cmd->result = (DID_BUS_BUSY << 16) | sp->io_status;
+		break;
+	case FC_CMD_RESET:
+		sc_cmd->result = (DID_RESET << 16);
+		break;
+	case FC_HRD_ERROR:
+		sc_cmd->result = (DID_NO_CONNECT << 16);
+		break;
+	default:
+		sc_cmd->result = (DID_ERROR << 16);
+		break;
+	}
+
+	list_del(&sp->list);
+	sc_cmd->SCp.ptr = NULL;
+	sc_cmd->scsi_done(sc_cmd);
+	spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+	/* release ref from initial allocation in queue command */
+	fc_fcp_pkt_release(sp);
+}
+
+/**
+ * fc_eh_abort - Abort a command...from scsi host template
+ * @sc_cmd: scsi command to abort
+ *
+ * send ABTS to the target device and wait for the response
+ * sc_cmd is the pointer to the command to be aborted.
+ *
+ * Returns SUCCESS if the command already completed or was aborted,
+ * FAILED otherwise (including when the port/link is not up).
+ */
+int fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_fcp_pkt *sp;
+	struct fc_lport *lp;
+	int rc = FAILED;
+	unsigned long flags;
+
+	lp = shost_priv(sc_cmd->device->host);
+	if (lp->state != LPORT_ST_READY)
+		return rc;
+	else if (!(lp->link_status & FC_LINK_UP))
+		return rc;
+
+	/* host_lock protects the cmd<->packet association */
+	spin_lock_irqsave(lp->host->host_lock, flags);
+	sp = CMD_SP(sc_cmd);
+	if (!sp) {
+		/* command completed while scsi eh was setting up */
+		spin_unlock_irqrestore(lp->host->host_lock, flags);
+		return SUCCESS;
+	}
+	/* grab a ref so the sp and sc_cmd cannot be released from under us */
+	fc_fcp_pkt_hold(sp);
+	spin_unlock_irqrestore(lp->host->host_lock, flags);
+
+	if (fc_fcp_lock_pkt(sp)) {
+		/* completed while we were waiting for timer to be deleted */
+		rc = SUCCESS;
+		goto release_pkt;
+	}
+
+	sp->state |= FC_SRB_ABORT_PENDING;
+	rc = fc_fcp_pkt_abort(lp, sp);
+	fc_fcp_unlock_pkt(sp);
+
+release_pkt:
+	fc_fcp_pkt_release(sp);
+	return rc;
+}
+EXPORT_SYMBOL(fc_eh_abort);
+
+/**
+ * fc_eh_device_reset: Reset a single LUN
+ * @sc_cmd: scsi command
+ *
+ * Set from scsi host template to send tm cmd to the target and wait for the
+ * response.
+ *
+ * Returns SUCCESS/FAILED from fc_lun_reset(), or FAILED when the rport
+ * is not ready, the lport is not ready, or packet allocation fails.
+ */
+int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_lport *lp;
+	struct fc_fcp_pkt *sp;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+	int rc = FAILED;
+	struct fc_rport_libfc_priv *rp;
+	int rval;
+
+	rval = fc_remote_port_chkready(rport);
+	if (rval)
+		goto out;
+
+	rp = rport->dd_data;
+	lp = shost_priv(sc_cmd->device->host);
+
+	if (lp->state != LPORT_ST_READY)
+		return rc;
+
+	sp = fc_fcp_pkt_alloc(lp);
+	if (sp == NULL) {
+		FC_DBG("could not allocate scsi_pkt\n");
+		sc_cmd->result = DID_NO_CONNECT << 16;
+		goto out;
+	}
+
+	/*
+	 * Build the libfc request pkt. Do not set the scsi cmnd, because
+	 * the sc passed in is not setup for execution like when sent
+	 * through the queuecommand callout.
+	 */
+	sp->lp = lp;		/* save the softc ptr */
+	sp->rport = rport;	/* set the remote port ptr */
+
+	/*
+	 * flush outstanding commands
+	 */
+	rc = fc_lun_reset(lp, sp, scmd_id(sc_cmd), sc_cmd->device->lun);
+	sp->state = FC_SRB_FREE;
+	fc_fcp_pkt_free(sp);
+
+out:
+	return rc;
+}
+EXPORT_SYMBOL(fc_eh_device_reset);
+
+/**
+ * fc_eh_host_reset - The reset function will reset the ports on the host.
+ * @sc_cmd: scsi command
+ *
+ * Returns SUCCESS when the local port reset succeeds, FAILED otherwise.
+ */
+int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
+{
+	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
+
+	return lport->tt.lport_reset(lport) ? FAILED : SUCCESS;
+}
+EXPORT_SYMBOL(fc_eh_host_reset);
+
+/**
+ * fc_slave_alloc - configure queue depth
+ * @sdev: scsi device
+ *
+ * Configures queue depth based on host's cmd_per_lun. If not set
+ * then we use the libfc default.
+ *
+ * Returns 0 on success, -ENXIO when the remote port is missing or
+ * not ready.
+ */
+int fc_slave_alloc(struct scsi_device *sdev)
+{
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	int depth;
+
+	if (!rport || fc_remote_port_chkready(rport))
+		return -ENXIO;
+
+	if (!sdev->tagged_supported)
+		return 0;
+
+	depth = sdev->host->hostt->cmd_per_lun;
+	if (!depth)
+		depth = FC_FCP_DFLT_QUEUE_DEPTH;
+	scsi_activate_tcq(sdev, depth);
+	return 0;
+}
+EXPORT_SYMBOL(fc_slave_alloc);
+
+/*
+ * fc_change_queue_depth() - sysfs/scsi-ml hook to set a device's queue
+ * depth, preserving the current tag type.  Returns the resulting depth.
+ */
+int fc_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	return sdev->queue_depth;
+}
+EXPORT_SYMBOL(fc_change_queue_depth);
+
+/*
+ * fc_change_queue_type() - change the device's tag type, activating or
+ * deactivating tagged command queuing accordingly.  Returns the tag type
+ * in effect (0 when the device does not support tagging).
+ */
+int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+	if (!sdev->tagged_supported)
+		return 0;
+
+	scsi_set_tag_type(sdev, tag_type);
+	if (tag_type)
+		scsi_activate_tcq(sdev, sdev->queue_depth);
+	else
+		scsi_deactivate_tcq(sdev, sdev->queue_depth);
+
+	return tag_type;
+}
+EXPORT_SYMBOL(fc_change_queue_type);
+
+/*
+ * fc_fcp_destroy() - tear down the per-lport FCP state allocated by
+ * fc_fcp_init().  Warns if packets are still queued (they would leak).
+ */
+void fc_fcp_destroy(struct fc_lport *lp)
+{
+	struct fc_fcp_internal *si = fc_get_scsi_internal(lp);
+
+	if (!list_empty(&si->scsi_pkt_queue))
+		printk(KERN_ERR "Leaked scsi packets.\n");
+
+	mempool_destroy(si->scsi_pkt_pool);
+	kfree(si);
+	lp->scsi_priv = NULL;
+}
+EXPORT_SYMBOL(fc_fcp_destroy);
+
+/*
+ * fc_fcp_init() - set up per-lport FCP state: default transport template
+ * hooks, the packet queue, and a mempool backed by the global slab cache.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+int fc_fcp_init(struct fc_lport *lp)
+{
+	int rc;
+	struct fc_fcp_internal *si;
+
+	/* only fill in hooks the caller has not overridden */
+	if (!lp->tt.scsi_cleanup)
+		lp->tt.scsi_cleanup = fc_fcp_cleanup;
+
+	if (!lp->tt.scsi_abort_io)
+		lp->tt.scsi_abort_io = fc_fcp_abort_io;
+
+	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
+	if (!si)
+		return -ENOMEM;
+	lp->scsi_priv = si;
+	INIT_LIST_HEAD(&si->scsi_pkt_queue);
+
+	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
+	if (!si->scsi_pkt_pool) {
+		rc = -ENOMEM;
+		goto free_internal;
+	}
+	return 0;
+
+free_internal:
+	kfree(si);
+	return rc;
+}
+EXPORT_SYMBOL(fc_fcp_init);
+
+/*
+ * Module init: create the global slab cache for fc_fcp_pkt allocations
+ * used by every lport's mempool (see fc_fcp_init()).
+ */
+static int __init libfc_init(void)
+{
+	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
+					    sizeof(struct fc_fcp_pkt),
+					    0, SLAB_HWCACHE_ALIGN, NULL);
+	if (scsi_pkt_cachep == NULL) {
+		FC_DBG("Unable to allocate SRB cache...module load failed!");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/* Module exit: release the fc_fcp_pkt slab cache. */
+static void __exit libfc_exit(void)
+{
+	kmem_cache_destroy(scsi_pkt_cachep);
+}
+
+module_init(libfc_init);
+module_exit(libfc_exit);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
new file mode 100644
index 0000000..7ba241e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Frame allocation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+
+#include <scsi/libfc/fc_frame.h>
+
+/*
+ * Check the CRC in a frame.
+ *
+ * The frame must be linear.  The length is rounded up to a 4-byte
+ * multiple to cover any fill bytes; the 32-bit CRC stored immediately
+ * after is XORed against the computed CRC32.  Returns 0 when the CRC
+ * matches, non-zero otherwise; also clears FCPHF_CRC_UNCHECKED.
+ */
+u32 fc_frame_crc_check(struct fc_frame *fp)
+{
+	u32 crc;
+	u32 error;
+	const u8 *bp;
+	unsigned int len;
+
+	WARN_ON(!fc_frame_is_linear(fp));
+	fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+	len = (fr_len(fp) + 3) & ~3;	/* round up length to include fill */
+	bp = (const u8 *) fr_hdr(fp);
+	crc = ~crc32(~0, bp, len);
+	error = crc ^ *(u32 *) (bp + len);
+	return error;
+}
+EXPORT_SYMBOL(fc_frame_crc_check);
+
+/*
+ * Allocate a frame intended to be sent via fcoe_xmit.
+ * Get an sk_buff for the frame and set the length.
+ *
+ * @len is the payload length (must be a multiple of 4); the frame header
+ * size is added here, and head/tail room is reserved for the transport.
+ * Returns NULL on allocation failure.
+ */
+struct fc_frame *__fc_frame_alloc(size_t len)
+{
+	struct fc_frame *fp;
+	struct sk_buff *skb;
+
+	WARN_ON((len % sizeof(u32)) != 0);
+	len += sizeof(struct fc_frame_header);
+	skb = dev_alloc_skb(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM);
+	if (!skb)
+		return NULL;
+	/* the fc_frame is overlaid on the sk_buff itself */
+	fp = (struct fc_frame *) skb;
+	fc_frame_init(fp);
+	skb_reserve(skb, FC_FRAME_HEADROOM);
+	skb_put(skb, len);
+	return fp;
+}
+EXPORT_SYMBOL(__fc_frame_alloc);
+
+
+/*
+ * Allocate a frame for an arbitrary payload length: round the allocation
+ * up to a 4-byte multiple, zero the pad ("fill") bytes, then trim the skb
+ * back to the frame header plus the requested payload.
+ */
+struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
+{
+	struct fc_frame *fp;
+	size_t fill;
+
+	fill = payload_len % 4;
+	if (fill != 0)
+		fill = 4 - fill;
+	fp = __fc_frame_alloc(payload_len + fill);
+	if (fp) {
+		memset((char *) fr_hdr(fp) + payload_len, 0, fill);
+		/* trim is OK, we just allocated it so there are no fragments */
+		/*
+		 * __fc_frame_alloc() sized the skb as header + payload +
+		 * fill; trimming to payload_len alone would drop the frame
+		 * header from the skb length, so include it here.
+		 */
+		skb_trim(fp_skb(fp),
+			 payload_len + sizeof(struct fc_frame_header));
+	}
+	return fp;
+}
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
new file mode 100644
index 0000000..33cd556
--- /dev/null
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -0,0 +1,914 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Logical interface support.
+ */
+
+#include <linux/timer.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc/libfc.h>
+
+/* Fabric IDs to use for point-to-point mode, chosen on whims. */
+#define FC_LOCAL_PTP_FID_LO 0x010101
+#define FC_LOCAL_PTP_FID_HI 0x010102
+
+#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
+
+static int fc_lport_debug;
+
+static void fc_lport_enter_flogi(struct fc_lport *);
+static void fc_lport_enter_logo(struct fc_lport *);
+
+/* Human-readable names for enum lport states, indexed by lp->state. */
+static const char *fc_lport_state_names[] = {
+	[LPORT_ST_NONE] =	"none",
+	[LPORT_ST_FLOGI] =	"FLOGI",
+	[LPORT_ST_DNS] =	"dNS",
+	[LPORT_ST_REG_PN] =	"REG_PN",
+	[LPORT_ST_REG_FT] =	"REG_FT",
+	[LPORT_ST_SCR] =	"SCR",
+	[LPORT_ST_READY] =	"ready",
+	[LPORT_ST_DNS_STOP] =	"stop",
+	[LPORT_ST_LOGO] =	"LOGO",
+	[LPORT_ST_RESET] =	"reset",
+};
+
+/* frame_send replacement installed at teardown: discard all frames. */
+static int fc_frame_drop(struct fc_lport *lp, struct fc_frame *fp)
+{
+	fc_frame_free(fp);
+	return 0;
+}
+
+/*
+ * Return the printable name for the lport's current state,
+ * or "unknown" for a state without an entry in the name table.
+ */
+static const char *fc_lport_state(struct fc_lport *lp)
+{
+	const char *name = fc_lport_state_names[lp->state];
+
+	return name ? name : "unknown";
+}
+
+/*
+ * Set up the point-to-point peer rport and enter the READY state.
+ * Called with the lport lock held; the lock is dropped around rport
+ * lookup/creation because the fc transport class can sleep there.
+ */
+static void fc_lport_ptp_setup(struct fc_lport *lp,
+			       u32 remote_fid, u64 remote_wwpn,
+			       u64 remote_wwnn)
+{
+	struct fc_rport *rport;
+	struct fc_rport_identifiers ids = {
+		.port_id = remote_fid,
+		.port_name = remote_wwpn,
+		.node_name = remote_wwnn,
+	};
+
+	/*
+	 * if we have to create a rport the fc class can sleep so we must
+	 * drop the lock here
+	 */
+	fc_lport_unlock(lp);
+	rport = lp->tt.rport_lookup(lp, ids.port_id); /* lookup and hold */
+	if (rport == NULL)
+		rport = lp->tt.rport_create(lp, &ids); /* create and hold */
+	fc_lport_lock(lp);
+	if (rport) {
+		/* replace any previous point-to-point peer */
+		if (lp->ptp_rp)
+			fc_remote_port_delete(lp->ptp_rp);
+		lp->ptp_rp = rport;
+		fc_lport_state_enter(lp, LPORT_ST_READY);
+	}
+}
+
+/* Drop the point-to-point peer rport, if any. */
+static void fc_lport_ptp_clear(struct fc_lport *lp)
+{
+	if (lp->ptp_rp) {
+		fc_remote_port_delete(lp->ptp_rp);
+		lp->ptp_rp = NULL;
+	}
+}
+
+/*
+ * Fill in FLOGI command for request.
+ *
+ * @op is the ELS opcode (ELS_FLOGI or, for the PLOGI variant, other
+ * opcodes get the extra common-service/class-3 parameters filled in).
+ * Populates WWPN/WWNN, common service parameters, and class 3 service
+ * parameters from the lport's configuration.
+ */
+static void
+fc_lport_flogi_fill(struct fc_lport *lp,
+		    struct fc_els_flogi *flogi, unsigned int op)
+{
+	struct fc_els_csp *sp;
+	struct fc_els_cssp *cp;
+
+	memset(flogi, 0, sizeof(*flogi));
+	flogi->fl_cmd = (u8) op;
+	put_unaligned_be64(lp->wwpn, &flogi->fl_wwpn);
+	put_unaligned_be64(lp->wwnn, &flogi->fl_wwnn);
+	sp = &flogi->fl_csp;
+	sp->sp_hi_ver = 0x20;
+	sp->sp_lo_ver = 0x20;
+	sp->sp_bb_cred = htons(10);	/* this gets set by gateway */
+	sp->sp_bb_data = htons((u16) lp->mfs);
+	cp = &flogi->fl_cssp[3 - 1];	/* class 3 parameters */
+	cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+	if (op != ELS_FLOGI) {
+		sp->sp_features = htons(FC_SP_FT_CIRO);
+		sp->sp_tot_seq = htons(255);	/* seq. we accept */
+		sp->sp_rel_off = htons(0x1f);
+		sp->sp_e_d_tov = htonl(lp->e_d_tov);
+
+		cp->cp_rdfs = htons((u16) lp->mfs);
+		cp->cp_con_seq = htons(255);
+		cp->cp_open_seq = 1;
+	}
+}
+
+/*
+ * Set the fid. This indicates that we have a new connection to the
+ * fabric so we should reset our list of fc_rports. Passing a fid of
+ * 0 will also reset the rport list regardless of the previous fid.
+ */
+static void fc_lport_set_fid(struct fc_lport *lp, u32 fid)
+{
+	/* a non-zero fid equal to the current one is a no-op */
+	if (fid != 0 && lp->fid == fid)
+		return;
+
+	if (fc_lport_debug)
+		FC_DBG("changing local port fid from %x to %x",
+		       lp->fid, fid);
+	lp->fid = fid;
+	lp->tt.rport_reset_list(lp);
+}
+
+/*
+ * Add a supported FC-4 type.
+ * Sets the bit for @type in the lport's FC-4 type bitmap (FC_NS_BPW
+ * bits per big-endian word), used for name-server FT registration.
+ */
+static void fc_lport_add_fc4_type(struct fc_lport *lp, enum fc_fh_type type)
+{
+	__be32 *mp;
+
+	mp = &lp->fcts.ff_type_map[type / FC_NS_BPW];
+	*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
+}
+
+/*
+ * Handle received RLIR - registered link incident report.
+ * We simply accept it (LS_ACC) and discard the report payload.
+ */
+static void fc_lport_rlir_req(struct fc_seq *sp, struct fc_frame *fp,
+			      struct fc_lport *lp)
+{
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle received ECHO.
+ * Reply with an LS_ACC frame echoing the request payload.
+ */
+static void fc_lport_echo_req(struct fc_seq *sp, struct fc_frame *in_fp,
+			      struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	unsigned int len;
+	void *pp;
+	void *dp;
+	u32 f_ctl;
+
+	len = fr_len(in_fp) - sizeof(struct fc_frame_header);
+	pp = fc_frame_payload_get(in_fp, len);
+
+	/*
+	 * Ensure room for the 4-byte LS_ACC opcode word.
+	 * NOTE(review): if the incoming payload is shorter than 4 bytes,
+	 * the memcpy below copies the raised len from pp — confirm the
+	 * request buffer always has at least 4 readable bytes.
+	 */
+	if (len < sizeof(__be32))
+		len = sizeof(__be32);
+	fp = fc_frame_alloc(lp, len);
+	if (fp) {
+		dp = fc_frame_payload_get(fp, len);
+		memcpy(dp, pp, len);
+		/* overwrite the first word with the LS_ACC opcode */
+		*((u32 *)dp) = htonl(ELS_LS_ACC << 24);
+		sp = lp->tt.seq_start_next(sp);
+		f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+		fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+		lp->tt.seq_send(lp, sp, fp, f_ctl);
+	}
+	fc_frame_free(in_fp);
+}
+
+/*
+ * Handle received RNID.
+ *
+ * Rejects a malformed request (LS_RJT); otherwise replies with an LS_ACC
+ * carrying the common identification data (WWPN/WWNN), plus the general
+ * topology data when the requested format is ELS_RNIDF_GEN and the lport
+ * has it configured.
+ */
+static void fc_lport_rnid_req(struct fc_seq *sp, struct fc_frame *in_fp,
+			      struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_els_rnid *req;
+	struct {
+		struct fc_els_rnid_resp rnid;
+		struct fc_els_rnid_cid cid;
+		struct fc_els_rnid_gen gen;
+	} *rp;
+	struct fc_seq_els_data rjt_data;
+	u8 fmt;
+	size_t len;
+	u32 f_ctl;
+
+	req = fc_frame_payload_get(in_fp, sizeof(*req));
+	if (!req) {
+		rjt_data.fp = NULL;
+		rjt_data.reason = ELS_RJT_LOGIC;
+		rjt_data.explan = ELS_EXPL_NONE;
+		lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	} else {
+		fmt = req->rnid_fmt;
+		len = sizeof(*rp);
+		if (fmt != ELS_RNIDF_GEN ||
+		    ntohl(lp->rnid_gen.rnid_atype) == 0) {
+			fmt = ELS_RNIDF_NONE;	/* nothing to provide */
+			len -= sizeof(rp->gen);
+		}
+		fp = fc_frame_alloc(lp, len);
+		if (fp) {
+			rp = fc_frame_payload_get(fp, len);
+			memset(rp, 0, len);
+			rp->rnid.rnid_cmd = ELS_LS_ACC;
+			rp->rnid.rnid_fmt = fmt;
+			rp->rnid.rnid_cid_len = sizeof(rp->cid);
+			rp->cid.rnid_wwpn = htonll(lp->wwpn);
+			rp->cid.rnid_wwnn = htonll(lp->wwnn);
+			if (fmt == ELS_RNIDF_GEN) {
+				rp->rnid.rnid_sid_len = sizeof(rp->gen);
+				memcpy(&rp->gen, &lp->rnid_gen,
+				       sizeof(rp->gen));
+			}
+			sp = lp->tt.seq_start_next(sp);
+			f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+			fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+			lp->tt.seq_send(lp, sp, fp, f_ctl);
+		}
+	}
+	fc_frame_free(in_fp);
+}
+
+/*
+ * Handle received fabric logout request.
+ * Accept it, then reset the local port state.
+ */
+static void fc_lport_recv_logo_req(struct fc_seq *sp, struct fc_frame *fp,
+				   struct fc_lport *lp)
+{
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	fc_lport_enter_reset(lp);
+	fc_frame_free(fp);
+}
+
+/*
+ * Receive request frame
+ */
+
+/*
+ * Begin fabric login by resetting the port state machine.
+ * Only valid from the initial (NONE) state; returns 0 on success,
+ * -1 otherwise.
+ */
+int fc_fabric_login(struct fc_lport *lp)
+{
+	if (lp->state != LPORT_ST_NONE)
+		return -1;
+
+	fc_lport_lock(lp);
+	fc_lport_enter_reset(lp);
+	fc_lport_unlock(lp);
+	return 0;
+}
+EXPORT_SYMBOL(fc_fabric_login);
+
+/**
+ * fc_linkup - link up notification
+ * @lp: Pointer to fc_lport.
+ *
+ * Records the link-up state and, if the port was sitting in RESET,
+ * kicks off fabric login.
+ **/
+void fc_linkup(struct fc_lport *lp)
+{
+	if ((lp->link_status & FC_LINK_UP) != FC_LINK_UP) {
+		lp->link_status |= FC_LINK_UP;
+		fc_lport_lock(lp);
+		if (lp->state == LPORT_ST_RESET)
+			lp->tt.lport_login(lp);
+		fc_lport_unlock(lp);
+	}
+}
+EXPORT_SYMBOL(fc_linkup);
+
+/**
+ * fc_linkdown - link down notification
+ * @lp: Pointer to fc_lport.
+ *
+ * Clears the link-up state, resets the port and cleans up
+ * outstanding SCSI commands.
+ **/
+void fc_linkdown(struct fc_lport *lp)
+{
+	if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP) {
+		lp->link_status &= ~(FC_LINK_UP);
+		fc_lport_enter_reset(lp);
+		lp->tt.scsi_cleanup(lp);
+	}
+}
+EXPORT_SYMBOL(fc_linkdown);
+
+/* Pause I/O submission: fc_queuecommand returns HOST_BUSY while set. */
+void fc_pause(struct fc_lport *lp)
+{
+	lp->link_status |= FC_PAUSE;
+}
+EXPORT_SYMBOL(fc_pause);
+
+/* Clear the pause flag set by fc_pause(). */
+void fc_unpause(struct fc_lport *lp)
+{
+	lp->link_status &= ~(FC_PAUSE);
+}
+EXPORT_SYMBOL(fc_unpause);
+
+/*
+ * Log off the fabric, choosing the appropriate teardown action for the
+ * current state: reset for login-phase states, LOGO for dNS states, and
+ * discovery stop once registered/ready.  Always cleans up SCSI state.
+ * Returns 0.
+ */
+int fc_fabric_logoff(struct fc_lport *lp)
+{
+	fc_lport_lock(lp);
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+		break;
+	case LPORT_ST_FLOGI:
+	case LPORT_ST_LOGO:
+	case LPORT_ST_RESET:
+		fc_lport_enter_reset(lp);
+		break;
+	case LPORT_ST_DNS:
+	case LPORT_ST_DNS_STOP:
+		fc_lport_enter_logo(lp);
+		break;
+	case LPORT_ST_REG_PN:
+	case LPORT_ST_REG_FT:
+	case LPORT_ST_SCR:
+	case LPORT_ST_READY:
+		lp->tt.disc_stop(lp);
+		break;
+	}
+	fc_lport_unlock(lp);
+	lp->tt.scsi_cleanup(lp);
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_fabric_logoff);
+
+/**
+ * fc_lport_destroy - unregister a fc_lport
+ * @lp: fc_lport pointer to unregister
+ *
+ * Return value:
+ *	Always 0.
+ * Note:
+ *	exit routine for fc_lport instance
+ *	clean-up all the allocated memory
+ *	and free up other system resources.
+ *
+ **/
+int fc_lport_destroy(struct fc_lport *lp)
+{
+	fc_lport_lock(lp);
+	fc_lport_state_enter(lp, LPORT_ST_LOGO);
+	fc_lport_unlock(lp);
+
+	cancel_delayed_work_sync(&lp->ns_disc_work);
+
+	lp->tt.scsi_abort_io(lp);
+
+	/* from here on, all outgoing frames are silently discarded */
+	lp->tt.frame_send = fc_frame_drop;
+
+	lp->tt.exch_mgr_reset(lp->emp, 0, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_destroy);
+
+/*
+ * Set the lport's maximum frame size (payload, i.e. @mfs minus the FC
+ * frame header, clamped to FC_MAX_FRAME and rounded down to a 4-byte
+ * multiple).  Shrinking the MFS forces re-discovery via a port reset.
+ * Returns 0 on success, -1 if @mfs is below the minimum.
+ */
+int fc_set_mfs(struct fc_lport *lp, u32 mfs)
+{
+	unsigned int old_mfs;
+	int rc = -1;
+
+	old_mfs = lp->mfs;
+
+	if (mfs >= FC_MIN_MAX_FRAME) {
+		mfs &= ~3;
+		WARN_ON((size_t) mfs < FC_MIN_MAX_FRAME);
+		if (mfs > FC_MAX_FRAME)
+			mfs = FC_MAX_FRAME;
+		mfs -= sizeof(struct fc_frame_header);
+		lp->mfs = mfs;
+		rc = 0;
+	}
+
+	if (!rc && mfs < old_mfs) {
+		/* smaller frames: redo discovery so peers learn the new size */
+		lp->ns_disc_done = 0;
+		fc_lport_enter_reset(lp);
+	}
+	return rc;
+}
+EXPORT_SYMBOL(fc_set_mfs);
+
+/*
+ * re-enter state for retrying a request after a timeout or alloc failure.
+ * Only FLOGI and LOGO are retryable here; any other state is unexpected
+ * (WARN_ON) because their retries are handled elsewhere.
+ */
+static void fc_lport_enter_retry(struct fc_lport *lp)
+{
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+	case LPORT_ST_READY:
+	case LPORT_ST_RESET:
+	case LPORT_ST_DNS:
+	case LPORT_ST_DNS_STOP:
+	case LPORT_ST_REG_PN:
+	case LPORT_ST_REG_FT:
+	case LPORT_ST_SCR:
+		WARN_ON(1);
+		break;
+	case LPORT_ST_FLOGI:
+		fc_lport_enter_flogi(lp);
+		break;
+	case LPORT_ST_LOGO:
+		fc_lport_enter_logo(lp);
+		break;
+	}
+}
+
+/*
+ * enter next state for handling an exchange reject or retry exhaustion
+ * in the current state.
+ * FLOGI starts over; a rejected LOGO falls back to reset; all other
+ * states are unexpected here (WARN_ON).
+ */
+static void fc_lport_enter_reject(struct fc_lport *lp)
+{
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+	case LPORT_ST_READY:
+	case LPORT_ST_RESET:
+	case LPORT_ST_REG_PN:
+	case LPORT_ST_REG_FT:
+	case LPORT_ST_SCR:
+	case LPORT_ST_DNS_STOP:
+	case LPORT_ST_DNS:
+		WARN_ON(1);
+		break;
+	case LPORT_ST_FLOGI:
+		fc_lport_enter_flogi(lp);
+		break;
+	case LPORT_ST_LOGO:
+		fc_lport_enter_reset(lp);
+		break;
+	}
+}
+
+/*
+ * Handle resource allocation problem by retrying in a bit.
+ * Re-arms the state timer (e_d_tov) up to max_retry_count times,
+ * then gives up via fc_lport_enter_reject().
+ */
+static void fc_lport_retry(struct fc_lport *lp)
+{
+	if (lp->retry_count == 0)
+		FC_DBG("local port %6x alloc failure in state %s "
+		       "- will retry", lp->fid, fc_lport_state(lp));
+	if (lp->retry_count < lp->max_retry_count) {
+		lp->retry_count++;
+		mod_timer(&lp->state_timer,
+			  jiffies + msecs_to_jiffies(lp->e_d_tov));
+	} else {
+		FC_DBG("local port %6x alloc failure in state %s "
+		       "- retries exhausted", lp->fid,
+		       fc_lport_state(lp));
+		fc_lport_enter_reject(lp);
+	}
+}
+
+/*
+ * A received FLOGI request indicates a point-to-point connection.
+ * Accept it with the common service parameters indicating our N port.
+ * Set up to do a PLOGI if we have the higher-number WWPN.
+ */
+static void fc_lport_recv_flogi_req(struct fc_seq *sp_in,
+				    struct fc_frame *rx_fp,
+				    struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_frame_header *fh;
+	struct fc_seq *sp;
+	struct fc_els_flogi *flp;
+	struct fc_els_flogi *new_flp;
+	u64 remote_wwpn;
+	u32 remote_fid;
+	u32 local_fid;
+	u32 f_ctl;
+
+	fh = fc_frame_header_get(rx_fp);
+	remote_fid = ntoh24(fh->fh_s_id);
+	flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
+	if (!flp)
+		goto out;
+	remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
+	if (remote_wwpn == lp->wwpn) {
+		FC_DBG("FLOGI from port with same WWPN %llx "
+		       "possible configuration error.", remote_wwpn);
+		goto out;
+	}
+	FC_DBG("FLOGI from port WWPN %llx ", remote_wwpn);
+	fc_lport_lock(lp);
+
+	/*
+	 * XXX what is the right thing to do for FIDs?
+	 * The originator might expect our S_ID to be 0xfffffe.
+	 * But if so, both of us could end up with the same FID.
+	 */
+	local_fid = FC_LOCAL_PTP_FID_LO;
+	if (remote_wwpn < lp->wwpn) {
+		/* we have the higher WWPN: take the HI fid */
+		local_fid = FC_LOCAL_PTP_FID_HI;
+		if (!remote_fid || remote_fid == local_fid)
+			remote_fid = FC_LOCAL_PTP_FID_LO;
+	} else if (!remote_fid) {
+		remote_fid = FC_LOCAL_PTP_FID_HI;
+	}
+	fc_lport_set_fid(lp, local_fid);
+
+	fp = fc_frame_alloc(lp, sizeof(*flp));
+	if (fp) {
+		sp = lp->tt.seq_start_next(fr_seq(rx_fp));
+		new_flp = fc_frame_payload_get(fp, sizeof(*flp));
+		fc_lport_flogi_fill(lp, new_flp, ELS_FLOGI);
+		new_flp->fl_cmd = (u8) ELS_LS_ACC;
+
+		/*
+		 * Send the response. If this fails, the originator should
+		 * repeat the sequence.
+		 */
+		f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+		fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+		lp->tt.seq_send(lp, sp, fp, f_ctl);
+
+	} else {
+		fc_lport_retry(lp);
+	}
+	/*
+	 * NOTE(review): ptp_setup runs even on the alloc-failure path
+	 * above — confirm that is intended.
+	 */
+	fc_lport_ptp_setup(lp, remote_fid, remote_wwpn,
+			   get_unaligned_be64(&flp->fl_wwnn));
+	fc_lport_unlock(lp);
+	if (lp->tt.disc_start(lp))
+		FC_DBG("target discovery start error\n");
+out:
+	/* NOTE(review): this assignment is dead — sp is not used after it */
+	sp = fr_seq(rx_fp);
+	fc_frame_free(rx_fp);
+}
+
+/*
+ * Receive request frame for the local port.
+ *
+ * Dispatches session-less ELS requests (FLOGI, fabric LOGO, RSCN, ECHO,
+ * RLIR, RNID) to their handlers; everything else is routed to the rport
+ * matching the frame's S_ID, or rejected with LS_RJT if none exists.
+ * The exchange is completed on all paths.
+ */
+static void fc_lport_recv(struct fc_lport *lp, struct fc_seq *sp,
+			  struct fc_frame *fp)
+{
+	struct fc_frame_header *fh = fc_frame_header_get(fp);
+	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+	u32 s_id;
+	u32 d_id;
+	struct fc_seq_els_data rjt_data;
+
+	/*
+	 * Handle special ELS cases like FLOGI, LOGO, and
+	 * RSCN here.  These don't require a session.
+	 * Even if we had a session, it might not be ready.
+	 */
+	if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+		/*
+		 * Check opcode.
+		 */
+		recv = NULL;
+		switch (fc_frame_payload_op(fp)) {
+		case ELS_FLOGI:
+			recv = fc_lport_recv_flogi_req;
+			break;
+		case ELS_LOGO:
+			fh = fc_frame_header_get(fp);
+			/* only a fabric-originated LOGO is handled here */
+			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI)
+				recv = fc_lport_recv_logo_req;
+			break;
+		case ELS_RSCN:
+			recv = lp->tt.disc_recv_req;
+			break;
+		case ELS_ECHO:
+			recv = fc_lport_echo_req;
+			break;
+		case ELS_RLIR:
+			recv = fc_lport_rlir_req;
+			break;
+		case ELS_RNID:
+			recv = fc_lport_rnid_req;
+			break;
+		}
+
+		if (recv)
+			recv(sp, fp, lp);
+		else {
+			/*
+			 * Find session.
+			 * If this is a new incoming PLOGI, we won't find it.
+			 */
+			s_id = ntoh24(fh->fh_s_id);
+			d_id = ntoh24(fh->fh_d_id);
+
+			rport = lp->tt.rport_lookup(lp, s_id);
+			if (rport) {
+				rp = rport->dd_data;
+				lp->tt.rport_recv_req(sp, fp, rp);
+				put_device(&rport->dev); /* hold from lookup */
+			} else {
+				rjt_data.fp = NULL;
+				rjt_data.reason = ELS_RJT_UNAB;
+				rjt_data.explan = ELS_EXPL_NONE;
+				lp->tt.seq_els_rsp_send(sp,
+							ELS_LS_RJT, &rjt_data);
+				fc_frame_free(fp);
+			}
+		}
+	} else {
+		FC_DBG("dropping invalid frame (eof %x)", fr_eof(fp));
+		fc_frame_free(fp);
+	}
+
+	/*
+	 * The common exch_done for all request may not be good
+	 * if any request requires longer hold on exhange. XXX
+	 */
+	lp->tt.exch_done(sp);
+}
+
+/*
+ * Put the local port back into the initial state.  Reset all sessions.
+ * This is called after a SCSI reset or the driver is unloading
+ * or the program is exiting.
+ *
+ * Caller holds the lport lock (see comment below).  If the link is up,
+ * immediately re-starts fabric login with FLOGI.  Returns 0.
+ */
+int fc_lport_enter_reset(struct fc_lport *lp)
+{
+	if (fc_lport_debug)
+		FC_DBG("Processing RESET state");
+
+	if (lp->dns_rp) {
+		fc_remote_port_delete(lp->dns_rp);
+		lp->dns_rp = NULL;
+	}
+	fc_lport_ptp_clear(lp);
+
+	/*
+	 * Setting state RESET keeps fc_lport_error() callbacks
+	 * by exch_mgr_reset() from recursing on the lock.
+	 * It also causes fc_lport_sess_event() to ignore events.
+	 * The lock is held for the duration of the time in RESET state.
+	 */
+	fc_lport_state_enter(lp, LPORT_ST_RESET);
+	lp->tt.exch_mgr_reset(lp->emp, 0, 0);
+	fc_lport_set_fid(lp, 0);
+	if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
+		fc_lport_enter_flogi(lp);
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_enter_reset);
+
+/*
+ * Handle errors on local port requests.
+ * Don't get locks if in RESET state.
+ * The only possible errors so far are exchange TIMEOUT and CLOSED (reset).
+ *
+ * @fp is an ERR_PTR-encoded error.  A timeout retries the current state
+ * up to max_retry_count times, then rejects; CLOSED is ignored here.
+ */
+static void fc_lport_error(struct fc_lport *lp, struct fc_frame *fp)
+{
+	if (lp->state == LPORT_ST_RESET)
+		return;
+
+	fc_lport_lock(lp);
+	if (PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+		if (lp->retry_count < lp->max_retry_count) {
+			lp->retry_count++;
+			fc_lport_enter_retry(lp);
+		} else {
+			fc_lport_enter_reject(lp);
+
+		}
+	}
+	if (fc_lport_debug)
+		FC_DBG("error %ld retries %d limit %d",
+		       PTR_ERR(fp), lp->retry_count, lp->max_retry_count);
+	fc_lport_unlock(lp);
+}
+
+/* State timer handler: retry the current state (armed by fc_lport_retry). */
+static void fc_lport_timeout(unsigned long lp_arg)
+{
+	struct fc_lport *lp = (struct fc_lport *)lp_arg;
+
+	fc_lport_lock(lp);
+	fc_lport_enter_retry(lp);
+	fc_lport_unlock(lp);
+}
+
+/*
+ * Handle the LOGO response: any reply (the payload is not inspected)
+ * completes the logout by resetting the port; frame errors go through
+ * the common fc_lport_error() path.
+ */
+static void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+			       void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+
+	if (IS_ERR(fp))
+		fc_lport_error(lp, fp);
+	else {
+		fc_frame_free(fp);
+		fc_lport_lock(lp);
+		fc_lport_enter_reset(lp);
+		fc_lport_unlock(lp);
+	}
+}
+
+/*
+ * Logout of the FC fabric: enter LOGO state and send an ELS LOGO for our
+ * own N_Port ID/WWPN to the fabric login server (FC_FID_FLOGI).
+ */
+static void fc_lport_enter_logo(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_els_logo *logo;
+
+	if (fc_lport_debug)
+		FC_DBG("Processing LOGO state");
+
+	fc_lport_state_enter(lp, LPORT_ST_LOGO);
+
+	/* DNS session should be closed so we can release it here */
+	if (lp->dns_rp) {
+		fc_remote_port_delete(lp->dns_rp);
+		lp->dns_rp = NULL;
+	}
+
+	fp = fc_frame_alloc(lp, sizeof(*logo));
+	if (!fp) {
+		FC_DBG("failed to allocate frame\n");
+		return;
+	}
+
+	/* Build the LOGO payload: our FID and port WWN. */
+	logo = fc_frame_payload_get(fp, sizeof(*logo));
+	memset(logo, 0, sizeof(*logo));
+	logo->fl_cmd = ELS_LOGO;
+	hton24(logo->fl_n_port_id, lp->fid);
+	logo->fl_n_port_wwn = htonll(lp->wwpn);
+
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	fc_frame_set_offset(fp, 0);
+
+	/* Response (or error) is handled in fc_lport_logo_resp(). */
+	lp->tt.exch_seq_send(lp, fp,
+			     fc_lport_logo_resp,
+			     lp, lp->e_d_tov,
+			     lp->fid, FC_FID_FLOGI,
+			     FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+}
+
+/*
+ * Start fabric logout unless a LOGO is already in progress.
+ * Always returns 0.
+ */
+static int fc_lport_logout(struct fc_lport *lp)
+{
+	fc_lport_lock(lp);
+	if (lp->state != LPORT_ST_LOGO)
+		fc_lport_enter_logo(lp);
+	fc_lport_unlock(lp);
+	return 0;
+}
+
+/*
+ * Handle incoming ELS FLOGI response.
+ * Save parameters of remote switch. Finish exchange.
+ *
+ * On error (fp is an ERR_PTR) only the common error handling runs; on
+ * success the frame is consumed here.
+ */
+static void
+fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+	struct fc_frame_header *fh;
+	struct fc_els_flogi *flp;
+	u32 did;
+	u16 csp_flags;
+	unsigned int r_a_tov;
+	unsigned int e_d_tov;
+	u16 mfs;
+
+	if (IS_ERR(fp)) {
+		fc_lport_error(lp, fp);
+		return;
+	}
+
+	fh = fc_frame_header_get(fp);
+	did = ntoh24(fh->fh_d_id);
+	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
+		if (fc_lport_debug)
+			FC_DBG("assigned fid %x", did);
+		fc_lport_lock(lp);
+		fc_lport_set_fid(lp, did);
+		flp = fc_frame_payload_get(fp, sizeof(*flp));
+		if (flp) {
+			/* Max receive size: low bits of the BB data field. */
+			mfs = ntohs(flp->fl_csp.sp_bb_data) &
+				FC_SP_BB_DATA_MASK;
+			if (mfs >= FC_SP_MIN_MAX_PAYLOAD &&
+			    mfs < lp->mfs)
+				lp->mfs = mfs;
+			csp_flags = ntohs(flp->fl_csp.sp_features);
+			r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
+			e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
+			if (csp_flags & FC_SP_FT_EDTR)
+				e_d_tov /= 1000000;
+			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+				/* Peer is an N_Port: point-to-point mode. */
+				if (e_d_tov > lp->e_d_tov)
+					lp->e_d_tov = e_d_tov;
+				lp->r_a_tov = 2 * e_d_tov;
+				FC_DBG("point-to-point mode");
+				fc_lport_ptp_setup(lp, ntoh24(fh->fh_s_id),
+						   get_unaligned_be64(
+							   &flp->fl_wwpn),
+						   get_unaligned_be64(
+							   &flp->fl_wwnn));
+			} else {
+				/* Fabric login: adopt fabric timeout values. */
+				lp->e_d_tov = e_d_tov;
+				lp->r_a_tov = r_a_tov;
+				lp->tt.disc_enter_dns(lp);
+			}
+		}
+		fc_lport_unlock(lp);
+		if (flp) {
+			csp_flags = ntohs(flp->fl_csp.sp_features);
+			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+				/* Start discovery outside the lport lock. */
+				if (lp->tt.disc_start(lp))
+					FC_DBG("target disc start error\n");
+			}
+		}
+	} else {
+		FC_DBG("bad FLOGI response\n");
+	}
+	/*
+	 * Done with the frame. The previous code fell through to
+	 * fc_lport_error() after freeing fp, invoking the error path with
+	 * an already-freed frame pointer on every successful response.
+	 */
+	fc_frame_free(fp);
+}
+
+/*
+ * Send ELS (extended link service) FLOGI request to peer.
+ * On allocation or send failure, arm a retry via fc_lport_retry().
+ */
+static void fc_lport_flogi_send(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_els_flogi *flp;
+
+	fp = fc_frame_alloc(lp, sizeof(*flp));
+	if (!fp) {
+		/*
+		 * Was "return fc_lport_retry(lp);" -- returning a void
+		 * expression from a void function is not valid ISO C.
+		 */
+		fc_lport_retry(lp);
+		return;
+	}
+
+	flp = fc_frame_payload_get(fp, sizeof(*flp));
+	fc_lport_flogi_fill(lp, flp, ELS_FLOGI);
+
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	fc_frame_set_offset(fp, 0);
+
+	/* S_ID is 0 before fabric login; D_ID is the well-known FLOGI FID. */
+	if (!lp->tt.exch_seq_send(lp, fp,
+				  fc_lport_flogi_resp,
+				  lp, lp->e_d_tov,
+				  0, FC_FID_FLOGI,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		fc_lport_retry(lp);
+}
+
+/*
+ * Enter the FLOGI state and send a fabric login request.
+ * Caller holds the lport lock (called from reset/retry paths).
+ */
+void fc_lport_enter_flogi(struct fc_lport *lp)
+{
+	if (fc_lport_debug)
+		FC_DBG("Processing FLOGI state");
+	fc_lport_state_enter(lp, LPORT_ST_FLOGI);
+	fc_lport_flogi_send(lp);
+}
+
+/*
+ * Configure a fc_lport: initialize the state timer and lock, set the
+ * discovery delay, and register the FC-4 types we support (FCP and CT).
+ * Always returns 0.
+ */
+int fc_lport_config(struct fc_lport *lp)
+{
+	setup_timer(&lp->state_timer, fc_lport_timeout, (unsigned long)lp);
+	spin_lock_init(&lp->state_lock);
+
+	fc_lport_lock(lp);
+	fc_lport_state_enter(lp, LPORT_ST_NONE);
+	fc_lport_unlock(lp);
+
+	lp->ns_disc_delay = DNS_DELAY;
+
+	fc_lport_add_fc4_type(lp, FC_TYPE_FCP);
+	fc_lport_add_fc4_type(lp, FC_TYPE_CT);
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_config);
+
+/*
+ * Fill in any libfc transport-template handlers the LLD did not override.
+ * Always returns 0.
+ *
+ * NOTE(review): lport_login defaults to fc_lport_enter_reset, i.e. login
+ * is driven through the reset path (reset ends in FLOGI when the link is
+ * up) -- confirm this is intended rather than a copy/paste slip.
+ */
+int fc_lport_init(struct fc_lport *lp)
+{
+	if (!lp->tt.lport_recv)
+		lp->tt.lport_recv = fc_lport_recv;
+
+	if (!lp->tt.lport_login)
+		lp->tt.lport_login = fc_lport_enter_reset;
+
+	if (!lp->tt.lport_reset)
+		lp->tt.lport_reset = fc_lport_enter_reset;
+
+	if (!lp->tt.lport_logout)
+		lp->tt.lport_logout = fc_lport_logout;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_lport_init);
diff --git a/drivers/scsi/libfc/fc_ns.c b/drivers/scsi/libfc/fc_ns.c
new file mode 100644
index 0000000..5ac0e6f
--- /dev/null
+++ b/drivers/scsi/libfc/fc_ns.c
@@ -0,0 +1,1229 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Target Discovery
+ * Actually, this discovers all FC-4 remote ports, including FCP initiators.
+ */
+
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc/libfc.h>
+
+#define FC_NS_RETRY_LIMIT 3 /* max retries */
+#define FC_NS_RETRY_DELAY 500UL /* (msecs) delay */
+
+int fc_ns_debug;
+
+static void fc_ns_gpn_ft_req(struct fc_lport *);
+static void fc_ns_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
+static int fc_ns_new_target(struct fc_lport *, struct fc_rport *,
+ struct fc_rport_identifiers *);
+static void fc_ns_del_target(struct fc_lport *, struct fc_rport *);
+static void fc_ns_disc_done(struct fc_lport *);
+static void fcdt_ns_error(struct fc_lport *, struct fc_frame *);
+static void fc_ns_timeout(struct work_struct *);
+
+/*
+ * A port learned from the name server or an RSCN, queued for per-port
+ * GPN_ID/GNN_ID lookups before a remote port is created for it.
+ */
+struct fc_ns_port {
+	struct fc_lport *lp;		/* owning local port */
+	struct list_head peers;		/* entry on a temporary discovery list */
+	struct fc_rport_identifiers ids; /* IDs learned so far (-1 = unknown) */
+};
+
+static int fc_ns_gpn_id_req(struct fc_lport *, struct fc_ns_port *);
+static void fc_ns_gpn_id_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_ns_gpn_id_error(struct fc_ns_port *rp, struct fc_frame *fp);
+
+static int fc_ns_gnn_id_req(struct fc_lport *, struct fc_ns_port *);
+static void fc_ns_gnn_id_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_ns_gnn_id_error(struct fc_ns_port *, struct fc_frame *);
+static void fc_ns_enter_reg_pn(struct fc_lport *lp);
+static void fc_ns_error(struct fc_lport *lp, struct fc_frame *fp);
+static void fc_lport_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
+ unsigned int op, unsigned int req_size);
+static void fc_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg);
+static void fc_ns_retry(struct fc_lport *lp);
+static void fc_ns_single(struct fc_lport *, struct fc_ns_port *);
+static int fc_ns_restart(struct fc_lport *);
+
+
+/*
+ * Handle received RSCN - registered state change notification.
+ *
+ * Port-format pages are collected and rediscovered individually; any
+ * other address format (or a page allocation failure) triggers a full
+ * rediscovery. Malformed payloads are rejected with LS_RJT.
+ * The frame is consumed on all paths.
+ */
+static void fc_ns_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
+			   struct fc_lport *lp)
+{
+	struct fc_els_rscn *rp;
+	struct fc_els_rscn_page *pp;
+	struct fc_seq_els_data rjt_data;
+	unsigned int len;
+	int redisc = 0;
+	enum fc_els_rscn_ev_qual ev_qual;
+	enum fc_els_rscn_addr_fmt fmt;
+	LIST_HEAD(disc_list);
+	struct fc_ns_port *dp, *next;
+
+	rp = fc_frame_payload_get(fp, sizeof(*rp));
+
+	if (!rp || rp->rscn_page_len != sizeof(*pp))
+		goto reject;
+
+	len = ntohs(rp->rscn_plen);
+	if (len < sizeof(*rp))
+		goto reject;
+	len -= sizeof(*rp);
+
+	/*
+	 * The remaining payload must be a whole number of page entries;
+	 * otherwise the unsigned "len -= sizeof(*pp)" below would wrap
+	 * past zero and the loop would read beyond the frame.
+	 */
+	if (len % sizeof(*pp))
+		goto reject;
+
+	for (pp = (void *)(rp + 1); len; len -= sizeof(*pp), pp++) {
+		ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
+		ev_qual &= ELS_RSCN_EV_QUAL_MASK;
+		fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
+		fmt &= ELS_RSCN_ADDR_FMT_MASK;
+		/*
+		 * if we get an address format other than port
+		 * (area, domain, fabric), then do a full discovery
+		 */
+		switch (fmt) {
+		case ELS_ADDR_FMT_PORT:
+			dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+			if (!dp) {
+				redisc = 1;
+				break;
+			}
+			dp->lp = lp;
+			dp->ids.port_id = ntoh24(pp->rscn_fid);
+			dp->ids.port_name = -1;
+			dp->ids.node_name = -1;
+			dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+			list_add_tail(&dp->peers, &disc_list);
+			break;
+		case ELS_ADDR_FMT_AREA:
+		case ELS_ADDR_FMT_DOM:
+		case ELS_ADDR_FMT_FAB:
+		default:
+			redisc = 1;
+			break;
+		}
+	}
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+	if (redisc) {
+		if (fc_ns_debug)
+			FC_DBG("RSCN received: rediscovering");
+		/* Full rediscovery supersedes the per-port list. */
+		list_for_each_entry_safe(dp, next, &disc_list, peers) {
+			list_del(&dp->peers);
+			kfree(dp);
+		}
+		fc_ns_restart(lp);
+	} else {
+		if (fc_ns_debug)
+			FC_DBG("RSCN received: not rediscovering. "
+			       "redisc %d state %d in_prog %d",
+			       redisc, lp->state, lp->ns_disc_pending);
+		/* fc_ns_single() takes ownership of each dp. */
+		list_for_each_entry_safe(dp, next, &disc_list, peers) {
+			list_del(&dp->peers);
+			fc_ns_single(lp, dp);
+		}
+	}
+	fc_frame_free(fp);
+	return;
+reject:
+	rjt_data.fp = NULL;
+	rjt_data.reason = ELS_RJT_LOGIC;
+	rjt_data.explan = ELS_EXPL_NONE;
+	lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle an incoming ELS request routed to the discovery code.
+ * Only RSCN is expected; anything else is logged and its frame dropped.
+ */
+static void fc_ns_recv_req(struct fc_seq *sp, struct fc_frame *fp,
+			   struct fc_lport *lp)
+{
+	switch (fc_frame_payload_op(fp)) {
+	case ELS_RSCN:
+		fc_ns_rscn_req(sp, fp, lp);
+		break;
+	default:
+		/* Fixed "recieved" typo and a frame leak on this path. */
+		FC_DBG("fc_ns received an unexpected request\n");
+		fc_frame_free(fp);
+		break;
+	}
+}
+
+/*
+ * Handle the State Change Registration (SCR) response: on success enter
+ * READY and start target discovery; on error run common error handling.
+ */
+static void fc_ns_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
+			   void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+	int err;
+
+	if (IS_ERR(fp))
+		fc_ns_error(lp, fp);
+	else {
+		fc_lport_lock(lp);
+		fc_lport_state_enter(lp, LPORT_ST_READY);
+		fc_lport_unlock(lp);
+		err = lp->tt.disc_start(lp);
+		if (err)
+			FC_DBG("target discovery start error\n");
+		fc_frame_free(fp);
+	}
+}
+
+/*
+ * Enter the SCR state and send a State Change Registration request to
+ * the fabric controller so we receive RSCNs.
+ *
+ * On frame allocation failure, schedule a retry instead of dereferencing
+ * the NULL frame (the previous code fell through to fc_frame_setup(fp)
+ * and exch_seq_send() with fp == NULL).
+ */
+static void fc_ns_enter_scr(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_els_scr *scr;
+
+	if (fc_ns_debug)
+		FC_DBG("Processing SCR state");
+
+	fc_lport_state_enter(lp, LPORT_ST_SCR);
+
+	fp = fc_frame_alloc(lp, sizeof(*scr));
+	if (!fp) {
+		fc_ns_retry(lp);
+		return;
+	}
+	scr = fc_frame_payload_get(fp, sizeof(*scr));
+	memset(scr, 0, sizeof(*scr));
+	scr->scr_cmd = ELS_SCR;
+	scr->scr_reg_func = ELS_SCRF_FULL;	/* register for all RSCNs */
+
+	fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+	fc_frame_set_offset(fp, 0);
+
+	lp->tt.exch_seq_send(lp, fp,
+			     fc_ns_scr_resp,
+			     lp, lp->e_d_tov,
+			     lp->fid, FC_FID_FCTRL,
+			     FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+}
+
+/*
+ * Register FC4-types with name server.
+ *
+ * Sends an RFT_ID carrying our FID and FC-4 type bitmap. If no FC-4
+ * types are set, skips straight to SCR. Allocation or send failure
+ * arms a retry.
+ */
+static void fc_ns_enter_reg_ft(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_fid fid;	/* port ID object */
+		struct fc_ns_fts fts;	/* FC4-types object */
+	} *req;
+	struct fc_ns_fts *lps;
+	int i;
+
+	if (fc_ns_debug)
+		FC_DBG("Processing REG_FT state");
+
+	fc_lport_state_enter(lp, LPORT_ST_REG_FT);
+
+	/* Scan the type map for any non-zero word. */
+	lps = &lp->fcts;
+	i = sizeof(lps->ff_type_map) / sizeof(lps->ff_type_map[0]);
+	while (--i >= 0)
+		if (ntohl(lps->ff_type_map[i]) != 0)
+			break;
+	if (i >= 0) {
+		fp = fc_frame_alloc(lp, sizeof(*req));
+		if (fp) {
+			req = fc_frame_payload_get(fp, sizeof(*req));
+			fc_lport_fill_dns_hdr(lp, &req->ct,
+					      FC_NS_RFT_ID,
+					      sizeof(*req) -
+					      sizeof(struct fc_ct_hdr));
+			hton24(req->fid.fp_fid, lp->fid);
+			req->fts = *lps;
+			fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+			if (!lp->tt.exch_seq_send(lp, fp,
+						  fc_ns_resp, lp,
+						  lp->e_d_tov,
+						  lp->fid,
+						  lp->dns_rp->port_id,
+						  FC_FC_SEQ_INIT |
+						  FC_FC_END_SEQ))
+				fc_ns_retry(lp);
+		} else {
+			fc_ns_retry(lp);
+		}
+	} else {
+		/* Nothing to register; go register for state changes. */
+		fc_ns_enter_scr(lp);
+	}
+}
+
+/*
+ * enter next state for handling an exchange reject or retry exhaustion
+ * in the current state.
+ *
+ * Registration states fall forward to the next step; SCR/DNS_STOP stop
+ * discovery; DNS failure resets the whole local port. The remaining
+ * states should never see a reject (WARN_ON).
+ */
+static void fc_ns_enter_reject(struct fc_lport *lp)
+{
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+	case LPORT_ST_READY:
+	case LPORT_ST_RESET:
+	case LPORT_ST_FLOGI:
+	case LPORT_ST_LOGO:
+		WARN_ON(1);
+		break;
+	case LPORT_ST_REG_PN:
+		fc_ns_enter_reg_ft(lp);
+		break;
+	case LPORT_ST_REG_FT:
+		fc_ns_enter_scr(lp);
+		break;
+	case LPORT_ST_SCR:
+	case LPORT_ST_DNS_STOP:
+		lp->tt.disc_stop(lp);
+		break;
+	case LPORT_ST_DNS:
+		lp->tt.lport_reset(lp);
+		break;
+	}
+}
+
+/*
+ * Re-enter (retry) the current name-server state after a timeout.
+ * States that never issue name-server requests should not get here
+ * (WARN_ON).
+ */
+static void fc_ns_enter_retry(struct fc_lport *lp)
+{
+	switch (lp->state) {
+	case LPORT_ST_NONE:
+	case LPORT_ST_RESET:
+	case LPORT_ST_READY:
+	case LPORT_ST_FLOGI:
+	case LPORT_ST_LOGO:
+		WARN_ON(1);
+		break;
+	case LPORT_ST_DNS:
+		lp->tt.disc_enter_dns(lp);
+		break;
+	case LPORT_ST_DNS_STOP:
+		lp->tt.disc_stop(lp);
+		break;
+	case LPORT_ST_REG_PN:
+		fc_ns_enter_reg_pn(lp);
+		break;
+	case LPORT_ST_REG_FT:
+		fc_ns_enter_reg_ft(lp);
+		break;
+	case LPORT_ST_SCR:
+		fc_ns_enter_scr(lp);
+		break;
+	}
+}
+
+/*
+ * Refresh target discovery, perhaps due to an RSCN.
+ * A configurable delay is introduced to collect any subsequent RSCNs.
+ *
+ * If a restart is already requested or discovery is in progress, only
+ * the request flag is (re)set. Always returns 0.
+ */
+static int fc_ns_restart(struct fc_lport *lp)
+{
+	fc_lport_lock(lp);
+	if (!lp->ns_disc_requested && !lp->ns_disc_pending) {
+		/* ns_disc_delay is in seconds; convert to jiffies. */
+		schedule_delayed_work(&lp->ns_disc_work,
+				      msecs_to_jiffies(lp->ns_disc_delay * 1000));
+	}
+	lp->ns_disc_requested = 1;
+	fc_lport_unlock(lp);
+	return 0;
+}
+
+/* unlocked varient of scsi_target_block from scsi_lib.c */
+#include "../scsi_priv.h"
+
+/* Per-device callback: block one SCSI device (data unused). */
+static void __device_block(struct scsi_device *sdev, void *data)
+{
+	scsi_internal_device_block(sdev);
+}
+
+/* Child iterator: block every device of a target child. Returns 0. */
+static int __target_block(struct device *dev, void *data)
+{
+	if (scsi_is_target_device(dev))
+		__starget_for_each_device(to_scsi_target(dev),
+					  NULL, __device_block);
+	return 0;
+}
+
+/*
+ * Block all SCSI devices under @dev: directly if @dev is itself a
+ * target, otherwise via each target child.
+ */
+static void __scsi_target_block(struct device *dev)
+{
+	if (scsi_is_target_device(dev))
+		__starget_for_each_device(to_scsi_target(dev),
+					  NULL, __device_block);
+	else
+		device_for_each_child(dev, NULL, __target_block);
+}
+
+/*
+ * Block all online remote ports (except the name-server rport) before
+ * rediscovery, so I/O is held off until each port is revalidated.
+ * Runs under the host lock.
+ */
+static void fc_block_rports(struct fc_lport *lp)
+{
+	struct Scsi_Host *shost = lp->host;
+	struct fc_rport *rport;
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	list_for_each_entry(rport, &fc_host_rports(shost), peers) {
+		/* protect the name service remote port */
+		if (rport == lp->dns_rp)
+			continue;
+		if (rport->port_state != FC_PORTSTATE_ONLINE)
+			continue;
+		rport->port_state = FC_PORTSTATE_BLOCKED;
+		rport->flags |= FC_RPORT_DEVLOSS_PENDING;
+		__scsi_target_block(&rport->dev);
+	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Fibre Channel Target discovery.
+ *
+ * Returns non-zero if discovery cannot be started.
+ *
+ * Callback is called for each target remote port found in discovery.
+ * When discovery is complete, the callback is called with a NULL remote port.
+ * Discovery may be restarted after an RSCN is received, causing the
+ * callback to be called after discovery complete is indicated.
+ */
+int fc_ns_disc_start(struct fc_lport *lp)
+{
+	struct fc_rport *rport;
+	int error;
+	struct fc_rport_identifiers ids;
+
+	fc_lport_lock(lp);
+
+	/*
+	 * If not ready, or already running discovery, just set request flag.
+	 */
+	if (!fc_lport_test_ready(lp) || lp->ns_disc_pending) {
+		lp->ns_disc_requested = 1;
+		fc_lport_unlock(lp);
+		return 0;
+	}
+	lp->ns_disc_pending = 1;
+	lp->ns_disc_requested = 0;
+	lp->ns_disc_retry_count = 0;
+
+	/*
+	 * Handle point-to-point mode as a simple discovery
+	 * of the remote port.
+	 */
+	rport = lp->ptp_rp;
+	if (rport) {
+		ids.port_id = rport->port_id;
+		ids.port_name = rport->port_name;
+		ids.node_name = rport->node_name;
+		ids.roles = FC_RPORT_ROLE_UNKNOWN;
+		/* Hold the rport across the unlocked new-target call. */
+		get_device(&rport->dev);
+		fc_lport_unlock(lp);
+		error = fc_ns_new_target(lp, rport, &ids);
+		put_device(&rport->dev);
+		if (!error)
+			fc_ns_disc_done(lp);
+	} else {
+		fc_lport_unlock(lp);
+		/* Fabric mode: quiesce I/O, then query the name server. */
+		fc_block_rports(lp);
+		fc_ns_gpn_ft_req(lp);	/* get ports by FC-4 type */
+		error = 0;
+	}
+	return error;
+}
+
+/*
+ * Handle resource allocation problem by retrying in a bit.
+ *
+ * Re-arms the lport state timer (e_d_tov ms) until max_retry_count is
+ * exhausted, then falls through to fc_ns_enter_reject().
+ */
+static void fc_ns_retry(struct fc_lport *lp)
+{
+	if (lp->retry_count == 0)
+		FC_DBG("local port %6x alloc failure "
+		       "- will retry", lp->fid);
+	if (lp->retry_count < lp->max_retry_count) {
+		lp->retry_count++;
+		mod_timer(&lp->state_timer,
+			  jiffies + msecs_to_jiffies(lp->e_d_tov));
+	} else {
+		FC_DBG("local port %6x alloc failure "
+		       "- retries exhausted", lp->fid);
+		fc_ns_enter_reject(lp);
+	}
+}
+
+/*
+ * Handle errors on local port requests.
+ * Don't get locks if in RESET state.
+ * The only possible errors so far are exchange TIMEOUT and CLOSED (reset).
+ *
+ * @fp is an ERR_PTR-encoded error code here, not a real frame.
+ */
+static void fc_ns_error(struct fc_lport *lp, struct fc_frame *fp)
+{
+	if (lp->state == LPORT_ST_RESET)
+		return;
+
+	fc_lport_lock(lp);
+	if (PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+		if (lp->retry_count < lp->max_retry_count) {
+			lp->retry_count++;
+			fc_ns_enter_retry(lp);
+		} else {
+			fc_ns_enter_reject(lp);
+		}
+	}
+	if (fc_ns_debug)
+		FC_DBG("error %ld retries %d limit %d",
+		       PTR_ERR(fp), lp->retry_count, lp->max_retry_count);
+	fc_lport_unlock(lp);
+}
+
+/*
+ * Restart discovery after a delay due to resource shortages.
+ * If the error persists, the discovery will be abandoned.
+ *
+ * The first retry uses a quarter of the normal delay.
+ */
+static void fcdt_ns_retry(struct fc_lport *lp)
+{
+	unsigned long delay = FC_NS_RETRY_DELAY;
+
+	if (!lp->ns_disc_retry_count)
+		delay /= 4;	/* timeout faster first time */
+	if (lp->ns_disc_retry_count++ < FC_NS_RETRY_LIMIT)
+		schedule_delayed_work(&lp->ns_disc_work,
+				      msecs_to_jiffies(delay));
+	else
+		fc_ns_disc_done(lp);
+}
+
+/*
+ * Test for dNS accept in response payload.
+ * Returns non-zero when the frame carries a CT accept (FC_FS_ACC) from
+ * the directory service's name-server subtype, zero otherwise.
+ */
+static int fc_lport_dns_acc(struct fc_frame *fp)
+{
+	struct fc_frame_header *hdr = fc_frame_header_get(fp);
+	struct fc_ct_hdr *ct_hdr = fc_frame_payload_get(fp, sizeof(*ct_hdr));
+
+	return hdr && ct_hdr &&
+	       hdr->fh_type == FC_TYPE_CT &&
+	       ct_hdr->ct_fs_type == FC_FST_DIR &&
+	       ct_hdr->ct_fs_subtype == FC_NS_SUBTYPE &&
+	       ntohs(ct_hdr->ct_cmd) == FC_FS_ACC;
+}
+
+/*
+ * Handle response from name server.
+ *
+ * Shared by the RPN_ID and RFT_ID requests: an accept advances the
+ * lport state machine (REG_PN -> REG_FT -> SCR); anything else retries.
+ */
+static void
+fc_ns_resp(struct fc_seq *sp, struct fc_frame *fp, void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+
+	if (!IS_ERR(fp)) {
+		fc_lport_lock(lp);
+		/* Response arrived: cancel the pending retry timer. */
+		del_timer(&lp->state_timer);
+		if (fc_lport_dns_acc(fp)) {
+			if (lp->state == LPORT_ST_REG_PN)
+				fc_ns_enter_reg_ft(lp);
+			else
+				fc_ns_enter_scr(lp);
+
+		} else {
+			fc_ns_retry(lp);
+		}
+		fc_lport_unlock(lp);
+		fc_frame_free(fp);
+	} else
+		fc_ns_error(lp, fp);
+}
+
+/*
+ * Handle new target found by discovery.
+ * Create remote port and session if needed.
+ * Ignore returns of our own FID & WWPN.
+ *
+ * If a non-NULL rp is passed in, it is held for the caller, but not for us.
+ *
+ * Events delivered are:
+ * FC_EV_READY, when remote port is rediscovered.
+ */
+static int fc_ns_new_target(struct fc_lport *lp,
+			    struct fc_rport *rport,
+			    struct fc_rport_identifiers *ids)
+{
+	struct fc_rport_libfc_priv *rp;
+	int error = 0;
+
+	if (rport && ids->port_name) {
+		if (rport->port_name == -1) {
+			/*
+			 * Set WWN and fall through to notify of create.
+			 */
+			fc_rport_set_name(rport, ids->port_name,
+					  rport->node_name);
+		} else if (rport->port_name != ids->port_name) {
+			/*
+			 * This is a new port with the same FCID as
+			 * a previously-discovered port. Presumably the old
+			 * port logged out and a new port logged in and was
+			 * assigned the same FCID. This should be rare.
+			 * Delete the old one and fall thru to re-create.
+			 */
+			fc_ns_del_target(lp, rport);
+			rport = NULL;
+		}
+	}
+	/* Skip entries with no usable ID, and our own port. */
+	if (((ids->port_name != -1) || (ids->port_id != -1)) &&
+	    ids->port_id != lp->fid && ids->port_name != lp->wwpn) {
+		if (!rport) {
+			rport = lp->tt.rport_lookup(lp, ids->port_id);
+			if (rport == NULL)
+				rport = lp->tt.rport_create(lp, ids);
+			if (!rport)
+				/* NOTE(review): positive ENOMEM; callers only
+				 * test non-zero, but -ENOMEM would be the
+				 * usual kernel convention. */
+				error = ENOMEM;
+		}
+		if (rport) {
+			rp = rport->dd_data;
+			rp->rp_state = RPORT_ST_INIT;
+			lp->tt.rport_login(rport);
+		}
+	}
+	return error;
+}
+
+/*
+ * Delete the remote port: reset its sessions and drop the reference
+ * taken at creation time.
+ */
+static void fc_ns_del_target(struct fc_lport *lp, struct fc_rport *rport)
+{
+	lp->tt.rport_reset(rport);
+	fc_remote_port_delete(rport);	/* release hold from create */
+}
+
+/*
+ * Done with discovery.
+ * If another discovery was requested while this one ran, start it now.
+ */
+static void fc_ns_disc_done(struct fc_lport *lp)
+{
+	lp->ns_disc_done = 1;
+	lp->ns_disc_pending = 0;
+	if (lp->ns_disc_requested)
+		lp->tt.disc_start(lp);
+}
+
+/*
+ * Fill in request header.
+ *
+ * Zeroes the CT header plus @req_size bytes of the following request
+ * payload, then sets revision, directory-service type/subtype and the
+ * command code @op.
+ */
+static void fc_ns_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
+			       unsigned int op, unsigned int req_size)
+{
+	memset(ct, 0, sizeof(*ct) + req_size);
+	ct->ct_rev = FC_CT_REV;
+	ct->ct_fs_type = FC_FST_DIR;
+	ct->ct_fs_subtype = FC_NS_SUBTYPE;
+	ct->ct_cmd = htons((u16) op);
+}
+
+/*
+ * Send a GPN_FT (Get Port Names by FC-4 type) request to the name
+ * server, asking for all FCP ports. Resets the multi-frame response
+ * bookkeeping first; failures schedule a discovery retry.
+ */
+static void fc_ns_gpn_ft_req(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct fc_seq *sp = NULL;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_gid_ft gid;
+	} *rp;
+	int error = 0;
+
+	/* Reset partial-record and sequence-count state for the response. */
+	lp->ns_disc_buf_len = 0;
+	lp->ns_disc_seq_count = 0;
+	fp = fc_frame_alloc(lp, sizeof(*rp));
+	if (fp == NULL) {
+		error = ENOMEM;
+	} else {
+		rp = fc_frame_payload_get(fp, sizeof(*rp));
+		fc_ns_fill_dns_hdr(lp, &rp->ct, FC_NS_GPN_FT, sizeof(rp->gid));
+		rp->gid.fn_fc4_type = FC_TYPE_FCP;
+
+		WARN_ON(!fc_lport_test_ready(lp));
+
+		fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+		sp = lp->tt.exch_seq_send(lp, fp,
+					  fc_ns_gpn_ft_resp,
+					  lp, lp->e_d_tov,
+					  lp->fid,
+					  lp->dns_rp->port_id,
+					  FC_FC_SEQ_INIT | FC_FC_END_SEQ);
+	}
+	if (error || sp == NULL)
+		fcdt_ns_retry(lp);
+}
+
+/*
+ * Handle error on dNS request.
+ *
+ * @fp is an ERR_PTR-encoded error code. Timeouts re-send GPN_FT up to
+ * FC_NS_RETRY_LIMIT times; anything else ends discovery.
+ */
+static void fcdt_ns_error(struct fc_lport *lp, struct fc_frame *fp)
+{
+	int err = PTR_ERR(fp);
+
+	switch (err) {
+	case -FC_EX_TIMEOUT:
+		if (lp->ns_disc_retry_count++ < FC_NS_RETRY_LIMIT) {
+			fc_ns_gpn_ft_req(lp);
+		} else {
+			FC_DBG("err %d - ending", err);
+			fc_ns_disc_done(lp);
+		}
+		break;
+	default:
+		FC_DBG("err %d - ending", err);
+		fc_ns_disc_done(lp);
+		break;
+	}
+}
+
+/*
+ * Parse the list of port IDs and names resulting from a discovery request.
+ *
+ * Records may be split across frames: a partial record from the previous
+ * frame is stashed in lp->ns_disc_buf (length ns_disc_buf_len) and
+ * completed from the head of this buffer. For each complete record a
+ * GNN_ID lookup is launched. Returns 0 or the first GNN_ID send error.
+ */
+static int fc_ns_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
+{
+	struct fc_gpn_ft_resp *np;
+	char *bp;
+	size_t plen;
+	size_t tlen;
+	int error = 0;
+	struct fc_ns_port *dp;
+
+	/*
+	 * Handle partial name record left over from previous call.
+	 */
+	bp = buf;
+	plen = len;
+	np = (struct fc_gpn_ft_resp *)bp;
+	tlen = lp->ns_disc_buf_len;
+	if (tlen) {
+		WARN_ON(tlen >= sizeof(*np));
+		plen = sizeof(*np) - tlen;	/* bytes needed to complete */
+		WARN_ON(plen <= 0);
+		WARN_ON(plen >= sizeof(*np));
+		if (plen > len)
+			plen = len;
+		np = &lp->ns_disc_buf;
+		memcpy((char *)np + tlen, bp, plen);
+
+		/*
+		 * Set bp so that the loop below will advance it to the
+		 * first valid full name element.
+		 */
+		bp -= tlen;
+		len += tlen;
+		plen += tlen;
+		lp->ns_disc_buf_len = (unsigned char) plen;
+		if (plen == sizeof(*np))
+			lp->ns_disc_buf_len = 0;
+	}
+
+	/*
+	 * Handle full name records, including the one filled from above.
+	 * Normally, np == bp and plen == len, but from the partial case above,
+	 * bp, len describe the overall buffer, and np, plen describe the
+	 * partial buffer, which if would usually be full now.
+	 * After the first time through the loop, things return to "normal".
+	 */
+	while (plen >= sizeof(*np)) {
+		dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+		if (!dp)
+			break;
+		dp->lp = lp;
+		dp->ids.port_id = ntoh24(np->fp_fid);
+		dp->ids.port_name = ntohll(np->fp_wwpn);
+		dp->ids.node_name = -1;	/* filled in by the GNN_ID response */
+		dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+		error = fc_ns_gnn_id_req(lp, dp);
+		if (error)
+			break;
+		if (np->fp_flags & FC_NS_FID_LAST) {
+			fc_ns_disc_done(lp);
+			len = 0;
+			break;
+		}
+		len -= sizeof(*np);
+		bp += sizeof(*np);
+		np = (struct fc_gpn_ft_resp *)bp;
+		plen = len;
+	}
+
+	/*
+	 * Save any partial record at the end of the buffer for next time.
+	 */
+	if (error == 0 && len > 0 && len < sizeof(*np)) {
+		if (np != &lp->ns_disc_buf)
+			memcpy(&lp->ns_disc_buf, np, len);
+		lp->ns_disc_buf_len = (unsigned char) len;
+	} else {
+		lp->ns_disc_buf_len = 0;
+	}
+	return error;
+}
+
+/*
+ * Delayed-work handler: resume a pending discovery (re-send GPN_FT) or
+ * start a freshly requested one.
+ */
+static void fc_ns_timeout(struct work_struct *work)
+{
+	struct fc_lport *lp;
+
+	lp = container_of(work, struct fc_lport, ns_disc_work.work);
+
+	if (lp->ns_disc_pending)
+		fc_ns_gpn_ft_req(lp);
+	else
+		lp->tt.disc_start(lp);
+}
+
+/*
+ * Handle a response frame from Get Port Names (GPN_FT).
+ * The response may be in multiple frames: the first frame (SOF_I3,
+ * sequence count 0) carries the CT header; continuation frames (SOF_N3)
+ * must arrive in sequence-count order. Payload is handed to
+ * fc_ns_gpn_ft_parse(). The frame is always freed here.
+ */
+static void fc_ns_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *lp_arg)
+{
+	struct fc_lport *lp = lp_arg;
+	struct fc_ct_hdr *cp;
+	struct fc_frame_header *fh;
+	unsigned int seq_cnt;
+	void *buf = NULL;
+	unsigned int len;
+	int error;
+
+	if (IS_ERR(fp)) {
+		fcdt_ns_error(lp, fp);
+		return;
+	}
+
+	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
+	fh = fc_frame_header_get(fp);
+	len = fr_len(fp) - sizeof(*fh);
+	seq_cnt = ntohs(fh->fh_seq_cnt);
+	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
+	    lp->ns_disc_seq_count == 0) {
+		/* First frame of the response: starts with the CT header. */
+		cp = fc_frame_payload_get(fp, sizeof(*cp));
+		if (cp == NULL) {
+			FC_DBG("GPN_FT response too short. len %d",
+			       fr_len(fp));
+		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
+
+			/*
+			 * Accepted. Parse response.
+			 */
+			buf = cp + 1;
+			len -= sizeof(*cp);
+		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
+			FC_DBG("GPN_FT rejected reason %x exp %x "
+			       "(check zoning)", cp->ct_reason, cp->ct_explan);
+			fc_ns_disc_done(lp);
+		} else {
+			FC_DBG("GPN_FT unexpected response code %x\n",
+			       ntohs(cp->ct_cmd));
+		}
+	} else if (fr_sof(fp) == FC_SOF_N3 &&
+		   seq_cnt == lp->ns_disc_seq_count) {
+		/* Continuation frame: payload is raw records. */
+		buf = fh + 1;
+	} else {
+		FC_DBG("GPN_FT unexpected frame - out of sequence? "
+		       "seq_cnt %x expected %x sof %x eof %x",
+		       seq_cnt, lp->ns_disc_seq_count, fr_sof(fp), fr_eof(fp));
+	}
+	if (buf) {
+		error = fc_ns_gpn_ft_parse(lp, buf, len);
+		if (error)
+			fcdt_ns_retry(lp);
+		else
+			lp->ns_disc_seq_count++;
+	}
+	fc_frame_free(fp);
+}
+
+/*
+ * Discover the directory information for a single target.
+ * This could be from an RSCN that reported a change for the target.
+ *
+ * Takes ownership of @dp: it is either handed to the GPN_ID exchange or
+ * freed here. Any existing rport for the FID is deleted first.
+ */
+static void fc_ns_single(struct fc_lport *lp, struct fc_ns_port *dp)
+{
+	struct fc_rport *rport;
+
+	/* Ignore notifications about our own port. */
+	if (dp->ids.port_id == lp->fid)
+		goto out;
+
+	rport = lp->tt.rport_lookup(lp, dp->ids.port_id);
+	if (rport) {
+		fc_ns_del_target(lp, rport);
+		put_device(&rport->dev);	/* hold from lookup */
+	}
+
+	if (fc_ns_gpn_id_req(lp, dp) != 0)
+		goto error;
+	return;
+error:
+	fc_ns_restart(lp);
+out:
+	kfree(dp);
+}
+
+/*
+ * Send Get Port Name by ID (GPN_ID) request.
+ * The remote port is held by the caller for us.
+ *
+ * @dp is passed as the response-handler argument; on success its
+ * ownership moves to fc_ns_gpn_id_resp()/fc_ns_gpn_id_error().
+ * Returns 0 or -ENOMEM.
+ */
+static int fc_ns_gpn_id_req(struct fc_lport *lp, struct fc_ns_port *dp)
+{
+	struct fc_frame *fp;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_fid fid;
+	} *cp;
+	int error = 0;
+
+	fp = fc_frame_alloc(lp, sizeof(*cp));
+	if (fp == NULL)
+		return -ENOMEM;
+
+	cp = fc_frame_payload_get(fp, sizeof(*cp));
+	fc_ns_fill_dns_hdr(lp, &cp->ct, FC_NS_GPN_ID, sizeof(cp->fid));
+	hton24(cp->fid.fp_fid, dp->ids.port_id);
+
+	WARN_ON(!fc_lport_test_ready(lp));
+
+	fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				  fc_ns_gpn_id_resp,
+				  dp, lp->e_d_tov,
+				  lp->fid,
+				  lp->dns_rp->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		error = -ENOMEM;
+
+	return error;
+}
+
+/*
+ * Handle a response frame from Get Port Name by ID (GPN_ID).
+ *
+ * Owns both the response frame and the fc_ns_port (@dp): the frame is
+ * always freed before returning, and dp is either handed on to the
+ * GNN_ID request or freed here. (The previous code leaked both on the
+ * short-payload path and leaked dp on the RJT/unexpected paths.)
+ */
+static void fc_ns_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *dp_arg)
+{
+	struct fc_ns_port *dp = dp_arg;
+	struct fc_lport *lp;
+	struct resp {
+		struct fc_ct_hdr ct;
+		__be64 wwn;
+	} *cp;
+	unsigned int cmd;
+
+	if (IS_ERR(fp)) {
+		fc_ns_gpn_id_error(dp, fp);	/* frees dp */
+		return;
+	}
+
+	lp = dp->lp;
+	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
+
+	cp = fc_frame_payload_get(fp, sizeof(cp->ct));
+	if (cp == NULL) {
+		FC_DBG("GPN_ID response too short. len %d", fr_len(fp));
+		goto out;	/* was a bare return, leaking fp and dp */
+	}
+	cmd = ntohs(cp->ct.ct_cmd);
+	switch (cmd) {
+	case FC_FS_ACC:
+		cp = fc_frame_payload_get(fp, sizeof(*cp));
+		if (cp == NULL) {
+			FC_DBG("GPN_ID response payload too short. len %d",
+			       fr_len(fp));
+			break;
+		}
+		dp->ids.port_name = ntohll(cp->wwn);
+		fc_ns_gnn_id_req(lp, dp);
+		dp = NULL;	/* ownership passed to the GNN_ID exchange */
+		break;
+	case FC_FS_RJT:
+		fc_ns_restart(lp);
+		break;
+	default:
+		FC_DBG("GPN_ID unexpected CT response cmd %x\n", cmd);
+		break;
+	}
+out:
+	kfree(dp);	/* no-op when ownership was handed on */
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle error from GPN_ID.
+ * A timeout triggers a full rediscovery; a closed exchange is ignored.
+ * Always frees @dp.
+ */
+static void fc_ns_gpn_id_error(struct fc_ns_port *dp, struct fc_frame *fp)
+{
+	struct fc_lport *lp = dp->lp;
+
+	switch (PTR_ERR(fp)) {
+	case -FC_EX_TIMEOUT:
+		fc_ns_restart(lp);
+		break;
+	case -FC_EX_CLOSED:
+	default:
+		break;
+	}
+	kfree(dp);
+}
+
+/*
+ * Setup session to dNS if not already set up.
+ *
+ * Creates the directory-server remote port (FC_FID_DIR_SERV) on first
+ * use and starts its login; once the session is ready, proceeds to
+ * name registration (REG_PN).
+ */
+static void fc_ns_enter_dns(struct fc_lport *lp)
+{
+	struct fc_rport *rport;
+	struct fc_rport_libfc_priv *rp;
+	struct fc_rport_identifiers ids = {
+		.port_id = FC_FID_DIR_SERV,
+		.port_name = -1,
+		.node_name = -1,
+		.roles = FC_RPORT_ROLE_UNKNOWN,
+	};
+
+	if (fc_ns_debug)
+		FC_DBG("Processing DNS state");
+
+	fc_lport_state_enter(lp, LPORT_ST_DNS);
+
+	if (!lp->dns_rp) {
+		/*
+		 * Set up remote port to directory server.
+		 */
+
+		/*
+		 * we are called with the state_lock, but if rport_lookup_create
+		 * needs to create a rport then it will sleep.
+		 */
+		fc_lport_unlock(lp);
+		rport = lp->tt.rport_lookup(lp, ids.port_id);
+		if (rport == NULL)
+			rport = lp->tt.rport_create(lp, &ids);
+		fc_lport_lock(lp);
+		if (!rport)
+			goto err;
+		lp->dns_rp = rport;
+	}
+
+	rport = lp->dns_rp;
+	rp = rport->dd_data;
+
+	/*
+	 * If dNS session isn't ready, start its logon.
+	 */
+	if (rp->rp_state != RPORT_ST_READY) {
+		lp->tt.rport_login(rport);
+	} else {
+		/* Already logged in: cancel the retry timer and register. */
+		del_timer(&lp->state_timer);
+		fc_ns_enter_reg_pn(lp);
+	}
+	return;
+
+	/*
+	 * Resource allocation problem (malloc). Try again in 500 mS.
+	 */
+err:
+	fc_ns_retry(lp);
+}
+
+/*
+ * Logoff DNS session.
+ * We should get an event call when the session has been logged out.
+ * If there is no dNS remote port, fall back to a local-port logout.
+ */
+static void fc_ns_enter_dns_stop(struct fc_lport *lp)
+{
+	struct fc_rport *rport = lp->dns_rp;
+
+	if (fc_ns_debug)
+		FC_DBG("Processing DNS_STOP state");
+
+	fc_lport_state_enter(lp, LPORT_ST_DNS_STOP);
+
+	if (rport)
+		lp->tt.rport_logout(rport);
+	else
+		lp->tt.lport_logout(lp);
+}
+
+/*
+ * Fill in dNS request header.
+ *
+ * This was a byte-for-byte duplicate of fc_ns_fill_dns_hdr() above (the
+ * only difference being a redundant u16 cast before htons()); delegate
+ * so a single definition remains.
+ */
+static void
+fc_lport_fill_dns_hdr(struct fc_lport *lp, struct fc_ct_hdr *ct,
+		      unsigned int op, unsigned int req_size)
+{
+	fc_ns_fill_dns_hdr(lp, ct, op, req_size);
+}
+
+/*
+ * Register port name with name server.
+ *
+ * Sends an RPN_ID carrying our FID and WWPN; the shared fc_ns_resp()
+ * handler advances the state machine. Failures arm a retry.
+ */
+static void fc_ns_enter_reg_pn(struct fc_lport *lp)
+{
+	struct fc_frame *fp;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_rn_id rn;
+	} *req;
+
+	if (fc_ns_debug)
+		FC_DBG("Processing REG_PN state");
+
+	fc_lport_state_enter(lp, LPORT_ST_REG_PN);
+	fp = fc_frame_alloc(lp, sizeof(*req));
+	if (!fp) {
+		fc_ns_retry(lp);
+		return;
+	}
+	req = fc_frame_payload_get(fp, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	fc_lport_fill_dns_hdr(lp, &req->ct, FC_NS_RPN_ID, sizeof(req->rn));
+	hton24(req->rn.fr_fid.fp_fid, lp->fid);
+	put_unaligned_be64(lp->wwpn, &req->rn.fr_wwn);
+	fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				  fc_ns_resp, lp,
+				  lp->e_d_tov,
+				  lp->fid,
+				  lp->dns_rp->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		fc_ns_retry(lp);
+}
+
+/*
+ * Fill in any discovery handlers the LLD did not override and set up
+ * the delayed discovery work item. Always returns 0.
+ */
+int fc_ns_init(struct fc_lport *lp)
+{
+	INIT_DELAYED_WORK(&lp->ns_disc_work, fc_ns_timeout);
+
+	if (!lp->tt.disc_start)
+		lp->tt.disc_start = fc_ns_disc_start;
+
+	if (!lp->tt.disc_recv_req)
+		lp->tt.disc_recv_req = fc_ns_recv_req;
+
+	if (!lp->tt.disc_enter_dns)
+		lp->tt.disc_enter_dns = fc_ns_enter_dns;
+
+	if (!lp->tt.disc_stop)
+		lp->tt.disc_stop = fc_ns_enter_dns_stop;
+
+	return 0;
+}
+EXPORT_SYMBOL(fc_ns_init);
+
+/*
+ * Send Get Node Name by ID (GNN_ID) request.
+ * (The original comment said "Get Port Name", but FC_NS_GNN_ID is the
+ * node-name query.)
+ *
+ * @dp is passed as the response-handler argument; on success its
+ * ownership moves to fc_ns_gnn_id_resp()/fc_ns_gnn_id_error().
+ * Returns 0 or -ENOMEM.
+ */
+static int fc_ns_gnn_id_req(struct fc_lport *lp, struct fc_ns_port *dp)
+{
+	struct fc_frame *fp;
+	struct req {
+		struct fc_ct_hdr ct;
+		struct fc_ns_fid fid;
+	} *cp;
+	int error = 0;
+
+	fp = fc_frame_alloc(lp, sizeof(*cp));
+	if (fp == NULL)
+		return -ENOMEM;
+
+	cp = fc_frame_payload_get(fp, sizeof(*cp));
+	fc_ns_fill_dns_hdr(lp, &cp->ct, FC_NS_GNN_ID, sizeof(cp->fid));
+	hton24(cp->fid.fp_fid, dp->ids.port_id);
+
+	WARN_ON(!fc_lport_test_ready(lp));
+
+	fc_frame_setup(fp, FC_RCTL_DD_UNSOL_CTL, FC_TYPE_CT);
+	if (!lp->tt.exch_seq_send(lp, fp,
+				  fc_ns_gnn_id_resp,
+				  dp, lp->e_d_tov,
+				  lp->fid,
+				  lp->dns_rp->port_id,
+				  FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+		error = -ENOMEM;
+
+	return error;
+}
+
+/*
+ * Handle a response frame from Get Node Name by ID (GNN_ID).
+ * (The original comment said "Get Port Name".)
+ *
+ * Owns both the response frame and the fc_ns_port (@dp); both are
+ * released before returning. (The previous code returned early on a
+ * short payload, leaking fp and dp.) On accept, the completed IDs are
+ * handed to fc_ns_new_target().
+ */
+static void fc_ns_gnn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *dp_arg)
+{
+	struct fc_ns_port *dp = dp_arg;
+	struct fc_lport *lp;
+	struct resp {
+		struct fc_ct_hdr ct;
+		__be64 wwn;
+	} *cp;
+	unsigned int cmd;
+
+	if (IS_ERR(fp)) {
+		fc_ns_gnn_id_error(dp, fp);	/* frees dp */
+		return;
+	}
+
+	lp = dp->lp;
+	WARN_ON(!fc_frame_is_linear(fp));	/* buffer must be contiguous */
+
+	cp = fc_frame_payload_get(fp, sizeof(cp->ct));
+	if (cp == NULL) {
+		FC_DBG("GNN_ID response too short. len %d", fr_len(fp));
+		goto out;	/* was a bare return, leaking fp and dp */
+	}
+	cmd = ntohs(cp->ct.ct_cmd);
+	switch (cmd) {
+	case FC_FS_ACC:
+		cp = fc_frame_payload_get(fp, sizeof(*cp));
+		if (cp == NULL) {
+			FC_DBG("GNN_ID response payload too short. len %d",
+			       fr_len(fp));
+			break;
+		}
+		dp->ids.node_name = ntohll(cp->wwn);
+		fc_ns_new_target(lp, NULL, &dp->ids);
+		break;
+	case FC_FS_RJT:
+		fc_ns_restart(lp);
+		break;
+	default:
+		FC_DBG("GNN_ID unexpected CT response cmd %x\n", cmd);
+		break;
+	}
+out:
+	kfree(dp);
+	fc_frame_free(fp);
+}
+
+/*
+ * Handle error from GNN_ID.
+ * A timeout triggers a full rediscovery; a closed exchange is ignored.
+ * Always frees @dp.
+ */
+static void fc_ns_gnn_id_error(struct fc_ns_port *dp, struct fc_frame *fp)
+{
+	struct fc_lport *lp = dp->lp;
+
+	switch (PTR_ERR(fp)) {
+	case -FC_EX_TIMEOUT:
+		fc_ns_restart(lp);
+		break;
+	case -FC_EX_CLOSED:
+	default:
+		break;
+	}
+	kfree(dp);
+}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
new file mode 100644
index 0000000..4050596
--- /dev/null
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -0,0 +1,1265 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Remote Port support.
+ *
+ * A remote port structure contains information about an N port to which we
+ * will create sessions.
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/rcupdate.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+
+#include <scsi/libfc/libfc.h>
+
+static int fc_rp_debug;
+
+/*
+ * static functions.
+ */
+static void fc_rport_enter_start(struct fc_rport *);
+static void fc_rport_enter_plogi(struct fc_rport *);
+static void fc_rport_enter_prli(struct fc_rport *);
+static void fc_rport_enter_rtv(struct fc_rport *);
+static void fc_rport_enter_logo(struct fc_rport *);
+static void fc_rport_recv_plogi_req(struct fc_rport *,
+ struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_prli_req(struct fc_rport *,
+ struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_prlo_req(struct fc_rport *,
+ struct fc_seq *, struct fc_frame *);
+static void fc_rport_recv_logo_req(struct fc_rport *,
+ struct fc_seq *, struct fc_frame *);
+static void fc_rport_timeout(struct work_struct *);
+
+static struct fc_rport *fc_remote_port_create(struct fc_lport *,
+ struct fc_rport_identifiers *);
+
+/**
+ * fc_rport_lookup - lookup a remote port by FC_ID
+ * @lp: local port whose Scsi_Host rport list is searched
+ * @fid: 24-bit fabric port ID to match
+ *
+ * Returns the first ONLINE remote port with port_id == @fid, with its
+ * device reference count taken (caller must put_device() when done),
+ * or NULL if no match is found. Takes host_lock to walk the list.
+ */
+struct fc_rport *fc_rport_lookup(const struct fc_lport *lp, u32 fid)
+{
+ struct Scsi_Host *shost = lp->host;
+ struct fc_rport *rport, *found;
+ unsigned long flags;
+
+ found = NULL;
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry(rport, &fc_host_rports(shost), peers)
+ if (rport->port_id == fid &&
+ rport->port_state == FC_PORTSTATE_ONLINE) {
+ found = rport;
+ get_device(&found->dev);
+ break;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return found;
+}
+
+/**
+ * fc_remote_port_create - create and initialize a remote port
+ * @lp: local port the new remote port is associated with
+ * @ids: remote port identifiers (port ID, WWPN, WWNN, roles)
+ *
+ * Allocates an fc_rport through the FC transport class and initializes
+ * the libfc-private area (dd_data): state, timeouts and retry work.
+ * Returns NULL on allocation failure.
+ *
+ * (The previous kernel-doc documented nonexistent @vf/@port_name
+ * parameters, and rp->local_port was assigned twice.)
+ */
+static struct fc_rport *fc_remote_port_create(struct fc_lport *lp,
+ struct fc_rport_identifiers *ids)
+{
+ struct fc_rport_libfc_priv *rp;
+ struct fc_rport *rport;
+
+ rport = fc_remote_port_add(lp->host, 0, ids);
+ if (!rport)
+ return NULL;
+
+ rp = rport->dd_data;
+
+ /* default value until service parameters are exchanged in PLOGI */
+ rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+
+ spin_lock_init(&rp->rp_lock);
+ rp->rp_state = RPORT_ST_INIT;
+ rp->local_port = lp;
+ rp->e_d_tov = lp->e_d_tov;
+ rp->r_a_tov = lp->r_a_tov;
+ rp->flags = FC_RP_FLAGS_REC_SUPPORTED;
+ INIT_DELAYED_WORK(&rp->retry_work, fc_rport_timeout);
+
+ return rport;
+}
+
+/*
+ * Lock session.
+ * BH-safe spinlock on the libfc-private area; pairs with
+ * fc_rport_unlock().
+ */
+static inline void fc_rport_lock(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ spin_lock_bh(&rp->rp_lock);
+}
+
+/*
+ * Unlock session without invoking pending events.
+ * Pairs with fc_rport_lock().
+ */
+static inline void fc_rport_unlock(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ spin_unlock_bh(&rp->rp_lock);
+}
+
+/*
+ * Derive the usable maximum payload size from a PLOGI/FLOGI payload.
+ * Returns the smallest of @maxval, the common-service B2B data field
+ * size, and the class-3 receive data field size -- each considered
+ * only when it is at least FC_SP_MIN_MAX_PAYLOAD.
+ */
+static unsigned int
+fc_plogi_get_maxframe(struct fc_els_flogi *flp, unsigned int maxval)
+{
+ unsigned int mfs;
+
+ /*
+ * Get max payload from the common service parameters and the
+ * class 3 receive data field size.
+ */
+ mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+ maxval = mfs;
+ mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+ maxval = mfs;
+ return maxval;
+}
+
+/*
+ * Fill in PLOGI command for request.
+ *
+ * Builds the common (and class-3) service parameters from the local
+ * port. @op selects the ELS command byte; for anything other than
+ * FLOGI, the PLOGI-only fields (features, sequence counts, E_D_TOV)
+ * are filled in as well.
+ */
+static void
+fc_lport_plogi_fill(struct fc_lport *lp,
+ struct fc_els_flogi *flogi, unsigned int op)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+
+ memset(flogi, 0, sizeof(*flogi));
+ flogi->fl_cmd = (u8) op;
+ put_unaligned_be64(lp->wwpn, &flogi->fl_wwpn);
+ put_unaligned_be64(lp->wwnn, &flogi->fl_wwnn);
+
+ sp = &flogi->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lp->mfs);
+ cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ if (op != ELS_FLOGI) {
+ sp->sp_features = htons(FC_SP_FT_CIRO);
+ sp->sp_tot_seq = htons(255); /* seq. we accept */
+ sp->sp_rel_off = htons(0x1f);
+ sp->sp_e_d_tov = htonl(lp->e_d_tov);
+
+ cp->cp_rdfs = htons((u16) lp->mfs);
+ cp->cp_con_seq = htons(255);
+ cp->cp_open_seq = 1;
+ }
+}
+
+/*
+ * Enter a new rport state, resetting the retry counter whenever the
+ * state actually changes. Caller is expected to hold the rport lock.
+ */
+static void fc_rport_state_enter(struct fc_rport *rport,
+ enum fc_rport_state new)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ if (rp->rp_state != new)
+ rp->retries = 0;
+ rp->rp_state = new;
+}
+
+/*
+ * Start the session login state machine.
+ * Set it to wait for the local_port to be ready if it isn't.
+ *
+ * If the rport is in the ERROR state it is returned to INIT; when it
+ * is the directory-server rport, the lport's dNS linkage is torn down
+ * and the lport state machine restarted (login or logout depending on
+ * whether a dNS stop was in progress).
+ */
+int fc_rport_login(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+
+ fc_rport_lock(rport);
+ if (rp->rp_state == RPORT_ST_INIT) {
+ fc_rport_unlock(rport);
+ fc_rport_enter_start(rport);
+ } else if (rp->rp_state == RPORT_ST_ERROR) {
+ fc_rport_state_enter(rport, RPORT_ST_INIT);
+ fc_rport_unlock(rport);
+ if (fc_rp_debug)
+ FC_DBG("remote %6x closed", rport->port_id);
+
+ /* dNS rport teardown: kick the lport state machine */
+ if (rport == lp->dns_rp &&
+ lp->state != LPORT_ST_RESET) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->dns_rp = NULL;
+
+ if (lp->state == LPORT_ST_DNS_STOP) {
+ fc_lport_unlock(lp);
+ lp->tt.lport_logout(lp);
+ } else {
+ lp->tt.lport_login(lp);
+ fc_lport_unlock(lp);
+ }
+ fc_remote_port_delete(rport);
+ }
+ } else
+ fc_rport_unlock(rport);
+
+ return 0;
+}
+
+/*
+ * Stop the session - log it off.
+ *
+ * In the logged-in states (PRLI/RTV/READY) an ELS LOGO is sent.
+ * Otherwise the rport is returned to INIT directly and, for the
+ * directory-server rport, the lport's dNS linkage is torn down.
+ */
+int fc_rport_logout(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+
+ fc_rport_lock(rport);
+ switch (rp->rp_state) {
+ case RPORT_ST_PRLI:
+ case RPORT_ST_RTV:
+ case RPORT_ST_READY:
+ fc_rport_enter_logo(rport);
+ fc_rport_unlock(rport);
+ break;
+ default:
+ fc_rport_state_enter(rport, RPORT_ST_INIT);
+ fc_rport_unlock(rport);
+ if (fc_rp_debug)
+ FC_DBG("remote %6x closed", rport->port_id);
+ /* dNS rport teardown: kick the lport state machine */
+ if (rport == lp->dns_rp &&
+ lp->state != LPORT_ST_RESET) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->dns_rp = NULL;
+
+ if (lp->state == LPORT_ST_DNS_STOP) {
+ fc_lport_unlock(lp);
+ lp->tt.lport_logout(lp);
+ } else {
+ lp->tt.lport_login(lp);
+ fc_lport_unlock(lp);
+ }
+
+ fc_remote_port_delete(rport);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Reset the session - assume it is logged off. Used after fabric logoff.
+ * The local port code takes care of resetting the exchange manager.
+ *
+ * No frames are sent; the rport is simply forced back to INIT, and the
+ * dNS linkage is torn down when this is the directory-server rport.
+ */
+void fc_rport_reset(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp;
+
+ if (fc_rp_debug)
+ FC_DBG("sess to %6x reset", rport->port_id);
+ fc_rport_lock(rport);
+
+ lp = rp->local_port;
+ fc_rport_state_enter(rport, RPORT_ST_INIT);
+ fc_rport_unlock(rport);
+
+ if (fc_rp_debug)
+ FC_DBG("remote %6x closed", rport->port_id);
+ if (rport == lp->dns_rp &&
+ lp->state != LPORT_ST_RESET) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->dns_rp = NULL;
+ if (lp->state == LPORT_ST_DNS_STOP) {
+ fc_lport_unlock(lp);
+ lp->tt.lport_logout(lp);
+ } else {
+ lp->tt.lport_login(lp);
+ fc_lport_unlock(lp);
+ }
+ fc_remote_port_delete(rport);
+ }
+}
+
+/*
+ * Reset all sessions on the local port's remote-port list.
+ *
+ * The Scsi_Host host_lock protects the list. NOTE(review): despite
+ * what an earlier comment claimed ("don't hold the lock over the
+ * reset call", and it referenced a nonexistent vf_lock), the reset
+ * callback IS invoked with host_lock held and IRQs disabled here --
+ * rport_reset must therefore not sleep or re-take host_lock; confirm.
+ */
+void fc_rport_reset_list(struct fc_lport *lp)
+{
+ struct Scsi_Host *shost = lp->host;
+ struct fc_rport *rport;
+ struct fc_rport *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(rport, next, &fc_host_rports(shost), peers) {
+ lp->tt.rport_reset(rport);
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Begin the login sequence for an rport in the INIT state.
+ * Sends PLOGI immediately when the local port is ready (or when this
+ * is the dNS rport); otherwise login proceeds once the lport is ready.
+ */
+static void fc_rport_enter_start(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+
+ /*
+ * If the local port is already logged on, advance to next state.
+ * Otherwise the local port will be logged on by fc_rport_unlock().
+ */
+ fc_rport_state_enter(rport, RPORT_ST_STARTED);
+
+ if (rport == lp->dns_rp || fc_lport_test_ready(lp))
+ fc_rport_enter_plogi(rport);
+}
+
+/*
+ * Handle exchange reject or retry exhaustion in various states.
+ *
+ * PLOGI/PRLI failures put the rport in ERROR; an RTV failure is
+ * tolerated (many targets don't support RTV) and the rport goes
+ * READY; a LOGO failure just returns to INIT. In each case the dNS
+ * rport additionally drives the lport state machine. Any other state
+ * is a caller bug.
+ */
+static void fc_rport_reject(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+ switch (rp->rp_state) {
+ case RPORT_ST_PLOGI:
+ case RPORT_ST_PRLI:
+ fc_rport_state_enter(rport, RPORT_ST_ERROR);
+ if (rport == lp->dns_rp &&
+ lp->state != LPORT_ST_RESET) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->dns_rp = NULL;
+ if (lp->state == LPORT_ST_DNS_STOP) {
+ fc_lport_unlock(lp);
+ lp->tt.lport_logout(lp);
+ } else {
+ lp->tt.lport_login(lp);
+ fc_lport_unlock(lp);
+ }
+ fc_remote_port_delete(rport);
+ }
+ break;
+ case RPORT_ST_RTV:
+ /* RTV is optional: treat failure as success */
+ fc_rport_state_enter(rport, RPORT_ST_READY);
+ if (fc_rp_debug)
+ FC_DBG("remote %6x ready", rport->port_id);
+ if (rport == lp->dns_rp &&
+ lp->state == LPORT_ST_DNS) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->tt.disc_enter_dns(lp);
+ fc_lport_unlock(lp);
+ }
+ break;
+ case RPORT_ST_LOGO:
+ fc_rport_state_enter(rport, RPORT_ST_INIT);
+ if (fc_rp_debug)
+ FC_DBG("remote %6x closed", rport->port_id);
+ if (rport == lp->dns_rp &&
+ lp->state != LPORT_ST_RESET) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->dns_rp = NULL;
+ if (lp->state == LPORT_ST_DNS_STOP) {
+ fc_lport_unlock(lp);
+ lp->tt.lport_logout(lp);
+ } else {
+ lp->tt.lport_login(lp);
+ fc_lport_unlock(lp);
+ }
+ fc_remote_port_delete(rport);
+ }
+ break;
+ case RPORT_ST_NONE:
+ case RPORT_ST_READY:
+ case RPORT_ST_ERROR:
+ case RPORT_ST_PLOGI_RECV:
+ case RPORT_ST_STARTED:
+ case RPORT_ST_INIT:
+ BUG();
+ break;
+ }
+ return;
+}
+
+/*
+ * Timeout handler for retrying after allocation failures or exchange timeout.
+ * Re-issues the request appropriate to the current state, then drops
+ * the device reference taken when the work was scheduled.
+ */
+static void fc_rport_timeout(struct work_struct *work)
+{
+ struct fc_rport_libfc_priv *rp =
+ container_of(work, struct fc_rport_libfc_priv, retry_work.work);
+ /*
+ * NOTE(review): this assumes dd_data is allocated immediately
+ * after struct fc_rport -- confirm against the transport's rport
+ * allocation; a dedicated accessor macro would be less fragile.
+ */
+ struct fc_rport *rport = (((void *)rp) - sizeof(struct fc_rport));
+
+ switch (rp->rp_state) {
+ case RPORT_ST_PLOGI:
+ fc_rport_enter_plogi(rport);
+ break;
+ case RPORT_ST_PRLI:
+ fc_rport_enter_prli(rport);
+ break;
+ case RPORT_ST_RTV:
+ fc_rport_enter_rtv(rport);
+ break;
+ case RPORT_ST_LOGO:
+ fc_rport_enter_logo(rport);
+ break;
+ case RPORT_ST_READY:
+ case RPORT_ST_ERROR:
+ case RPORT_ST_INIT:
+ break;
+ case RPORT_ST_NONE:
+ case RPORT_ST_PLOGI_RECV:
+ case RPORT_ST_STARTED:
+ BUG();
+ break;
+ }
+ put_device(&rport->dev);
+}
+
+/*
+ * Handle retry for allocation failure via timeout.
+ * Schedules retry_work after E_D_TOV while under max_retry_count
+ * (taking a device reference released by fc_rport_timeout());
+ * otherwise gives up via fc_rport_reject().
+ */
+static void fc_rport_retry(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+
+ if (rp->retries < lp->max_retry_count) {
+ rp->retries++;
+ get_device(&rport->dev);
+ schedule_delayed_work(&rp->retry_work,
+ msecs_to_jiffies(rp->e_d_tov));
+ } else {
+ FC_DBG("sess %6x alloc failure in state %d, retries exhausted",
+ rport->port_id, rp->rp_state);
+ fc_rport_reject(rport);
+ }
+}
+
+/*
+ * Handle error from a sequence issued by the session state machine.
+ *
+ * Exchange timeouts are retried (via retry_work) while under
+ * max_retry_count; any other error, or exhausted retries, rejects the
+ * current state via fc_rport_reject().
+ */
+static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ fc_rport_lock(rport);
+ if (fc_rp_debug)
+ FC_DBG("state %d error %ld retries %d\n",
+ rp->rp_state, PTR_ERR(fp), rp->retries);
+
+ /*
+ * Retry while under the limit. The original test used ">=",
+ * which rejected the first timeout and only retried after the
+ * limit was exceeded -- the inverse of fc_rport_retry()'s logic.
+ */
+ if (PTR_ERR(fp) == -FC_EX_TIMEOUT &&
+ rp->retries++ < rp->local_port->max_retry_count) {
+ get_device(&rport->dev);
+ schedule_delayed_work(&rp->retry_work, 0);
+ } else
+ fc_rport_reject(rport);
+
+ fc_rport_unlock(rport);
+}
+
+/*
+ * Handle incoming ELS PLOGI response.
+ * Save parameters of target. Finish exchange.
+ *
+ * On LS_ACC: records the remote WWPN/WWNN, negotiates E_D_TOV, the
+ * concurrent-sequence limit and the max frame size, then advances to
+ * PRLI. On LS_RJT with reason "in progress" the PLOGI is retried;
+ * any other response rejects the session.
+ */
+static void fc_rport_plogi_recv_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_els_ls_rjt *rjp;
+ struct fc_els_flogi *plp;
+ u64 wwpn, wwnn;
+ unsigned int tov;
+ u16 csp_seq;
+ u16 cssp_seq;
+ u8 op;
+ struct fc_rport *rport = rp_arg;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+ if (!IS_ERR(fp)) {
+ op = fc_frame_payload_op(fp);
+ fc_rport_lock(rport);
+ if (op == ELS_LS_ACC &&
+ (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
+ wwpn = get_unaligned_be64(&plp->fl_wwpn);
+ wwnn = get_unaligned_be64(&plp->fl_wwnn);
+
+ fc_rport_set_name(rport, wwpn, wwnn);
+ tov = ntohl(plp->fl_csp.sp_e_d_tov);
+ /* FC_SP_FT_EDTR: E_D_TOV expressed in ns, not ms */
+ if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
+ tov /= 1000;
+ if (tov > rp->e_d_tov)
+ rp->e_d_tov = tov;
+ /* use the smaller of total and class-3 seq. limits */
+ csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
+ cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
+ if (cssp_seq < csp_seq)
+ csp_seq = cssp_seq;
+ rp->max_seq = csp_seq;
+ rport->maxframe_size =
+ fc_plogi_get_maxframe(plp, rp->local_port->mfs);
+ if (rp->rp_state == RPORT_ST_PLOGI)
+ fc_rport_enter_prli(rport);
+ } else {
+ if (fc_rp_debug)
+ FC_DBG("bad PLOGI response");
+
+ rjp = fc_frame_payload_get(fp, sizeof(*rjp));
+ if (op == ELS_LS_RJT && rjp != NULL &&
+ rjp->er_reason == ELS_RJT_INPROG)
+ fc_rport_retry(rport); /* try again */
+ else
+ fc_rport_reject(rport); /* error */
+ }
+ fc_rport_unlock(rport);
+ fc_frame_free(fp);
+ } else {
+ fc_rport_error(rport, fp);
+ }
+}
+
+/*
+ * Send ELS (extended link service) PLOGI request to peer.
+ * Allocation or send failure falls back to fc_rport_retry().
+ */
+static void fc_rport_enter_plogi(struct fc_rport *rport)
+{
+ struct fc_frame *fp;
+ struct fc_els_flogi *plogi;
+ struct fc_lport *lp;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+ lp = rp->local_port;
+ fc_rport_state_enter(rport, RPORT_ST_PLOGI);
+ /* assume minimum frame size until PLOGI negotiates a larger one */
+ rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+ fp = fc_frame_alloc(lp, sizeof(*plogi));
+ if (!fp)
+ return fc_rport_retry(rport);
+ plogi = fc_frame_payload_get(fp, sizeof(*plogi));
+ WARN_ON(!plogi);
+ fc_lport_plogi_fill(rp->local_port, plogi, ELS_PLOGI);
+ rp->e_d_tov = lp->e_d_tov;
+ fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+ if (!lp->tt.exch_seq_send(lp, fp,
+ fc_rport_plogi_recv_resp,
+ rport, lp->e_d_tov,
+ rp->local_port->fid,
+ rport->port_id,
+ FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+ fc_rport_retry(rport);
+}
+
+/*
+ * Handle incoming ELS PRLI response.
+ *
+ * On LS_ACC: reads the FCP service-parameter page to determine retry
+ * support and initiator/target roles (fcp_parm stays 0 -- roles
+ * unknown -- if the payload is short), then advances to RTV.
+ * Any other response puts the session in ERROR and, for the dNS
+ * rport, drives the lport state machine.
+ */
+static void fc_rport_prli_recv_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_rport *rport = rp_arg;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ u32 roles = FC_RPORT_ROLE_UNKNOWN;
+ u32 fcp_parm = 0;
+ u8 op;
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rport, fp);
+ return;
+ }
+
+ fc_rport_lock(rport);
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
+ fcp_parm = ntohl(pp->spp.spp_params);
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rp->flags |= FC_RP_FLAGS_RETRY;
+ }
+
+ rport->supported_classes = FC_COS_CLASS3;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ fc_rport_enter_rtv(rport);
+ fc_rport_unlock(rport);
+ /* rolechg may sleep/schedule; call outside the rport lock */
+ fc_remote_port_rolechg(rport, roles);
+ } else {
+ FC_DBG("bad ELS response\n");
+ fc_rport_state_enter(rport, RPORT_ST_ERROR);
+ fc_rport_unlock(rport);
+ if (rport == lp->dns_rp && lp->state != LPORT_ST_RESET) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->dns_rp = NULL;
+ if (lp->state == LPORT_ST_DNS_STOP) {
+ fc_lport_unlock(lp);
+ lp->tt.lport_logout(lp);
+ } else {
+ lp->tt.lport_login(lp);
+ fc_lport_unlock(lp);
+ }
+ fc_remote_port_delete(rport);
+ }
+ }
+
+ fc_frame_free(fp);
+}
+
+/*
+ * Handle incoming ELS LOGO response.
+ *
+ * NOTE(review): on LS_ACC this enters the RTV state -- that looks
+ * copy-pasted from the PRLI response handler; after a successful
+ * logout the rport would be expected to return to INIT. Confirm
+ * intended behavior before relying on this path.
+ */
+static void fc_rport_logo_recv_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_rport *rport = rp_arg;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+ u8 op;
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rport, fp);
+ return;
+ }
+
+ fc_rport_lock(rport);
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ fc_rport_enter_rtv(rport);
+ fc_rport_unlock(rport);
+ } else {
+ FC_DBG("bad ELS response\n");
+ fc_rport_state_enter(rport, RPORT_ST_ERROR);
+ fc_rport_unlock(rport);
+ if (rport == lp->dns_rp && lp->state != LPORT_ST_RESET) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->dns_rp = NULL;
+ if (lp->state == LPORT_ST_DNS_STOP) {
+ fc_lport_unlock(lp);
+ lp->tt.lport_logout(lp);
+ } else {
+ lp->tt.lport_login(lp);
+ fc_lport_unlock(lp);
+ }
+ fc_remote_port_delete(rport);
+ }
+ }
+
+ fc_frame_free(fp);
+}
+
+/*
+ * Send ELS PRLI request to target.
+ * Well-known addresses (>= FC_FID_DOM_MGR) skip PRLI and go straight
+ * to READY. Allocation/send failure falls back to fc_rport_retry().
+ */
+static void fc_rport_enter_prli(struct fc_rport *rport)
+{
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_frame *fp;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+
+ fc_rport_state_enter(rport, RPORT_ST_PRLI);
+
+ /*
+ * Special case if session is for name server or any other
+ * well-known address: Skip the PRLI step.
+ * This should be made more general, possibly moved to the FCP layer.
+ */
+ if (rport->port_id >= FC_FID_DOM_MGR) {
+ fc_rport_state_enter(rport, RPORT_ST_READY);
+ if (fc_rp_debug)
+ FC_DBG("remote %6x ready", rport->port_id);
+ if (rport == lp->dns_rp &&
+ lp->state == LPORT_ST_DNS) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->tt.disc_enter_dns(lp);
+ fc_lport_unlock(lp);
+ }
+ return;
+ }
+ fp = fc_frame_alloc(lp, sizeof(*pp));
+ if (!fp)
+ return fc_rport_retry(rport);
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ WARN_ON(!pp);
+ memset(pp, 0, sizeof(*pp));
+ pp->prli.prli_cmd = ELS_PRLI;
+ pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
+ pp->prli.prli_len = htons(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+ pp->spp.spp_params = htonl(rp->local_port->service_params);
+ fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+ if (!lp->tt.exch_seq_send(lp, fp,
+ fc_rport_prli_recv_resp,
+ rport, lp->e_d_tov,
+ rp->local_port->fid,
+ rport->port_id,
+ FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+ fc_rport_retry(rport);
+}
+
+/*
+ * Handle incoming ELS RTV response.
+ * Many targets don't seem to support this.
+ *
+ * On LS_ACC, updates R_A_TOV and E_D_TOV from the reply (clamping to
+ * a minimum of 1 ms); in all non-error cases the rport then becomes
+ * READY.
+ */
+static void fc_rport_els_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_rport *rport = rp_arg;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+ u8 op;
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rport, fp);
+ return;
+ }
+
+ fc_rport_lock(rport);
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ struct fc_els_rtv_acc *rtv;
+ u32 toq;
+ u32 tov;
+
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ if (rtv) {
+ toq = ntohl(rtv->rtv_toq);
+ tov = ntohl(rtv->rtv_r_a_tov);
+ if (tov == 0)
+ tov = 1;
+ rp->r_a_tov = tov;
+ tov = ntohl(rtv->rtv_e_d_tov);
+ /* EDRES: E_D_TOV given in ns; convert to ms */
+ if (toq & FC_ELS_RTV_EDRES)
+ tov /= 1000000;
+ if (tov == 0)
+ tov = 1;
+ rp->e_d_tov = tov;
+ }
+ }
+ fc_rport_state_enter(rport, RPORT_ST_READY);
+ fc_rport_unlock(rport);
+ if (fc_rp_debug)
+ FC_DBG("remote %6x ready", rport->port_id);
+ if (rport == lp->dns_rp &&
+ lp->state == LPORT_ST_DNS) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->tt.disc_enter_dns(lp);
+ fc_lport_unlock(lp);
+ }
+ fc_frame_free(fp);
+}
+
+/*
+ * Send ELS RTV (Request Timeout Value) request to remote port.
+ * Allocation/send failure falls back to fc_rport_retry().
+ */
+static void fc_rport_enter_rtv(struct fc_rport *rport)
+{
+ struct fc_els_rtv *rtv;
+ struct fc_frame *fp;
+ struct fc_lport *lp;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+ lp = rp->local_port;
+ fc_rport_state_enter(rport, RPORT_ST_RTV);
+
+ fp = fc_frame_alloc(lp, sizeof(*rtv));
+ if (!fp)
+ return fc_rport_retry(rport);
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ WARN_ON(!rtv);
+ memset(rtv, 0, sizeof(*rtv));
+ rtv->rtv_cmd = ELS_RTV;
+ fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+ if (!lp->tt.exch_seq_send(lp, fp,
+ fc_rport_els_rtv_resp,
+ rport, lp->e_d_tov,
+ rp->local_port->fid,
+ rport->port_id,
+ FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+ fc_rport_retry(rport);
+}
+
+static void fc_rport_enter_logo(struct fc_rport *rport)
+{
+ struct fc_frame *fp;
+ struct fc_els_logo *logo;
+ struct fc_lport *lp;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+
+ fc_rport_state_enter(rport, RPORT_ST_LOGO);
+
+ lp = rp->local_port;
+ fp = fc_frame_alloc(lp, sizeof(*logo));
+ if (!fp)
+ return fc_rport_retry(rport);
+ logo = fc_frame_payload_get(fp, sizeof(*logo));
+ memset(logo, 0, sizeof(*logo));
+ logo->fl_cmd = ELS_LOGO;
+ hton24(logo->fl_n_port_id, lp->fid);
+ logo->fl_n_port_wwn = htonll(lp->wwpn);
+
+ fc_frame_setup(fp, FC_RCTL_ELS_REQ, FC_TYPE_ELS);
+ if (!lp->tt.exch_seq_send(lp, fp,
+ fc_rport_logo_recv_resp,
+ rport, lp->e_d_tov,
+ rp->local_port->fid,
+ rport->port_id,
+ FC_FC_SEQ_INIT | FC_FC_END_SEQ))
+ fc_rport_retry(rport);
+}
+
+/*
+ * Handle a request received by the exchange manager for the session.
+ * This may be an entirely new session, or a PLOGI or LOGO for an existing one.
+ * This will free the frame.
+ *
+ * PLOGI/PRLI/PRLO/LOGO are dispatched to their handlers; RRQ and REC
+ * are answered directly by the exchange layer; anything else gets an
+ * LS_RJT (unsupported). Non-ELS frames are silently dropped.
+ */
+void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, void *rp_arg)
+{
+ struct fc_rport *rport = rp_arg;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_frame_header *fh;
+ struct fc_lport *lp = rp->local_port;
+ struct fc_seq_els_data els_data;
+ u8 op;
+
+ els_data.fp = NULL;
+ els_data.explan = ELS_EXPL_NONE;
+ els_data.reason = ELS_RJT_NONE;
+
+ fh = fc_frame_header_get(fp);
+
+ if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
+ op = fc_frame_payload_op(fp);
+ switch (op) {
+ case ELS_PLOGI:
+ fc_rport_recv_plogi_req(rport, sp, fp);
+ break;
+ case ELS_PRLI:
+ fc_rport_recv_prli_req(rport, sp, fp);
+ break;
+ case ELS_PRLO:
+ fc_rport_recv_prlo_req(rport, sp, fp);
+ break;
+ case ELS_LOGO:
+ fc_rport_recv_logo_req(rport, sp, fp);
+ break;
+ case ELS_RRQ:
+ els_data.fp = fp;
+ lp->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
+ break;
+ case ELS_REC:
+ els_data.fp = fp;
+ lp->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
+ break;
+ default:
+ els_data.reason = ELS_RJT_UNSUP;
+ lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+ break;
+ }
+ } else {
+ fc_frame_free(fp);
+ }
+}
+
+/*
+ * Handle incoming PLOGI request.
+ *
+ * Accept or reject based on the current rport state (see the switch
+ * below); on accept, record the remote WWPN/WWNN and max frame size,
+ * send LS_ACC, and advance to PRLI (or PLOGI_RECV if we had not sent
+ * our own PLOGI). Frees the received frame in all paths.
+ */
+static void fc_rport_recv_plogi_req(struct fc_rport *rport,
+ struct fc_seq *sp, struct fc_frame *rx_fp)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_frame *fp = rx_fp;
+ struct fc_frame_header *fh;
+ struct fc_lport *lp;
+ struct fc_els_flogi *pl;
+ struct fc_seq_els_data rjt_data;
+ u32 sid;
+ u64 wwpn;
+ u64 wwnn;
+ enum fc_els_rjt_reason reject = 0;
+ u32 f_ctl;
+
+ rjt_data.fp = NULL;
+ fh = fc_frame_header_get(fp);
+ sid = ntoh24(fh->fh_s_id);
+ pl = fc_frame_payload_get(fp, sizeof(*pl));
+ if (!pl) {
+ FC_DBG("incoming PLOGI from %x too short", sid);
+ WARN_ON(1);
+ /* XXX TBD: send reject? */
+ fc_frame_free(fp);
+ return;
+ }
+ wwpn = get_unaligned_be64(&pl->fl_wwpn);
+ wwnn = get_unaligned_be64(&pl->fl_wwnn);
+ fc_rport_lock(rport);
+ lp = rp->local_port;
+
+ /*
+ * If the session was just created, possibly due to the incoming PLOGI,
+ * set the state appropriately and accept the PLOGI.
+ *
+ * If we had also sent a PLOGI, and if the received PLOGI is from a
+ * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
+ * "command already in progress".
+ *
+ * XXX TBD: If the session was ready before, the PLOGI should result in
+ * all outstanding exchanges being reset.
+ */
+ switch (rp->rp_state) {
+ case RPORT_ST_INIT:
+ if (fc_rp_debug)
+ FC_DBG("incoming PLOGI from %6x wwpn %llx state INIT "
+ "- reject\n", sid, wwpn);
+ reject = ELS_RJT_UNSUP;
+ break;
+ case RPORT_ST_STARTED:
+ /*
+ * we'll only accept a login if the port name
+ * matches or was unknown.
+ */
+ if (rport->port_name != -1 &&
+ rport->port_name != wwpn) {
+ FC_DBG("incoming PLOGI from name %llx expected %llx\n",
+ wwpn, rport->port_name);
+ reject = ELS_RJT_UNAB;
+ }
+ break;
+ case RPORT_ST_PLOGI:
+ /* simultaneous PLOGI: higher WWPN wins */
+ if (fc_rp_debug)
+ FC_DBG("incoming PLOGI from %x in PLOGI state %d",
+ sid, rp->rp_state);
+ if (wwpn < lp->wwpn)
+ reject = ELS_RJT_INPROG;
+ break;
+ case RPORT_ST_PRLI:
+ case RPORT_ST_ERROR:
+ case RPORT_ST_READY:
+ if (fc_rp_debug)
+ FC_DBG("incoming PLOGI from %x in logged-in state %d "
+ "- ignored for now", sid, rp->rp_state);
+ /* XXX TBD - should reset */
+ break;
+ case RPORT_ST_NONE:
+ default:
+ if (fc_rp_debug)
+ FC_DBG("incoming PLOGI from %x in unexpected state %d",
+ sid, rp->rp_state);
+ break;
+ }
+
+ if (reject) {
+ rjt_data.reason = reject;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+ } else {
+ fp = fc_frame_alloc(lp, sizeof(*pl));
+ if (fp == NULL) {
+ /* no memory for the LS_ACC: reject so peer retries */
+ fp = rx_fp;
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+ } else {
+ sp = lp->tt.seq_start_next(sp);
+ WARN_ON(!sp);
+ fc_rport_set_name(rport, wwpn, wwnn);
+
+ /*
+ * Get session payload size from incoming PLOGI.
+ */
+ rport->maxframe_size =
+ fc_plogi_get_maxframe(pl, lp->mfs);
+ fc_frame_free(rx_fp);
+ pl = fc_frame_payload_get(fp, sizeof(*pl));
+ WARN_ON(!pl);
+ fc_lport_plogi_fill(lp, pl, ELS_LS_ACC);
+
+ /*
+ * Send LS_ACC. If this fails,
+ * the originator should retry.
+ */
+ f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+ lp->tt.seq_send(lp, sp, fp, f_ctl);
+ if (rp->rp_state == RPORT_ST_PLOGI)
+ fc_rport_enter_prli(rport);
+ else
+ fc_rport_state_enter(rport,
+ RPORT_ST_PLOGI_RECV);
+ }
+ }
+ fc_rport_unlock(rport);
+}
+
+/*
+ * Handle incoming PRLI request.
+ *
+ * Validates the PRLI payload lengths, then walks the service-parameter
+ * pages building an LS_ACC response; the FCP page sets retry support,
+ * class-3 and initiator/target roles. Invalid lengths or a bad state
+ * produce an LS_RJT. Frees the received frame in all paths.
+ */
+static void fc_rport_recv_prli_req(struct fc_rport *rport,
+ struct fc_seq *sp, struct fc_frame *rx_fp)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ struct fc_lport *lp;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_els_spp *rspp; /* request service param page */
+ struct fc_els_spp *spp; /* response spp */
+ unsigned int len;
+ unsigned int plen;
+ enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
+ enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
+ enum fc_els_spp_resp resp;
+ struct fc_seq_els_data rjt_data;
+ u32 f_ctl;
+ u32 fcp_parm;
+ u32 roles = FC_RPORT_ROLE_UNKNOWN;
+
+ rjt_data.fp = NULL;
+ fh = fc_frame_header_get(rx_fp);
+ lp = rp->local_port;
+ /* PRLI only makes sense after a PLOGI has been exchanged */
+ switch (rp->rp_state) {
+ case RPORT_ST_PLOGI_RECV:
+ case RPORT_ST_PRLI:
+ case RPORT_ST_READY:
+ reason = ELS_RJT_NONE;
+ break;
+ default:
+ break;
+ }
+ len = fr_len(rx_fp) - sizeof(*fh);
+ pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
+ if (pp == NULL) {
+ reason = ELS_RJT_PROT;
+ explan = ELS_EXPL_INV_LEN;
+ } else {
+ plen = ntohs(pp->prli.prli_len);
+ if ((plen % 4) != 0 || plen > len) {
+ reason = ELS_RJT_PROT;
+ explan = ELS_EXPL_INV_LEN;
+ } else if (plen < len) {
+ len = plen;
+ }
+ plen = pp->prli.prli_spp_len;
+ if ((plen % 4) != 0 || plen < sizeof(*spp) ||
+ plen > len || len < sizeof(*pp)) {
+ reason = ELS_RJT_PROT;
+ explan = ELS_EXPL_INV_LEN;
+ }
+ rspp = &pp->spp;
+ }
+ if (reason != ELS_RJT_NONE ||
+ (fp = fc_frame_alloc(lp, len)) == NULL) {
+ rjt_data.reason = reason;
+ rjt_data.explan = explan;
+ lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ } else {
+ sp = lp->tt.seq_start_next(sp);
+ WARN_ON(!sp);
+ pp = fc_frame_payload_get(fp, len);
+ WARN_ON(!pp);
+ memset(pp, 0, len);
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = plen;
+ pp->prli.prli_len = htons(len);
+ len -= sizeof(struct fc_els_prli);
+
+ /*
+ * Go through all the service parameter pages and build
+ * response. If plen indicates longer SPP than standard,
+ * use that. The entire response has been pre-cleared above.
+ */
+ spp = &pp->spp;
+ while (len >= plen) {
+ spp->spp_type = rspp->spp_type;
+ spp->spp_type_ext = rspp->spp_type_ext;
+ spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+ resp = FC_SPP_RESP_ACK;
+ if (rspp->spp_flags & FC_SPP_RPA_VAL)
+ resp = FC_SPP_RESP_NO_PA;
+ switch (rspp->spp_type) {
+ case 0: /* common to all FC-4 types */
+ break;
+ case FC_TYPE_FCP:
+ fcp_parm = ntohl(rspp->spp_params);
+ /* was "fcp_parm * FCP_SPPF_RETRY": a
+ * multiplication typo that made the flag
+ * test always true; must be bitwise AND. */
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rp->flags |= FC_RP_FLAGS_RETRY;
+ rport->supported_classes = FC_COS_CLASS3;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+ fc_remote_port_rolechg(rport, roles);
+ spp->spp_params =
+ htonl(rp->local_port->service_params);
+ break;
+ default:
+ resp = FC_SPP_RESP_INVL;
+ break;
+ }
+ spp->spp_flags |= resp;
+ len -= plen;
+ rspp = (struct fc_els_spp *)((char *)rspp + plen);
+ spp = (struct fc_els_spp *)((char *)spp + plen);
+ }
+
+ /*
+ * Send LS_ACC. If this fails, the originator should retry.
+ */
+ f_ctl = FC_FC_SEQ_INIT | FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ fc_frame_setup(fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
+ lp->tt.seq_send(lp, sp, fp, f_ctl);
+
+ /*
+ * Get lock and re-check state.
+ */
+ fc_rport_lock(rport);
+ switch (rp->rp_state) {
+ case RPORT_ST_PLOGI_RECV:
+ case RPORT_ST_PRLI:
+ fc_rport_state_enter(rport, RPORT_ST_READY);
+ if (fc_rp_debug)
+ FC_DBG("remote %6x ready", rport->port_id);
+ if (rport == lp->dns_rp &&
+ lp->state == LPORT_ST_DNS) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->tt.disc_enter_dns(lp);
+ fc_lport_unlock(lp);
+ }
+ break;
+ case RPORT_ST_READY:
+ break;
+ default:
+ break;
+ }
+ fc_rport_unlock(rport);
+ }
+ fc_frame_free(rx_fp);
+}
+
+/*
+ * Handle incoming PRLO request.
+ * Not supported: always answered with LS_RJT (unable). Frees the frame.
+ */
+static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_frame_header *fh;
+ struct fc_lport *lp = rp->local_port;
+ struct fc_seq_els_data rjt_data;
+
+ fh = fc_frame_header_get(fp);
+ FC_DBG("incoming PRLO from %x state %d",
+ ntoh24(fh->fh_s_id), rp->rp_state);
+ rjt_data.fp = NULL;
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_NONE;
+ lp->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+}
+
+/*
+ * Handle incoming LOGO request.
+ * Returns the rport to INIT, tears down the dNS linkage when this is
+ * the directory-server rport, then sends LS_ACC. Frees the frame.
+ */
+static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_lport *lp = rp->local_port;
+
+ fh = fc_frame_header_get(fp);
+ fc_rport_lock(rport);
+ fc_rport_state_enter(rport, RPORT_ST_INIT);
+ fc_rport_unlock(rport);
+ if (fc_rp_debug)
+ FC_DBG("remote %6x closed", rport->port_id);
+ if (rport == lp->dns_rp &&
+ lp->state != LPORT_ST_RESET) {
+ fc_lport_lock(lp);
+ del_timer(&lp->state_timer);
+ lp->dns_rp = NULL;
+ if (lp->state == LPORT_ST_DNS_STOP) {
+ fc_lport_unlock(lp);
+ lp->tt.lport_logout(lp);
+ } else {
+ lp->tt.lport_login(lp);
+ fc_lport_unlock(lp);
+ }
+ fc_remote_port_delete(rport);
+ }
+ lp->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+ fc_frame_free(fp);
+}
+
+/*
+ * Install the default rport operations into the lport's template.
+ * Only slots the LLD has not already filled in are set, so drivers
+ * may override any individual operation. Always returns 0.
+ */
+int fc_rport_init(struct fc_lport *lp)
+{
+ if (!lp->tt.rport_login)
+ lp->tt.rport_login = fc_rport_login;
+
+ if (!lp->tt.rport_logout)
+ lp->tt.rport_logout = fc_rport_logout;
+
+ if (!lp->tt.rport_recv_req)
+ lp->tt.rport_recv_req = fc_rport_recv_req;
+
+ if (!lp->tt.rport_create)
+ lp->tt.rport_create = fc_remote_port_create;
+
+ if (!lp->tt.rport_lookup)
+ lp->tt.rport_lookup = fc_rport_lookup;
+
+ if (!lp->tt.rport_reset)
+ lp->tt.rport_reset = fc_rport_reset;
+
+ if (!lp->tt.rport_reset_list)
+ lp->tt.rport_reset_list = fc_rport_reset_list;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_rport_init);
diff --git a/include/scsi/libfc/fc_frame.h b/include/scsi/libfc/fc_frame.h
new file mode 100644
index 0000000..c7a52bb
--- /dev/null
+++ b/include/scsi/libfc/fc_frame.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FRAME_H_
+#define _FC_FRAME_H_
+
+#include <linux/scatterlist.h>
+#include <linux/skbuff.h>
+
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_encaps.h>
+
+/*
+ * The fc_frame interface is used to pass frame data between functions.
+ * The frame includes the data buffer, length, and SOF / EOF delimiter types.
+ * A pointer to the port structure of the receiving port is also included.
+ */
+
+#define FC_FRAME_HEADROOM 32 /* headroom for VLAN + FCoE headers */
+#define FC_FRAME_TAILROOM 8 /* trailer space for FCoE */
+
+/*
+ * Information about an individual fibre channel frame received or to be sent.
+ * The buffer may be in up to 4 additional non-contiguous sections,
+ * but the linear section must hold the frame header.
+ */
+#define FC_FRAME_SG_LEN 4 /* scatter/gather list maximum length */
+
+#define fp_skb(fp) (&((fp)->skb))
+#define fr_hdr(fp) ((fp)->skb.data)
+#define fr_len(fp) ((fp)->skb.len)
+#define fr_cb(fp) ((struct fcoe_rcv_info *)&((fp)->skb.cb[0]))
+#define fr_dev(fp) (fr_cb(fp)->fr_dev)
+#define fr_seq(fp) (fr_cb(fp)->fr_seq)
+#define fr_sof(fp) (fr_cb(fp)->fr_sof)
+#define fr_eof(fp) (fr_cb(fp)->fr_eof)
+#define fr_flags(fp) (fr_cb(fp)->fr_flags)
+
+struct fc_frame {
+ struct sk_buff skb;
+};
+
+struct fcoe_rcv_info {
+ struct packet_type *ptype;
+ struct fc_lport *fr_dev; /* transport layer private pointer */
+ struct fc_seq *fr_seq; /* for use with exchange manager */
+ enum fc_sof fr_sof; /* start of frame delimiter */
+ enum fc_eof fr_eof; /* end of frame delimiter */
+ u8 fr_flags; /* flags - see below */
+};
+
+/*
+ * Get fc_frame pointer for an skb that's already been imported.
+ */
+static inline struct fcoe_rcv_info *fcoe_dev_from_skb(const struct sk_buff *skb)
+{
+	/* Compile-time guarantee that fcoe_rcv_info fits in skb->cb. */
+ BUILD_BUG_ON(sizeof(struct fcoe_rcv_info) > sizeof(skb->cb));
+ return (struct fcoe_rcv_info *) skb->cb;
+}
+
+/*
+ * fr_flags.
+ */
+#define FCPHF_CRC_UNCHECKED 0x01 /* CRC not computed, still appended */
+
+/*
+ * Initialize a frame.
+ * We don't do a complete memset here for performance reasons.
+ * The caller must set fr_free, fr_hdr, fr_len, fr_sof, and fr_eof eventually.
+ */
+static inline void fc_frame_init(struct fc_frame *fp)
+{
+	/* Only the fields read before full setup are cleared, for speed. */
+ fr_dev(fp) = NULL;
+ fr_seq(fp) = NULL;
+ fr_flags(fp) = 0;
+}
+
+struct fc_frame *fc_frame_alloc_fill(struct fc_lport *, size_t payload_len);
+
+struct fc_frame *__fc_frame_alloc(size_t payload_len);
+
+/*
+ * Get frame for sending via port.
+ */
+static inline struct fc_frame *_fc_frame_alloc(struct fc_lport *dev,
+ size_t payload_len)
+{
+ return __fc_frame_alloc(payload_len);
+}
+
+/*
+ * Allocate fc_frame structure and buffer. Set the initial length to
+ * payload_size + sizeof (struct fc_frame_header).
+ */
+static inline struct fc_frame *fc_frame_alloc(struct fc_lport *dev, size_t len)
+{
+ struct fc_frame *fp;
+
+ /*
+ * Note: Since len will often be a constant multiple of 4,
+ * this check will usually be evaluated and eliminated at compile time.
+	 *
+	 * fc_frame_alloc_fill() presumably pads odd-length payloads to a
+	 * 4-byte multiple — TODO confirm against its definition.
+ */
+ if ((len % 4) != 0)
+ fp = fc_frame_alloc_fill(dev, len);
+ else
+ fp = _fc_frame_alloc(dev, len);
+ return fp;
+}
+
+/*
+ * Free the fc_frame structure and buffer.
+ */
+static inline void fc_frame_free(struct fc_frame *fp)
+{
+ kfree_skb(fp_skb(fp));
+}
+
+static inline int fc_frame_is_linear(struct fc_frame *fp)
+{
+ return !skb_is_nonlinear(fp_skb(fp));
+}
+
+/*
+ * Get frame header from message in fc_frame structure.
+ * This hides a cast and provides a place to add some checking.
+ */
+static inline
+struct fc_frame_header *fc_frame_header_get(const struct fc_frame *fp)
+{
+ WARN_ON(fr_len(fp) < sizeof(struct fc_frame_header));
+ return (struct fc_frame_header *) fr_hdr(fp);
+}
+
+/*
+ * Get frame payload from message in fc_frame structure.
+ * This hides a cast and provides a place to add some checking.
+ * The len parameter is the minimum length for the payload portion.
+ * Returns NULL if the frame is too short.
+ *
+ * This assumes the interesting part of the payload is in the first part
+ * of the buffer for received data. This may not be appropriate to use for
+ * buffers being transmitted.
+ */
+static inline void *fc_frame_payload_get(const struct fc_frame *fp,
+ size_t len)
+{
+ void *pp = NULL;
+
+ if (fr_len(fp) >= sizeof(struct fc_frame_header) + len)
+ pp = fc_frame_header_get(fp) + 1;
+ return pp;
+}
+
+/*
+ * Get frame payload opcode (first byte) from message in fc_frame structure.
+ * This hides a cast and provides a place to add some checking. Return 0
+ * if the frame has no payload.
+ */
+static inline u8 fc_frame_payload_op(const struct fc_frame *fp)
+{
+ u8 *cp;
+
+ cp = fc_frame_payload_get(fp, sizeof(u8));
+ if (!cp)
+ return 0;
+ return *cp;
+
+}
+
+/*
+ * Get FC class from frame.
+ */
+static inline enum fc_class fc_frame_class(const struct fc_frame *fp)
+{
+ return fc_sof_class(fr_sof(fp));
+}
+
+/*
+ * Set r_ctl and type in preparation for sending frame.
+ * This also clears fh_parm_offset.
+ */
+static inline void fc_frame_setup(struct fc_frame *fp, enum fc_rctl r_ctl,
+ enum fc_fh_type type)
+{
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+	/* An R_CTL of 0 here indicates a caller that forgot to set it. */
+ WARN_ON(r_ctl == 0);
+ fh->fh_r_ctl = r_ctl;
+ fh->fh_type = type;
+ fh->fh_parm_offset = htonl(0);
+}
+
+/*
+ * Set offset in preparation for sending frame.
+ */
+static inline void
+fc_frame_set_offset(struct fc_frame *fp, u32 offset)
+{
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+ fh->fh_parm_offset = htonl(offset);
+}
+
+/*
+ * Check the CRC in a frame.
+ * The CRC immediately follows the last data item *AFTER* the length.
+ * The return value is zero if the CRC matches.
+ */
+u32 fc_frame_crc_check(struct fc_frame *);
+
+/*
+ * Check for leaks.
+ * Print the frame header of any currently allocated frame, assuming there
+ * should be none at this point.
+ */
+void fc_frame_leak_check(void);
+
+#endif /* _FC_FRAME_H_ */
diff --git a/include/scsi/libfc/libfc.h b/include/scsi/libfc/libfc.h
new file mode 100644
index 0000000..d3a2569
--- /dev/null
+++ b/include/scsi/libfc/libfc.h
@@ -0,0 +1,737 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _LIBFC_H_
+#define _LIBFC_H_
+
+#include <linux/timer.h>
+#include <linux/if.h>
+
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/fc/fc_els.h>
+
+#include <scsi/libfc/fc_frame.h>
+
+#define LIBFC_DEBUG
+
+#ifdef LIBFC_DEBUG
+/*
+ * Log message.
+ */
+#define FC_DBG(fmt, args...) \
+ do { \
+ printk(KERN_INFO "%s " fmt, __func__, ##args); \
+ } while (0)
+#else
+#define FC_DBG(fmt, args...)
+#endif
+
+/*
+ * libfc error codes
+ */
+#define FC_NO_ERR 0 /* no error */
+#define FC_EX_TIMEOUT 1 /* Exchange timeout */
+#define FC_EX_CLOSED 2 /* Exchange closed */
+
+/* some helpful macros */
+
+#define ntohll(x) be64_to_cpu(x)
+#define htonll(x) cpu_to_be64(x)
+
+#define ntoh24(p) (((p)[0] << 16) | ((p)[1] << 8) | ((p)[2]))
+
+#define hton24(p, v) do { \
+ p[0] = (((v) >> 16) & 0xFF); \
+ p[1] = (((v) >> 8) & 0xFF); \
+ p[2] = ((v) & 0xFF); \
+} while (0)
+
+struct fc_exch_mgr;
+
+/*
+ * tgt_flags
+ */
+#define FC_TGT_REC_SUPPORTED (1 << 0)
+
+/*
+ * FC HBA status
+ */
+#define FC_PAUSE (1 << 1)
+#define FC_LINK_UP (1 << 0)
+
+/* for fc_softc */
+#define FC_MAX_OUTSTANDING_COMMANDS 1024
+
+/*
+ * Transport Capabilities
+ */
+#define TRANS_C_SG (1 << 0) /* Scatter gather */
+
+enum fc_lport_state {
+ LPORT_ST_NONE = 0,
+ LPORT_ST_FLOGI,
+ LPORT_ST_DNS,
+ LPORT_ST_REG_PN,
+ LPORT_ST_REG_FT,
+ LPORT_ST_SCR,
+ LPORT_ST_READY,
+ LPORT_ST_DNS_STOP,
+ LPORT_ST_LOGO,
+ LPORT_ST_RESET
+};
+
+enum fc_rport_state {
+ RPORT_ST_NONE = 0,
+ RPORT_ST_INIT, /* initialized */
+ RPORT_ST_STARTED, /* started */
+ RPORT_ST_PLOGI, /* waiting for PLOGI completion */
+ RPORT_ST_PLOGI_RECV, /* received PLOGI (as target) */
+ RPORT_ST_PRLI, /* waiting for PRLI completion */
+ RPORT_ST_RTV, /* waiting for RTV completion */
+ RPORT_ST_ERROR, /* error */
+ RPORT_ST_READY, /* ready for use */
+ RPORT_ST_LOGO, /* port logout sent */
+};
+
+/*
+ * Fibre Channel information about remote N port.
+ */
+struct fc_rport_libfc_priv {
+ struct fc_lport *local_port;
+ enum fc_rport_state rp_state;
+ u16 flags;
+ #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
+ #define FC_RP_FLAGS_RETRY (1 << 1)
+ u16 max_seq; /* max concurrent sequences */
+ unsigned int retries; /* retry count in current state */
+ unsigned int e_d_tov; /* negotiated e_d_tov (msec) */
+ unsigned int r_a_tov; /* received r_a_tov (msec) */
+ spinlock_t rp_lock; /* lock on state changes */
+ struct delayed_work retry_work;
+};
+
+static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
+{
+ rport->node_name = wwnn;
+ rport->port_name = wwpn;
+}
+
+/*
+ * fcoe stats structure
+ */
+struct fcoe_dev_stats {
+ u64 SecondsSinceLastReset;
+ u64 TxFrames;
+ u64 TxWords;
+ u64 RxFrames;
+ u64 RxWords;
+ u64 ErrorFrames;
+ u64 DumpedFrames;
+ u64 LinkFailureCount;
+ u64 LossOfSignalCount;
+ u64 InvalidTxWordCount;
+ u64 InvalidCRCCount;
+ u64 InputRequests;
+ u64 OutputRequests;
+ u64 ControlRequests;
+ u64 InputMegabytes;
+ u64 OutputMegabytes;
+};
+
+/*
+ * els data is used for passing ELS response specific
+ * data to send ELS response mainly using information
+ * in exchange and sequence in EM layer.
+ */
+struct fc_seq_els_data {
+ struct fc_frame *fp;
+ enum fc_els_rjt_reason reason;
+ enum fc_els_rjt_explan explan;
+};
+
+struct libfc_function_template {
+
+ /**
+ * Mandatory Fields
+ *
+ * These handlers must be implemented by the LLD.
+ */
+
+ /*
+ * Interface to send a FC frame
+ */
+ int (*frame_send)(struct fc_lport *lp, struct fc_frame *fp);
+
+ /**
+ * Optional Fields
+ *
+ * The LLD may choose to implement any of the following handlers.
+ * If LLD doesn't specify handler and leaves its pointer NULL then
+ * the default libfc function will be used for that handler.
+ */
+
+ /**
+ * Exchange Manager interfaces
+ */
+
+ /*
+ * Send the FC frame payload using a new exchange and sequence.
+ *
+ * The frame pointer with some of the header's fields must be
+ * filled before calling exch_seq_send(), those fields are,
+ *
+ * - routing control
+ * - FC header type
+ * - parameter or relative offset
+ *
+ * The exchange response handler is set in this routine to resp()
+ * function pointer. It can be called in two scenarios: if a timeout
+ * occurs or if a response frame is received for the exchange. The
+ * fc_frame pointer in response handler will also indicate timeout
+ * as error using IS_ERR related macros.
+ *
+ * The response handler argument resp_arg is passed back to resp
+ * handler when it is invoked by EM layer in above mentioned
+ * two scenarios.
+ *
+ * The timeout value (in msec) for an exchange is set if non zero
+ * timer_msec argument is specified. The timer is canceled when
+ * it fires or when the exchange is done. The exchange timeout handler
+ * is registered by EM layer.
+ *
+ * The caller also needs to specify FC sid, did and frame control field.
+ */
+ struct fc_seq *(*exch_seq_send)(struct fc_lport *lp,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void *resp_arg, unsigned int timer_msec,
+ u32 sid, u32 did, u32 f_ctl);
+
+ /*
+ * send a frame using existing sequence and exchange.
+ */
+ int (*seq_send)(struct fc_lport *lp, struct fc_seq *sp,
+ struct fc_frame *fp, u32 f_ctl);
+
+ /*
+ * Send ELS response using mainly information
+ * in exchange and sequence in EM layer.
+ */
+ void (*seq_els_rsp_send)(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data);
+
+ /*
+ * Abort an exchange and sequence. Generally called because of a
+ * timeout or an abort from the upper layer.
+ */
+ int (*seq_exch_abort)(const struct fc_seq *req_sp);
+
+ /*
+ * Indicate that an exchange/sequence tuple is complete and the memory
+ * allocated for the related objects may be freed.
+ */
+ void (*exch_done)(struct fc_seq *sp);
+
+ /*
+ * Assigns an EM and a free XID for a new exchange and then
+ * allocates a new exchange and sequence pair.
+ * The fp can be used to determine free XID.
+ */
+ struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp);
+
+ /*
+ * Release previously assigned XID by exch_get API.
+ * The LLD may implement this if XID is assigned by LLD
+ * in exch_get().
+ */
+ void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp,
+ u16 ex_id);
+
+ /*
+ * Start a new sequence on the same exchange/sequence tuple.
+ */
+ struct fc_seq *(*seq_start_next)(struct fc_seq *sp);
+
+ /*
+ * Reset an exchange manager, completing all sequences and exchanges.
+ * If s_id is non-zero, reset only exchanges originating from that FID.
+ * If d_id is non-zero, reset only exchanges sending to that FID.
+ */
+ void (*exch_mgr_reset)(struct fc_exch_mgr *,
+ u32 s_id, u32 d_id);
+
+ /*
+ * Get exchange Ids of a sequence
+ */
+ void (*seq_get_xids)(struct fc_seq *sp, u16 *oxid, u16 *rxid);
+
+ /*
+ * Set REC data to a sequence
+ */
+ void (*seq_set_rec_data)(struct fc_seq *sp, u32 rec_data);
+
+ /**
+ * Local Port interfaces
+ */
+
+ /*
+ * Receive a frame to a local port.
+ */
+ void (*lport_recv)(struct fc_lport *lp, struct fc_seq *sp,
+ struct fc_frame *fp);
+
+ int (*lport_login)(struct fc_lport *);
+ int (*lport_reset)(struct fc_lport *);
+ int (*lport_logout)(struct fc_lport *);
+
+ /**
+ * Remote Port interfaces
+ */
+
+ /*
+ * Initiates the RP state machine. It is called from the LP module.
+ * This function will issue the following commands to the N_Port
+ * identified by the FC ID provided.
+ *
+ * - PLOGI
+ * - PRLI
+ * - RTV
+ */
+ int (*rport_login)(struct fc_rport *rport);
+
+ /*
+ * Logs the specified local port out of a N_Port identified
+ * by the ID provided.
+ */
+ int (*rport_logout)(struct fc_rport *rport);
+
+ void (*rport_recv_req)(struct fc_seq *, struct fc_frame *, void *);
+
+ struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
+
+ struct fc_rport *(*rport_create)(struct fc_lport *,
+ struct fc_rport_identifiers *);
+
+ void (*rport_reset)(struct fc_rport *);
+
+ void (*rport_reset_list)(struct fc_lport *);
+
+ /**
+ * SCSI interfaces
+ */
+
+ /*
+ * Used at least during linkdown and reset
+ */
+ void (*scsi_cleanup)(struct fc_lport *);
+
+ /*
+ * Abort all I/O on a local port
+ */
+ void (*scsi_abort_io)(struct fc_lport *);
+
+ /**
+ * Discovery interfaces
+ */
+
+ void (*disc_recv_req)(struct fc_seq *,
+ struct fc_frame *, struct fc_lport *);
+
+ /*
+ * Start discovery for a local port.
+ */
+ int (*disc_start)(struct fc_lport *);
+
+ void (*disc_enter_dns)(struct fc_lport *);
+ void (*disc_stop)(struct fc_lport *);
+};
+
+struct fc_lport {
+ struct list_head list;
+
+ /* Associations */
+ struct Scsi_Host *host;
+ struct fc_exch_mgr *emp;
+ struct fc_rport *dns_rp;
+ struct fc_rport *ptp_rp;
+ void *scsi_priv;
+
+ /* Operational Information */
+ struct libfc_function_template tt;
+ u16 link_status;
+ u8 ns_disc_done;
+ enum fc_lport_state state;
+ unsigned long boot_time;
+
+ struct fc_host_statistics host_stats;
+ struct fcoe_dev_stats *dev_stats[NR_CPUS];
+
+ u64 wwpn;
+ u64 wwnn;
+ u32 fid;
+ u8 retry_count;
+ unsigned char ns_disc_retry_count;
+ unsigned char ns_disc_delay;
+ unsigned char ns_disc_pending;
+ unsigned char ns_disc_requested;
+ unsigned short ns_disc_seq_count;
+ unsigned char ns_disc_buf_len;
+
+ /* Capabilities */
+ char ifname[IFNAMSIZ];
+ u32 capabilities;
+ u32 mfs; /* max FC payload size */
+ unsigned int service_params;
+ unsigned int e_d_tov;
+ unsigned int r_a_tov;
+ u8 max_retry_count;
+ u16 link_speed;
+ u16 link_supported_speeds;
+ struct fc_ns_fts fcts; /* FC-4 type masks */
+ struct fc_els_rnid_gen rnid_gen; /* RNID information */
+
+ /* Locks */
+ spinlock_t state_lock; /* serializes state changes */
+
+ /* Miscellaneous */
+ struct fc_gpn_ft_resp ns_disc_buf; /* partial name buffer */
+ struct timer_list state_timer; /* timer for state events */
+ struct delayed_work ns_disc_work;
+
+ void *drv_priv;
+};
+
+/**
+ * FC_LPORT HELPER FUNCTIONS
+ *****************************/
+
+static inline int fc_lport_test_ready(struct fc_lport *lp)
+{
+ return lp->state == LPORT_ST_READY;
+}
+
+static inline u32 fc_lport_get_fid(const struct fc_lport *lp)
+{
+ return lp->fid;
+}
+
+static inline void fc_set_wwnn(struct fc_lport *lp, u64 wwnn)
+{
+ lp->wwnn = wwnn;
+}
+
+/* Set the local port's World Wide Port Name. */
+static inline void fc_set_wwpn(struct fc_lport *lp, u64 wwnn)
+{
+	/* NOTE(review): parameter holds the WWPN despite being named wwnn. */
+ lp->wwpn = wwnn;
+}
+
+/*
+ * Return nonzero if the lport state lock appears to be held.
+ * On UP kernels without spinlock debugging, spin_is_locked() is
+ * meaningless, so the lock is assumed held; used only in WARN_ON checks.
+ */
+static inline int fc_lport_locked(struct fc_lport *lp)
+{
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+ return spin_is_locked(&lp->state_lock);
+#else
+ return 1;
+#endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */
+}
+
+/*
+ * Locking code.
+ */
+static inline void fc_lport_lock(struct fc_lport *lp)
+{
+ spin_lock_bh(&lp->state_lock);
+}
+
+static inline void fc_lport_unlock(struct fc_lport *lp)
+{
+ spin_unlock_bh(&lp->state_lock);
+}
+
+/*
+ * Enter a new local-port state.  Caller must hold the state lock.
+ * Cancels any pending state timer; the retry count is reset only on an
+ * actual state transition, so re-entering the same state preserves it.
+ */
+static inline void fc_lport_state_enter(struct fc_lport *lp,
+ enum fc_lport_state state)
+{
+ WARN_ON(!fc_lport_locked(lp));
+ del_timer(&lp->state_timer);
+ if (state != lp->state)
+ lp->retry_count = 0;
+ lp->state = state;
+}
+
+
+/**
+ * LOCAL PORT LAYER
+ *****************************/
+int fc_lport_init(struct fc_lport *lp);
+
+/*
+ * Destroy the specified local port by finding and freeing all
+ * fc_rports associated with it and then by freeing the fc_lport
+ * itself.
+ */
+int fc_lport_destroy(struct fc_lport *lp);
+
+/*
+ * Logout the specified local port from the fabric
+ */
+int fc_fabric_logoff(struct fc_lport *lp);
+
+/*
+ * Initiate the LP state machine. This handler will use fc_host_attr
+ * to store the FLOGI service parameters, so fc_host_attr must be
+ * initialized before calling this handler.
+ */
+int fc_fabric_login(struct fc_lport *lp);
+
+/*
+ * The link is up for the given local port.
+ */
+void fc_linkup(struct fc_lport *);
+
+/*
+ * Link is down for the given local port.
+ */
+void fc_linkdown(struct fc_lport *);
+
+/*
+ * Pause and unpause traffic.
+ */
+void fc_pause(struct fc_lport *);
+void fc_unpause(struct fc_lport *);
+
+/*
+ * Configure the local port.
+ */
+int fc_lport_config(struct fc_lport *);
+
+/*
+ * Reset the local port.
+ */
+int fc_lport_enter_reset(struct fc_lport *);
+
+/*
+ * Set the mfs or reset
+ */
+int fc_set_mfs(struct fc_lport *lp, u32 mfs);
+
+
+/**
+ * REMOTE PORT LAYER
+ *****************************/
+int fc_rport_init(struct fc_lport *lp);
+
+
+/**
+ * DISCOVERY LAYER
+ *****************************/
+int fc_ns_init(struct fc_lport *lp);
+
+
+/**
+ * SCSI LAYER
+ *****************************/
+/*
+ * Initialize the SCSI block of libfc
+ */
+int fc_fcp_init(struct fc_lport *);
+
+/*
+ * This section provides an API which allows direct interaction
+ * with the SCSI-ml. Each of these functions satisfies a function
+ * pointer defined in Scsi_Host and therefore is always called
+ * directly from the SCSI-ml.
+ */
+int fc_queuecommand(struct scsi_cmnd *sc_cmd,
+ void (*done)(struct scsi_cmnd *));
+
+/*
+ * Send an ABTS frame to the target device. The sc_cmd argument
+ * is a pointer to the SCSI command to be aborted.
+ */
+int fc_eh_abort(struct scsi_cmnd *sc_cmd);
+
+/*
+ * Reset a LUN by sending send the tm cmd to the target.
+ */
+int fc_eh_device_reset(struct scsi_cmnd *sc_cmd);
+
+/*
+ * Reset the host adapter.
+ */
+int fc_eh_host_reset(struct scsi_cmnd *sc_cmd);
+
+/*
+ * Check rport status.
+ */
+int fc_slave_alloc(struct scsi_device *sdev);
+
+/*
+ * Adjust the queue depth.
+ */
+int fc_change_queue_depth(struct scsi_device *sdev, int qdepth);
+
+/*
+ * Change the tag type.
+ */
+int fc_change_queue_type(struct scsi_device *sdev, int tag_type);
+
+/*
+ * Free memory pools used by the FCP layer.
+ */
+void fc_fcp_destroy(struct fc_lport *);
+
+
+/**
+ * EXCHANGE MANAGER LAYER
+ *****************************/
+/*
+ * Initializes Exchange Manager related
+ * function pointers in struct libfc_function_template.
+ */
+int fc_exch_init(struct fc_lport *lp);
+
+/*
+ * Allocates an Exchange Manager (EM).
+ *
+ * The EM manages exchanges for their allocation and
+ * free, also allows exchange lookup for received
+ * frame.
+ *
+ * The class is used for initializing FC class of
+ * allocated exchange from EM.
+ *
+ * The min_xid and max_xid will limit new
+ * exchange ID (XID) within this range for
+ * a new exchange.
+ * The LLD may choose to have multiple EMs,
+ * e.g. one EM instance per CPU receive thread in LLD.
+ * The LLD can use exch_get() of struct libfc_function_template
+ * to specify XID for a new exchange within
+ * a specified EM instance.
+ *
+ * The em_idx to uniquely identify an EM instance.
+ */
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
+ enum fc_class class,
+ u16 min_xid,
+ u16 max_xid,
+ u32 em_idx);
+
+/*
+ * Free an exchange manager.
+ */
+void fc_exch_mgr_free(struct fc_exch_mgr *mp);
+
+/*
+ * Receive a frame on specified local port and exchange manager.
+ */
+void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
+ struct fc_frame *fp);
+
+/*
+ * This function is for exch_seq_send function pointer in
+ * struct libfc_function_template, see comment block on
+ * exch_seq_send for description of this function.
+ */
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void *resp_arg, u32 timer_msec,
+ u32 sid, u32 did, u32 f_ctl);
+
+/*
+ * send a frame using existing sequence and exchange.
+ */
+int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp,
+ struct fc_frame *fp, u32 f_ctl);
+
+/*
+ * Send ELS response using mainly information
+ * in exchange and sequence in EM layer.
+ */
+void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data);
+
+
+/*
+ * Abort the exchange used by the given sequence.
+ */
+int fc_seq_exch_abort(const struct fc_seq *req_sp);
+
+/*
+ * Indicate that an exchange/sequence tuple is complete and the memory
+ * allocated for the related objects may be freed.
+ */
+void fc_exch_done(struct fc_seq *sp);
+
+/*
+ * Assigns an EM and XID for a frame and then allocates
+ * a new exchange and sequence pair.
+ * The fp can be used to determine free XID.
+ */
+struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp);
+
+/*
+ * Allocate a new exchange and sequence pair.
+ * If ex_id is zero, the next free exchange id
+ * from the specified exchange manager mp will be assigned.
+ */
+struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp, u16 ex_id);
+
+/*
+ * Start a new sequence on the same exchange as the supplied sequence.
+ */
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
+
+/*
+ * Reset an exchange manager, completing all sequences and exchanges.
+ * If s_id is non-zero, reset only exchanges originating from that FID.
+ * If d_id is non-zero, reset only exchanges sending to that FID.
+ */
+void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id);
+
+/*
+ * Get exchange Ids of a sequence
+ */
+void fc_seq_get_xids(struct fc_seq *sp, u16 *oxid, u16 *rxid);
+
+/*
+ * Set REC data to a sequence
+ */
+void fc_seq_set_rec_data(struct fc_seq *sp, u32 rec_data);
+
+/**
+ * fc_functions_template
+ *****************************/
+void fc_attr_init(struct fc_lport *);
+void fc_get_host_port_id(struct Scsi_Host *shost);
+void fc_get_host_speed(struct Scsi_Host *shost);
+void fc_get_host_port_type(struct Scsi_Host *shost);
+void fc_get_host_fabric_name(struct Scsi_Host *shost);
+void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout);
+struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *);
+
+#endif /* _LIBFC_H_ */
^ permalink raw reply related [flat|nested] 4+ messages in thread* [PATCH 3/3] [RFC] fcoe: Fibre Channel over Ethernet
2008-08-08 23:26 [PATCH 0/3][RFC] libfc and fcoe Robert Love
2008-08-08 23:26 ` [PATCH 1/3] [RFC] FC protocol definition header files Robert Love
2008-08-08 23:26 ` [PATCH 2/3] [RFC] libfc: a modular software Fibre Channel implementation Robert Love
@ 2008-08-08 23:26 ` Robert Love
2 siblings, 0 replies; 4+ messages in thread
From: Robert Love @ 2008-08-08 23:26 UTC (permalink / raw)
To: linux-scsi
Encapsulation protocol for running Fibre Channel over Ethernet interfaces.
Creates virtual Fibre Channel host adapters using libfc.
Signed-off-by: Robert Love <robert.w.love@intel.com>
Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Vasu Dev <vasu.dev@intel.com>
Signed-off-by: Yi Zou <yi.zou@intel.com>
Signed-off-by: Steve Ma <steve.ma@intel.com>
---
drivers/scsi/Kconfig | 6
drivers/scsi/Makefile | 1
drivers/scsi/fcoe/Makefile | 8 +
drivers/scsi/fcoe/fc_fcoe.h | 108 +++++++
drivers/scsi/fcoe/fcoe_def.h | 100 +++++++
drivers/scsi/fcoe/fcoe_dev.c | 633 ++++++++++++++++++++++++++++++++++++++++++
drivers/scsi/fcoe/fcoe_if.c | 504 +++++++++++++++++++++++++++++++++
drivers/scsi/fcoe/fcoeinit.c | 432 +++++++++++++++++++++++++++++
8 files changed, 1792 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index ae5e574..07b0196 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -334,6 +334,12 @@ config LIBFC
---help---
Fibre Channel library module
+config FCOE
+ tristate "FCoE module"
+ depends on LIBFC
+ ---help---
+ Fibre Channel over Ethernet module
+
config ISCSI_TCP
tristate "iSCSI Initiator over TCP/IP"
depends on SCSI && INET
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 9158dc6..b0aa59e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_SCSI_SRP_ATTRS) += scsi_transport_srp.o
obj-$(CONFIG_SCSI_DH) += device_handler/
obj-$(CONFIG_LIBFC) += libfc/
+obj-$(CONFIG_FCOE) += fcoe/
obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
obj-$(CONFIG_SCSI_A4000T) += 53c700.o a4000t.o
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
new file mode 100644
index 0000000..342e2ad
--- /dev/null
+++ b/drivers/scsi/fcoe/Makefile
@@ -0,0 +1,8 @@
+# $Id: Makefile
+
+obj-$(CONFIG_FCOE) += fcoe.o
+
+fcoe-y := \
+ fcoe_dev.o \
+ fcoe_if.o \
+ fcoeinit.o
diff --git a/drivers/scsi/fcoe/fc_fcoe.h b/drivers/scsi/fcoe/fc_fcoe.h
new file mode 100644
index 0000000..b2e07ec
--- /dev/null
+++ b/drivers/scsi/fcoe/fc_fcoe.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_FCOE_H_
+#define _FC_FCOE_H_
+
+/*
+ * FCoE - Fibre Channel over Ethernet.
+ */
+
+/*
+ * The FCoE ethertype eventually goes in net/if_ether.h.
+ */
+#ifndef ETH_P_FCOE
+#define ETH_P_FCOE 0x8906 /* FCOE ether type */
+#endif
+
+/*
+ * FC_FCOE_OUI hasn't been standardized yet. XXX TBD.
+ */
+#ifndef FC_FCOE_OUI
+#define FC_FCOE_OUI 0x0efc00 /* upper 24 bits of FCOE dest MAC TBD */
+#endif
+
+/*
+ * The destination MAC address for the fabric login may get a different OUI.
+ * This isn't standardized yet.
+ */
+#ifndef FC_FCOE_FLOGI_MAC
+/* gateway MAC - TBD */
+#define FC_FCOE_FLOGI_MAC { 0x0e, 0xfc, 0x00, 0xff, 0xff, 0xfe }
+#endif
+
+#define FC_FCOE_VER 0 /* version */
+
+/*
+ * Ethernet Addresses based on FC S_ID and D_ID.
+ * Generated by FC_FCOE_OUI | S_ID/D_ID
+ */
+#define FC_FCOE_ENCAPS_ID(n) (((u64) FC_FCOE_OUI << 24) | (n))
+#define FC_FCOE_DECAPS_ID(n) ((n) >> 24)
+
+/*
+ * FCoE frame header - 14 bytes
+ *
+ * This is the August 2007 version of the FCoE header as defined by T11.
+ * This follows the VLAN header, which includes the ethertype.
+ */
+struct fcoe_hdr {
+ __u8 fcoe_ver; /* version field - upper 4 bits */
+ __u8 fcoe_resvd[12]; /* reserved - send zero and ignore */
+ __u8 fcoe_sof; /* start of frame per RFC 3643 */
+};
+
+#define FC_FCOE_DECAPS_VER(hp) ((hp)->fcoe_ver >> 4)
+#define FC_FCOE_ENCAPS_VER(hp, ver) ((hp)->fcoe_ver = (ver) << 4)
+
+/*
+ * FCoE CRC & EOF - 8 bytes.
+ */
+struct fcoe_crc_eof {
+ __le32 fcoe_crc32; /* CRC for FC packet */
+ __u8 fcoe_eof; /* EOF from RFC 3643 */
+ __u8 fcoe_resvd[3]; /* reserved - send zero and ignore */
+} __attribute__((packed));
+
+/*
+ * Store OUI + DID into MAC address field.
+ */
+static inline void fc_fcoe_set_mac(u8 *mac, u8 *did)
+{
+	/* Upper 3 bytes: FCoE OUI, big-endian. */
+ mac[0] = (u8) (FC_FCOE_OUI >> 16);
+ mac[1] = (u8) (FC_FCOE_OUI >> 8);
+ mac[2] = (u8) FC_FCOE_OUI;
+	/* Lower 3 bytes: the 24-bit FC destination ID as given. */
+ mac[3] = did[0];
+ mac[4] = did[1];
+ mac[5] = did[2];
+}
+
+/*
+ * VLAN header. This is also defined in linux/if_vlan.h, but for kernels only.
+ */
+struct fcoe_vlan_hdr {
+ __be16 vlan_tag; /* VLAN tag including priority */
+ __be16 vlan_ethertype; /* encapsulated ethertype ETH_P_FCOE */
+};
+
+#ifndef ETH_P_8021Q
+#define ETH_P_8021Q 0x8100
+#endif
+
+#endif /* _FC_FCOE_H_ */
diff --git a/drivers/scsi/fcoe/fcoe_def.h b/drivers/scsi/fcoe/fcoe_def.h
new file mode 100644
index 0000000..defea60
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_def.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FCOE_DEF_H_
+#define _FCOE_DEF_H_
+
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+
+#include <scsi/libfc/libfc.h>
+
+#include "fc_fcoe.h"
+
+#define FCOE_DRIVER_NAME "fcoe" /* driver name for ioctls */
+#define FCOE_DRIVER_VENDOR "Open-FC.org" /* vendor name for ioctls */
+
+#define FCOE_MIN_FRAME 36
+#define FCOE_WORD_TO_BYTE 4
+
+/*
+ * This is the main common structure across all instances of the fcoe driver.
+ * There is a one-to-one mapping between an hba struct and an Ethernet NIC.
+ * The list of hbas contains pointers to the hba structs; these structures
+ * are stored in this array using their corresponding if_index.
+ */
+
+/* Per-CPU receive context: one kthread and its queue of incoming skbs. */
+struct fcoe_percpu_s {
+	int		cpu;			/* CPU this context belongs to */
+	struct task_struct *thread;		/* fcoe_percpu_receive_thread */
+	struct sk_buff_head fcoe_rx_list;	/* skbs queued by fcoe_rcv() */
+	struct page *crc_eof_page;		/* shared page for paged CRC/EOF trailers */
+	int		crc_eof_offset;		/* next free offset in crc_eof_page */
+};
+
+/* Module-global state shared by every fcoe instance. */
+struct fcoe_info {
+	struct timer_list timer;	/* fcoe_watchdog, re-armed every second */
+	/*
+	 * fcoe host list is protected by the following read/write lock
+	 */
+	rwlock_t fcoe_hostlist_lock;
+	struct list_head fcoe_hostlist;	/* list of struct fcoe_softc.list */
+
+	struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];	/* NULL if no thread on that CPU */
+};
+
+/* Per-interface FCoE instance; allocated after the fc_lport in shost_priv. */
+struct fcoe_softc {
+	struct list_head list;		/* link on fcoe_info.fcoe_hostlist */
+	struct fc_lport *lp;		/* back pointer to the libfc local port */
+	struct net_device *real_dev;	/* device frames are sent/received on */
+	struct net_device *phys_dev;		/* device with ethtool_ops */
+	struct packet_type fcoe_packet_type;	/* ETH_P_FCOE handler -> fcoe_rcv */
+	struct sk_buff_head fcoe_pending_queue;	/* skbs deferred after xmit failure */
+	u16	user_mfs;		/* configured max frame size */
+
+	u8	dest_addr[ETH_ALEN];		/* gateway MAC in FCOE_GW_ADDR_MODE */
+	u8	ctl_src_addr[ETH_ALEN];		/* NIC MAC, used while FLOGI is pending */
+	u8	data_src_addr[ETH_ALEN];	/* FC_FCOE_OUI|S_ID MAC after FLOGI */
+	/*
+	 * fcoe protocol address learning related stuff
+	 */
+	u16	flogi_oxid;		/* OX_ID of our outstanding FLOGI */
+	u8	flogi_progress;		/* non-zero while FLOGI is unanswered */
+	u8	address_mode;		/* FCOE_GW_ADDR_MODE or FCOE_FCOUI_ADDR_MODE */
+};
+
+extern int debug_fcoe;
+extern struct fcoe_percpu_s *fcoe_percpu[];
+extern struct scsi_transport_template *fcoe_transport_template;
+int fcoe_percpu_receive_thread(void *arg);
+
+/*
+ * HBA transport ops prototypes
+ */
+extern struct fcoe_info fcoei;
+
+void fcoe_clean_pending_queue(struct fc_lport *fd);
+void fcoe_watchdog(ulong vp);
+int fcoe_destroy_interface(const char *ifname);
+int fcoe_create_interface(const char *ifname);
+int fcoe_xmit(struct fc_lport *, struct fc_frame *);
+int fcoe_rcv(struct sk_buff *, struct net_device *,
+ struct packet_type *, struct net_device *);
+int fcoe_link_ok(struct fc_lport *);
+#endif /* _FCOE_DEF_H_ */
diff --git a/drivers/scsi/fcoe/fcoe_dev.c b/drivers/scsi/fcoe/fcoe_dev.c
new file mode 100644
index 0000000..4579a66
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_dev.c
@@ -0,0 +1,633 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * FCOE protocol file
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/kthread.h>
+#include <linux/crc32.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+#include <net/rtnetlink.h>
+
+#include <scsi/fc/fc_encaps.h>
+
+#include <scsi/libfc/libfc.h>
+#include <scsi/libfc/fc_frame.h>
+
+#include "fc_fcoe.h"
+#include "fcoe_def.h"
+
+#define FCOE_MAX_QUEUE_DEPTH 256
+
+/* destination address mode */
+#define FCOE_GW_ADDR_MODE 0x00
+#define FCOE_FCOUI_ADDR_MODE 0x01
+
+/* Function Prototypes */
+static int fcoe_check_wait_queue(struct fc_lport *);
+static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
+static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
+static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
+
+/*
+ * this is the fcoe receive function
+ * called by NET_RX_SOFTIRQ
+ * this function will receive the packet and
+ * build fc frame and pass it up
+ */
+int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
+	     struct packet_type *ptype, struct net_device *olddev)
+{
+	struct fc_lport *lp;
+	struct fcoe_rcv_info *fr;
+	struct fcoe_softc *fc;
+	struct fcoe_dev_stats *stats;
+	u8 *data;
+	struct fc_frame_header *fh;
+	unsigned short oxid;
+	int cpu_idx;
+	struct fcoe_percpu_s *fps;
+	struct fcoe_info *fci = &fcoei;
+
+	/* the packet_type is embedded in the softc, so recover the instance */
+	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
+	lp = fc->lp;
+	if (unlikely(lp == NULL)) {
+		FC_DBG("cannot find hba structure");
+		goto err2;
+	}
+
+	if (unlikely(debug_fcoe)) {
+		FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p tail:%p "
+		       "end:%p sum:%d dev:%s", skb->len, skb->data_len,
+		       skb->head, skb->data, skb_tail_pointer(skb),
+		       skb_end_pointer(skb), skb->csum,
+		       skb->dev ? skb->dev->name : "<NULL>");
+
+	}
+
+	/* check for FCOE packet type */
+	if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
+		FC_DBG("wrong FC type frame");
+		goto err;
+	}
+	/* peek past the FCoE header at the FC header to steer by exchange ID */
+	data = skb->data;
+	data += sizeof(struct fcoe_hdr);
+	fh = (struct fc_frame_header *)data;
+	oxid = ntohs(fh->fh_ox_id);
+
+	fr = fcoe_dev_from_skb(skb);
+	fr->fr_dev = lp;
+	fr->ptype = ptype;
+	cpu_idx = 0;
+#ifdef CONFIG_SMP
+	/*
+	 * The exchange ID are ANDed with num of online CPUs,
+	 * so that will have the least lock contention in
+	 * handling the exchange. if there is no thread
+	 * for a given idx then use first online cpu.
+	 */
+	/*
+	 * NOTE(review): the mask is num_online_cpus() >> 1, not
+	 * num_online_cpus() - 1 as the comment above implies; this halves
+	 * the set of receive threads actually used and relies on the NULL
+	 * check below for out-of-range slots — confirm intended.
+	 */
+	cpu_idx = oxid & (num_online_cpus() >> 1);
+	if (fci->fcoe_percpu[cpu_idx] == NULL)
+		cpu_idx = first_cpu(cpu_online_map);
+#endif
+	fps = fci->fcoe_percpu[cpu_idx];
+
+	spin_lock_bh(&fps->fcoe_rx_list.lock);
+	__skb_queue_tail(&fps->fcoe_rx_list, skb);
+	/* queue went non-empty: the thread may be sleeping, wake it */
+	if (fps->fcoe_rx_list.qlen == 1)
+		wake_up_process(fps->thread);
+
+	spin_unlock_bh(&fps->fcoe_rx_list.lock);
+
+	return 0;
+err:
+#ifdef CONFIG_SMP
+	stats = lp->dev_stats[smp_processor_id()];
+#else
+	stats = lp->dev_stats[0];
+#endif
+	/* NOTE(review): lport_config() can leave dev_stats[cpu] NULL on
+	 * allocation failure; this increment assumes it is non-NULL. */
+	stats->ErrorFrames++;
+
+err2:
+	kfree_skb(skb);
+	return -1;
+}
+
+/*
+ * Hand one skb to the netdevice queue.
+ * An extra reference is held across dev_queue_xmit() so the caller still
+ * owns the skb if the transmit fails and must be retried; on success the
+ * extra reference is dropped here and the stack owns the skb.
+ * Returns 0 on success or the non-zero dev_queue_xmit() result.
+ */
+static inline int fcoe_start_io(struct sk_buff *skb)
+{
+	int rc;
+
+	skb_get(skb);
+	rc = dev_queue_xmit(skb);
+	if (rc != 0)
+		return rc;
+	kfree_skb(skb);
+	return 0;
+}
+
+/*
+ * Append a tlen-byte trailer fragment to a nonlinear skb, carving it out
+ * of a per-CPU shared page so each trailer doesn't cost a full page.
+ * The caller maps the new (last) fragment and writes the CRC/EOF there.
+ * Returns 0 on success, -ENOMEM if no page could be allocated.
+ */
+static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
+{
+	struct fcoe_info *fci = &fcoei;
+	struct fcoe_percpu_s *fps;
+	struct page *page;
+	int cpu_idx;
+
+	/* disable preemption so the per-CPU page is used consistently */
+	cpu_idx = get_cpu();
+	fps = fci->fcoe_percpu[cpu_idx];
+	page = fps->crc_eof_page;
+	if (!page) {
+		page = alloc_page(GFP_ATOMIC);
+		if (!page) {
+			put_cpu();
+			return -ENOMEM;
+		}
+		fps->crc_eof_page = page;
+		WARN_ON(fps->crc_eof_offset != 0);
+	}
+
+	/* each fragment holds its own reference on the shared page */
+	get_page(page);
+	skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
+			   fps->crc_eof_offset, tlen);
+	skb->len += tlen;
+	skb->data_len += tlen;
+	skb->truesize += tlen;
+	fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
+
+	/* page exhausted: drop our cache reference; frags keep it alive */
+	if (fps->crc_eof_offset >= PAGE_SIZE) {
+		fps->crc_eof_page = NULL;
+		fps->crc_eof_offset = 0;
+		put_page(page);
+	}
+	put_cpu();
+	return 0;
+}
+
+/*
+ * this is the frame xmit routine
+ */
+/*
+ * Transmit one FC frame: compute the FC CRC, append the CRC/EOF trailer,
+ * prepend the Ethernet + FCoE headers, and queue the skb on the netdevice.
+ * Frames that cannot be sent immediately are parked on the softc's
+ * fcoe_pending_queue and retried by fcoe_check_wait_queue()/fcoe_watchdog().
+ * Always returns 0; failed frames are queued, not dropped.
+ */
+int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
+{
+	int indx;
+	int wlen, rc = 0;
+	u32 crc;
+	struct ethhdr *eh;
+	struct fcoe_crc_eof *cp;
+	struct sk_buff *skb;
+	struct fcoe_dev_stats *stats;
+	struct fc_frame_header *fh;
+	unsigned int hlen;		/* header length implies the version */
+	unsigned int tlen;		/* trailer length */
+	int flogi_in_progress = 0;
+	struct fcoe_softc *fc;
+	void *data;
+	u8 sof, eof;
+	struct fcoe_hdr *hp;
+
+	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
+
+	fc = (struct fcoe_softc *)lp->drv_priv;
+	/*
+	 * if it is a flogi then we need to learn gw-addr
+	 * and my own fcid
+	 */
+	fh = fc_frame_header_get(fp);
+	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
+		if (fc_frame_payload_op(fp) == ELS_FLOGI) {
+			fc->flogi_oxid = ntohs(fh->fh_ox_id);
+			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
+			fc->flogi_progress = 1;
+			flogi_in_progress = 1;
+		} else if (fc->flogi_progress && ntoh24(fh->fh_s_id) != 0) {
+			/*
+			 * Here we must've gotten an SID by accepting an FLOGI
+			 * from a point-to-point connection.  Switch to using
+			 * the source mac based on the SID.  The destination
+			 * MAC in this case would have been set by receving the
+			 * FLOGI.
+			 */
+			fc_fcoe_set_mac(fc->data_src_addr, fh->fh_s_id);
+			fc->flogi_progress = 0;
+		}
+	}
+
+	skb = fp_skb(fp);
+	sof = fr_sof(fp);
+	eof = fr_eof(fp);
+
+	/* CRC over the linear part, then over every page fragment */
+	crc = ~0;
+	crc = crc32(crc, skb->data, skb_headlen(skb));
+
+	for (indx = 0; indx < skb_shinfo(skb)->nr_frags; indx++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[indx];
+		unsigned long off = frag->page_offset;
+		unsigned long len = frag->size;
+
+		while (len > 0) {
+			unsigned long clen;
+
+			clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
+			data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
+					   KM_SKB_DATA_SOFTIRQ);
+			crc = crc32(crc, data + (off & ~PAGE_MASK),
+				    clen);
+			kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+			off += clen;
+			len -= clen;
+		}
+	}
+
+	/*
+	 * Get header and trailer lengths.
+	 * This is temporary code until we get rid of the old protocol.
+	 * Both versions have essentially the same trailer layout but T11
+	 * has padding afterwards.
+	 */
+	hlen = sizeof(struct fcoe_hdr);
+	tlen = sizeof(struct fcoe_crc_eof);
+
+	/*
+	 * copy fc crc and eof to the skb buff
+	 * Use utility buffer in the fc_frame part of the sk_buff for the
+	 * trailer.
+	 * We don't do a get_page for this frag, since that page may not be
+	 * managed that way.  So that skb_free() doesn't do that either, we
+	 * setup the destructor to remove this frag.
+	 */
+	if (skb_is_nonlinear(skb)) {
+		skb_frag_t *frag;
+		if (fcoe_get_paged_crc_eof(skb, tlen)) {
+			/*
+			 * Fix: an sk_buff must be freed with kfree_skb(),
+			 * not kfree(); kfree() on an skb leaks its data
+			 * buffer and corrupts the skb accounting.
+			 */
+			kfree_skb(skb);
+			return -ENOMEM;
+		}
+		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+		cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
+			+ frag->page_offset;
+	} else {
+		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
+	}
+
+	cp->fcoe_eof = eof;
+	cp->fcoe_crc32 = cpu_to_le32(~crc);
+	if (tlen == sizeof(*cp))
+		memset(cp->fcoe_resvd, 0, sizeof(cp->fcoe_resvd));
+	wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
+
+	if (skb_is_nonlinear(skb)) {
+		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+		cp = NULL;
+	}
+
+	/*
+	 * Fill in the control structures
+	 */
+	skb->ip_summed = CHECKSUM_NONE;
+	eh = (struct ethhdr *)skb_push(skb, hlen + sizeof(struct ethhdr));
+	if (fc->address_mode == FCOE_FCOUI_ADDR_MODE)
+		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
+	else
+		/* insert GW address */
+		memcpy(eh->h_dest, fc->dest_addr, ETH_ALEN);
+
+	/* before FLOGI completes, only the NIC's own MAC may be used */
+	if (unlikely(flogi_in_progress))
+		memcpy(eh->h_source, fc->ctl_src_addr, ETH_ALEN);
+	else
+		memcpy(eh->h_source, fc->data_src_addr, ETH_ALEN);
+
+	eh->h_proto = htons(ETH_P_FCOE);
+	skb->protocol = htons(ETH_P_802_3);
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+
+	hp = (struct fcoe_hdr *)(eh + 1);
+	memset(hp, 0, sizeof(*hp));
+	if (FC_FCOE_VER)
+		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
+	hp->fcoe_sof = sof;
+
+	stats = lp->dev_stats[smp_processor_id()];
+	stats->TxFrames++;
+	stats->TxWords += wlen;
+	skb->dev = fc->real_dev;
+
+	fr_dev(fp) = lp;
+	/* keep ordering: flush anything already queued before this frame */
+	if (fc->fcoe_pending_queue.qlen)
+		rc = fcoe_check_wait_queue(lp);
+
+	if (rc == 0)
+		rc = fcoe_start_io(skb);
+
+	if (rc) {
+		fcoe_insert_wait_queue(lp, skb);
+		if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+			fc_pause(lp);
+	}
+
+	return 0;
+}
+
+/*
+ * Per-CPU receive kthread: drains the CPU's fcoe_rx_list, validates the
+ * FCoE header and CRC, and hands good frames to the libfc exchange layer.
+ * Sleeps when the list is empty; woken by fcoe_rcv().  Returns 0 on stop.
+ */
+int fcoe_percpu_receive_thread(void *arg)
+{
+	struct fcoe_percpu_s *p = arg;
+	u32 fr_len;
+	unsigned int hlen;
+	unsigned int tlen;
+	struct fc_lport *lp;
+	struct fcoe_rcv_info *fr;
+	struct fcoe_dev_stats *stats;
+	struct fc_frame_header *fh;
+	struct sk_buff *skb;
+	struct fcoe_crc_eof *cp;
+	enum fc_sof sof;
+	struct fc_frame *fp;
+	u8 *mac = NULL;
+	struct fcoe_softc *fc;
+	struct fcoe_hdr *hp;
+
+	set_user_nice(current, 19);
+
+	while (!kthread_should_stop()) {
+
+		spin_lock_bh(&p->fcoe_rx_list.lock);
+		while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
+			/* empty queue: sleep until fcoe_rcv() wakes us */
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_bh(&p->fcoe_rx_list.lock);
+			schedule();
+			set_current_state(TASK_RUNNING);
+			if (kthread_should_stop())
+				return 0;
+			spin_lock_bh(&p->fcoe_rx_list.lock);
+		}
+		spin_unlock_bh(&p->fcoe_rx_list.lock);
+		fr = fcoe_dev_from_skb(skb);
+		lp = fr->fr_dev;
+		if (unlikely(lp == NULL)) {
+			FC_DBG("invalid HBA Structure");
+			kfree_skb(skb);
+			continue;
+		}
+
+		stats = lp->dev_stats[smp_processor_id()];
+
+		if (unlikely(debug_fcoe)) {
+			FC_DBG("skb_info: len:%d data_len:%d head:%p data:%p "
+			       "tail:%p end:%p sum:%d dev:%s",
+			       skb->len, skb->data_len,
+			       skb->head, skb->data, skb_tail_pointer(skb),
+			       skb_end_pointer(skb), skb->csum,
+			       skb->dev ? skb->dev->name : "<NULL>");
+		}
+
+		/*
+		 * Save source MAC address before discarding header.
+		 */
+		fc = lp->drv_priv;
+		if (unlikely(fc->flogi_progress))
+			mac = eth_hdr(skb)->h_source;
+
+		if (skb_is_nonlinear(skb))
+			skb_linearize(skb);	/* not ideal */
+
+		/*
+		 * Check the header and pull it off.
+		 */
+		hlen = sizeof(struct fcoe_hdr);
+
+		hp = (struct fcoe_hdr *)skb->data;
+		if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+			if (stats->ErrorFrames < 5)
+				FC_DBG("unknown FCoE version %x",
+				       FC_FCOE_DECAPS_VER(hp));
+			stats->ErrorFrames++;
+			kfree_skb(skb);
+			continue;
+		}
+		sof = hp->fcoe_sof;
+		skb_pull(skb, sizeof(*hp));
+		/*
+		 * If skb->len is shorter than the trailer, fr_len wraps to a
+		 * huge u32; skb_trim() then does nothing (it only shrinks),
+		 * and the fr_len > skb->len test below catches the runt.
+		 * Subtle, but correct — the order of these statements matters.
+		 */
+		fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+		skb_trim(skb, fr_len);
+		tlen = sizeof(struct fcoe_crc_eof);
+
+		if (unlikely(fr_len > skb->len)) {
+			if (stats->ErrorFrames < 5)
+				FC_DBG("length error fr_len 0x%x skb->len 0x%x",
+				       fr_len, skb->len);
+			stats->ErrorFrames++;
+			kfree_skb(skb);
+			continue;
+		}
+		stats->RxFrames++;
+		stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
+		fp = (struct fc_frame *) skb;
+		fc_frame_init(fp);
+		/* trailer data is still in the buffer past the trimmed len */
+		cp = (struct fcoe_crc_eof *)(skb->data + fr_len);
+		fr_eof(fp) = cp->fcoe_eof;
+		fr_sof(fp) = sof;
+		fr_dev(fp) = lp;
+
+		/*
+		 * Check the CRC here, unless it's solicited data for SCSI.
+		 * In that case, the SCSI layer can check it during the copy,
+		 * and it'll be more cache-efficient.
+		 */
+		fh = fc_frame_header_get(fp);
+		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
+		    fh->fh_type == FC_TYPE_FCP) {
+			fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
+			fc_exch_recv(lp, lp->emp, fp);
+		} else if (le32_to_cpu(cp->fcoe_crc32) ==
+			   ~crc32(~0, skb->data, fr_len)) {
+			if (unlikely(fc->flogi_progress))
+				fcoe_recv_flogi(fc, fp, mac);
+			fc_exch_recv(lp, lp->emp, fp);
+		} else {
+			if (debug_fcoe || stats->InvalidCRCCount < 5) {
+				printk(KERN_WARNING \
+				       "fcoe: dropping frame with CRC error");
+			}
+			stats->InvalidCRCCount++;
+			stats->ErrorFrames++;
+			fc_frame_free(fp);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Snoop potential response to FLOGI or even incoming FLOGI.
+ */
+static void fcoe_recv_flogi(struct fcoe_softc *fc, struct fc_frame *fp, u8 *sa)
+{
+	struct fc_frame_header *fh;
+	u8 op;
+
+	fh = fc_frame_header_get(fp);
+	if (fh->fh_type != FC_TYPE_ELS)
+		return;
+	op = fc_frame_payload_op(fp);
+	/* LS_ACC matching our pending FLOGI OX_ID: fabric login completed */
+	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
+	    fc->flogi_oxid == ntohs(fh->fh_ox_id)) {
+		/*
+		 * FLOGI accepted.
+		 * If the src mac addr is FC_OUI-based, then we mark the
+		 * address_mode flag to use FC_OUI-based Ethernet DA.
+		 * Otherwise we use the FCoE gateway addr
+		 */
+		if (!compare_ether_addr(sa, (u8[6]) FC_FCOE_FLOGI_MAC)) {
+			fc->address_mode = FCOE_FCOUI_ADDR_MODE;
+		} else {
+			memcpy(fc->dest_addr, sa, ETH_ALEN);
+			fc->address_mode = FCOE_GW_ADDR_MODE;
+		}
+
+		/*
+		 * Remove any previously-set unicast MAC filter.
+		 * Add secondary FCoE MAC address filter for our OUI.
+		 */
+		rtnl_lock();
+		if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
+			dev_unicast_delete(fc->real_dev, fc->data_src_addr,
+					   ETH_ALEN);
+		/* our assigned D_ID is in fh_d_id of the accept frame */
+		fc_fcoe_set_mac(fc->data_src_addr, fh->fh_d_id);
+		dev_unicast_add(fc->real_dev, fc->data_src_addr, ETH_ALEN);
+		rtnl_unlock();
+
+		fc->flogi_progress = 0;
+	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
+		/*
+		 * Save source MAC for point-to-point responses.
+		 */
+		memcpy(fc->dest_addr, sa, ETH_ALEN);
+		fc->address_mode = FCOE_GW_ADDR_MODE;
+	}
+}
+
+/*
+ * One-second timer: retry each lport's pending transmit queue and
+ * un-pause lports whose backlog has drained.  Re-arms itself.
+ */
+void fcoe_watchdog(ulong vp)
+{
+	struct fc_lport *lp;
+	struct fcoe_softc *fc;
+	struct fcoe_info *fci = &fcoei;
+	int paused = 0;
+
+	read_lock(&fci->fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
+		lp = fc->lp;
+		if (lp) {
+			/*
+			 * NOTE(review): 'paused' is never reset inside the
+			 * loop, so once one lport exceeds the depth limit,
+			 * fc_unpause() may be called on later lports that
+			 * were never paused — confirm this is harmless.
+			 */
+			if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+				paused = 1;
+			if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
+				if (paused)
+					fc_unpause(lp);
+			}
+		}
+	}
+	read_unlock(&fci->fcoe_hostlist_lock);
+
+	fci->timer.expires = jiffies + (1 * HZ);
+	add_timer(&fci->timer);
+}
+
+/*
+ * the wait_queue is used when the skb transmit fails. skb will go
+ * in the wait_queue which will be emptied by the time function OR
+ * by the next skb transmit.
+ *
+ */
+
+/*
+ * Function name : fcoe_check_wait_queue()
+ *
+ * Return Values : 0 or error
+ *
+ * Description : empties the wait_queue
+ * dequeue the head of the wait_queue queue and
+ * calls fcoe_start_io() for each packet
+ * if all skb have been transmitted, return 0
+ * if a error occurs, then restore wait_queue and try again
+ * later
+ *
+ */
+
+static int fcoe_check_wait_queue(struct fc_lport *lp)
+{
+	int rc, unpause = 0;
+	int paused = 0;
+	struct sk_buff *skb;
+	struct fcoe_softc *fc;
+
+	fc = (struct fcoe_softc *)lp->drv_priv;
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+
+	/*
+	 * is this interface paused?
+	 */
+	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+		paused = 1;
+	if (fc->fcoe_pending_queue.qlen) {
+		while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
+			/* drop the lock around the actual transmit */
+			spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+			rc = fcoe_start_io(skb);
+			if (rc) {
+				/* xmit still failing: requeue and try later */
+				fcoe_insert_wait_queue_head(lp, skb);
+				return rc;
+			}
+			spin_lock_bh(&fc->fcoe_pending_queue.lock);
+		}
+		if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
+			unpause = 1;
+	}
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+	if ((unpause) && (paused))
+		fc_unpause(lp);
+	/* NOTE(review): qlen is read here without the queue lock held */
+	return fc->fcoe_pending_queue.qlen;
+}
+
+/* Requeue a failed skb at the head so frame ordering is preserved. */
+static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
+					struct sk_buff *skb)
+{
+	struct fcoe_softc *fc;
+
+	fc = (struct fcoe_softc *)lp->drv_priv;
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	__skb_queue_head(&fc->fcoe_pending_queue, skb);
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
+
+/* Park a new skb at the tail of the pending queue for a later retry. */
+static void fcoe_insert_wait_queue(struct fc_lport *lp,
+				   struct sk_buff *skb)
+{
+	struct fcoe_softc *fc;
+
+	fc = (struct fcoe_softc *)lp->drv_priv;
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	__skb_queue_tail(&fc->fcoe_pending_queue, skb);
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
diff --git a/drivers/scsi/fcoe/fcoe_if.c b/drivers/scsi/fcoe/fcoe_if.c
new file mode 100644
index 0000000..b5a32c6
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_if.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * FCOE protocol file
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/rtnetlink.h>
+
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_encaps.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include <scsi/libfc/libfc.h>
+
+#include "fc_fcoe.h"
+#include "fcoe_def.h"
+
+#define FCOE_VERSION "0.1"
+
+#define FCOE_MAX_LUN 255
+#define FCOE_MAX_FCP_TARGET 256
+
+#define FCOE_MIN_XID 0x0004
+#define FCOE_MAX_XID 0x07ef
+
+int debug_fcoe;
+
+struct fcoe_info fcoei = {
+ .fcoe_hostlist = LIST_HEAD_INIT(fcoei.fcoe_hostlist),
+};
+
+/*
+ * Look up the fcoe instance bound to the named network interface.
+ * Returns the softc or NULL if no instance uses that ifname.
+ * NOTE(review): the pointer is returned after the hostlist lock is
+ * dropped; callers rely on the instance not disappearing concurrently.
+ */
+static struct fcoe_softc *fcoe_find_fc_lport(const char *name)
+{
+	struct fcoe_softc *fc;
+	struct fc_lport *lp;
+	struct fcoe_info *fci = &fcoei;
+
+	read_lock(&fci->fcoe_hostlist_lock);
+	list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
+		lp = fc->lp;
+		if (!strncmp(name, lp->ifname, IFNAMSIZ)) {
+			read_unlock(&fci->fcoe_hostlist_lock);
+			return fc;
+		}
+	}
+	read_unlock(&fci->fcoe_hostlist_lock);
+	return NULL;
+}
+
+/*
+ * Convert 48-bit IEEE MAC address to 64-bit FC WWN.
+ */
+/*
+ * Build a 64-bit FC WWN from a MAC address using IEEE naming:
+ * scheme 1 (NAA 1) embeds the MAC directly; scheme 2 (NAA 2) also
+ * packs a 12-bit port number into bits 48-59.
+ */
+static u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
+			     unsigned int scheme, unsigned int port)
+{
+	u64 wwn;
+	u64 host_mac;
+
+	/* assemble the 48-bit MAC, most-significant octet first */
+	host_mac = ((u64) mac[0] << 40) |
+		((u64) mac[1] << 32) |
+		((u64) mac[2] << 24) |
+		((u64) mac[3] << 16) |
+		((u64) mac[4] << 8) |
+		(u64) mac[5];
+
+	WARN_ON(host_mac >= (1ULL << 48));
+	wwn = host_mac | ((u64) scheme << 60);
+	switch (scheme) {
+	case 1:
+		WARN_ON(port != 0);
+		break;
+	case 2:
+		/* NOTE(review): 0xfff fits in 12 bits, so this bound looks
+		 * like it should be port > 0xfff — confirm intended range. */
+		WARN_ON(port >= 0xfff);
+		wwn |= (u64) port << 48;
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return wwn;
+}
+
+/* SCSI host template: queueing and error handling go through libfc. */
+static struct scsi_host_template fcoe_driver_template = {
+	.module = THIS_MODULE,
+	.name = "FCoE Driver",
+	.proc_name = FCOE_DRIVER_NAME,
+	.queuecommand = fc_queuecommand,
+	.eh_abort_handler = fc_eh_abort,
+	.eh_device_reset_handler = fc_eh_device_reset,
+	.eh_host_reset_handler = fc_eh_host_reset,
+	.slave_alloc = fc_slave_alloc,
+	.change_queue_depth = fc_change_queue_depth,
+	.change_queue_type = fc_change_queue_type,
+	.this_id = -1,
+	.cmd_per_lun = 32,
+	.can_queue = FC_MAX_OUTSTANDING_COMMANDS,
+	.use_clustering = ENABLE_CLUSTERING,
+	.sg_tablesize = 4,	/* NOTE(review): small S/G table — intentional? */
+	.max_sectors = 0xffff,
+};
+
+/*
+ * Tear down the fcoe instance on the named interface: unlink it, shut
+ * down libfc, detach from SCSI, remove MAC filters, and free all skbs
+ * and per-CPU statistics.  Returns 0 or -ENODEV if no instance exists.
+ */
+int fcoe_destroy_interface(const char *ifname)
+{
+	int cpu, idx;
+	struct fcoe_dev_stats *p;
+	struct fcoe_percpu_s *pp;
+	struct fcoe_softc *fc;
+	struct fcoe_rcv_info *fr;
+	struct fcoe_info *fci = &fcoei;
+	struct sk_buff_head *list;
+	struct sk_buff *skb, *next;
+	struct sk_buff *head;
+	struct fc_lport *lp;
+	u8 flogi_maddr[ETH_ALEN];
+
+	fc = fcoe_find_fc_lport(ifname);
+	if (!fc)
+		return -ENODEV;
+
+	lp = fc->lp;
+
+	/* Remove the instance from fcoe's list */
+	write_lock_bh(&fci->fcoe_hostlist_lock);
+	list_del(&fc->list);
+	write_unlock_bh(&fci->fcoe_hostlist_lock);
+
+	/* Cleanup the fc_lport */
+	fc_lport_destroy(lp);
+	fc_fcp_destroy(lp);
+	if (lp->emp)
+		fc_exch_mgr_free(lp->emp);
+
+	/* Detach from the scsi-ml */
+	fc_remove_host(lp->host);
+	scsi_remove_host(lp->host);
+
+	/* Don't listen for Ethernet packets anymore */
+	dev_remove_pack(&fc->fcoe_packet_type);
+
+	/* Delete secondary MAC addresses */
+	rtnl_lock();
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_delete(fc->real_dev, flogi_maddr, ETH_ALEN);
+	if (compare_ether_addr(fc->data_src_addr, (u8[6]) { 0 }))
+		dev_unicast_delete(fc->real_dev, fc->data_src_addr, ETH_ALEN);
+	rtnl_unlock();
+
+	/* Drop skbs for this lport still queued on the per-CPU receive threads */
+	for (idx = 0; idx < NR_CPUS; idx++) {
+		if (fci->fcoe_percpu[idx]) {
+			pp = fci->fcoe_percpu[idx];
+			spin_lock_bh(&pp->fcoe_rx_list.lock);
+			list = &pp->fcoe_rx_list;
+			head = list->next;
+			for (skb = head; skb != (struct sk_buff *)list;
+			     skb = next) {
+				next = skb->next;
+				fr = fcoe_dev_from_skb(skb);
+				if (fr->fr_dev == fc->lp) {
+					__skb_unlink(skb, list);
+					kfree_skb(skb);
+				}
+			}
+			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+		}
+	}
+
+	/* Free existing skbs */
+	fcoe_clean_pending_queue(lp);
+
+	/* Free memory used by statistical counters */
+	for_each_online_cpu(cpu) {
+		p = lp->dev_stats[cpu];
+		if (p) {
+			lp->dev_stats[cpu] = NULL;
+			kfree(p);
+		}
+	}
+
+	/* Release the net_device and Scsi_Host */
+	dev_put(fc->real_dev);
+	scsi_host_put(lp->host);
+	return 0;
+}
+
+/*
+ * Return zero if link is OK for use by FCoE.
+ * Any permanently-disqualifying conditions have been previously checked.
+ * This checks pause settings, which can change with link.
+ * This also updates the speed setting, which may change with link for 100/1000.
+ */
+int fcoe_link_ok(struct fc_lport *lp)
+{
+	struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
+	struct net_device *dev = fc->real_dev;
+	struct ethtool_pauseparam pause = { ETHTOOL_GPAUSEPARAM };
+	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+	int rc = 0;
+
+	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
+		/* ethtool ops live on the physical device, not the VLAN dev */
+		dev = fc->phys_dev;
+		dev->ethtool_ops->get_pauseparam(dev, &pause);
+		if (dev->ethtool_ops->get_settings) {
+			dev->ethtool_ops->get_settings(dev, &ecmd);
+			lp->link_supported_speeds &=
+				~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+			if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+					      SUPPORTED_1000baseT_Full))
+				lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+			if (ecmd.supported & SUPPORTED_10000baseT_Full)
+				lp->link_supported_speeds |=
+					FC_PORTSPEED_10GBIT;
+			if (ecmd.speed == SPEED_1000)
+				lp->link_speed = FC_PORTSPEED_1GBIT;
+			if (ecmd.speed == SPEED_10000)
+				lp->link_speed = FC_PORTSPEED_10GBIT;
+
+			/*
+			 * for 10 G (and faster), ignore autoneg requirement.
+			 */
+			if (ecmd.speed >= SPEED_10000)
+				pause.autoneg = 1;
+		}
+		/* FCoE requires pause (lossless Ethernet) in both directions */
+		if (!pause.autoneg || !pause.tx_pause || !pause.rx_pause)
+			rc = -1;
+	} else
+		rc = -1;
+
+	return rc;
+}
+
+static struct libfc_function_template fcoe_libfc_fcn_templ = {
+ .frame_send = fcoe_xmit,
+};
+
+/*
+ * Initialize the fc_lport embedded in the SCSI host: allocate the
+ * exchange manager, set default timeouts/retries, and allocate per-CPU
+ * statistics.  Returns 0 or -ENOMEM if the exchange manager allocation
+ * fails.
+ */
+static int lport_config(struct fc_lport *lp, struct Scsi_Host *shost)
+{
+	int i = 0;
+	struct fcoe_dev_stats *p;
+
+	lp->host = shost;
+	/* the fcoe_softc is allocated immediately after the fc_lport */
+	lp->drv_priv = (void *)(lp + 1);
+
+	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
+				    FCOE_MIN_XID, FCOE_MAX_XID, 0);
+	if (!lp->emp)
+		return -ENOMEM;
+
+	lp->link_status = 0;
+	lp->max_retry_count = 3;
+	lp->e_d_tov = 2 * 1000;	/* FC-FS default */
+	lp->r_a_tov = 2 * 2 * 1000;
+	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
+			      FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
+
+	/*
+	 * allocate per cpu stats block
+	 */
+	/* NOTE(review): a failed kzalloc leaves dev_stats[i] NULL, and the
+	 * xmit/receive paths dereference it unchecked — confirm handling. */
+	for_each_online_cpu(i) {
+		p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_KERNEL);
+		if (p)
+			lp->dev_stats[i] = p;
+	}
+
+	/* Finish fc_lport configuration */
+	fc_lport_config(lp);
+
+	return 0;
+}
+
+/*
+ * Bind the lport to its network device: validate ethtool/pause support,
+ * reject bonding devices, derive MFS and WWNs from the device, install
+ * the FLOGI MAC filter, and register the ETH_P_FCOE packet handler.
+ * Returns 0 or -EOPNOTSUPP for unsupported devices.
+ */
+static int net_config(struct fc_lport *lp)
+{
+	u32 mfs;
+	u64 wwnn, wwpn;
+	struct net_device *net_dev;
+	struct fcoe_softc *fc = (struct fcoe_softc *)lp->drv_priv;
+	u8 flogi_maddr[ETH_ALEN];
+
+	/* Require support for get_pauseparam ethtool op. */
+	net_dev = fc->real_dev;
+	if (!net_dev->ethtool_ops && (net_dev->priv_flags & IFF_802_1Q_VLAN))
+		net_dev = vlan_dev_real_dev(net_dev);
+	if (!net_dev->ethtool_ops || !net_dev->ethtool_ops->get_pauseparam)
+		return -EOPNOTSUPP;
+
+	fc->phys_dev = net_dev;
+
+	/* Do not support for bonding device */
+	if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
+	    (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
+	    (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Determine max frame size based on underlying device and optional
+	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
+	 * will return 0, so do this first.
+	 */
+	mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
+				   sizeof(struct fcoe_crc_eof));
+	fc_set_mfs(lp, mfs);
+
+	lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
+	if (!fcoe_link_ok(lp))
+		lp->link_status |= FC_LINK_UP;
+
+	if (fc->real_dev->features & NETIF_F_SG)
+		lp->capabilities = TRANS_C_SG;
+
+
+	skb_queue_head_init(&fc->fcoe_pending_queue);
+
+	memcpy(lp->ifname, fc->real_dev->name, IFNAMSIZ);
+
+	/* setup Source Mac Address */
+	memcpy(fc->ctl_src_addr, fc->real_dev->dev_addr,
+	       fc->real_dev->addr_len);
+
+	wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
+	fc_set_wwnn(lp, wwnn);
+	/* XXX - 3rd arg needs to be vlan id */
+	wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
+	fc_set_wwpn(lp, wwpn);
+
+	/*
+	 * Add FCoE MAC address as second unicast MAC address
+	 * or enter promiscuous mode if not capable of listening
+	 * for multiple unicast MACs.
+	 */
+	rtnl_lock();
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_add(fc->real_dev, flogi_maddr, ETH_ALEN);
+	rtnl_unlock();
+
+	/*
+	 * setup the receive function from ethernet driver
+	 * on the ethertype for the given device
+	 */
+	fc->fcoe_packet_type.func = fcoe_rcv;
+	fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+	fc->fcoe_packet_type.dev = fc->real_dev;
+	dev_add_pack(&fc->fcoe_packet_type);
+
+	return 0;
+}
+
+/* Set SCSI host limits and attach the FC transport template. */
+static void shost_config(struct fc_lport *lp)
+{
+	lp->host->max_lun = FCOE_MAX_LUN;
+	lp->host->max_id = FCOE_MAX_FCP_TARGET;
+	lp->host->max_channel = 0;
+	lp->host->transportt = fcoe_transport_template;
+}
+
+/*
+ * Install this driver's transport hooks into libfc and initialize its
+ * FCP, exchange, lport, rport, name-server, and attribute subsystems.
+ * Returns 0 or -ENOMEM if FCP initialization fails.
+ */
+static int libfc_config(struct fc_lport *lp)
+{
+	/* Set the function pointers set by the LLDD */
+	memcpy(&lp->tt, &fcoe_libfc_fcn_templ,
+	       sizeof(struct libfc_function_template));
+
+	if (fc_fcp_init(lp))
+		return -ENOMEM;
+	fc_exch_init(lp);
+	fc_lport_init(lp);
+	fc_rport_init(lp);
+	fc_ns_init(lp);
+	fc_attr_init(lp);
+
+	return 0;
+}
+
+/*
+ * This function creates the fcoe interface
+ * create struct fcdev which is a shared structure between opefc
+ * and transport level protocol.
+ */
+int fcoe_create_interface(const char *ifname)
+{
+	struct fc_lport *lp = NULL;
+	struct fcoe_softc *fc;
+	struct net_device *net_dev;
+	struct Scsi_Host *shost;
+	struct fcoe_info *fci = &fcoei;
+	int rc = 0;
+
+	net_dev = dev_get_by_name(&init_net, ifname);
+	if (net_dev == NULL) {
+		FC_DBG("could not get network device for %s",
+		       ifname);
+		return -ENODEV;
+	}
+
+	/* only one fcoe instance per interface */
+	if (fcoe_find_fc_lport(net_dev->name) != NULL) {
+		rc = -EEXIST;
+		goto out_put_dev;
+	}
+
+	/* single allocation holds both the fc_lport and the fcoe_softc */
+	shost = scsi_host_alloc(&fcoe_driver_template,
+				sizeof(struct fc_lport) +
+				sizeof(struct fcoe_softc));
+
+	if (!shost) {
+		FC_DBG("Could not allocate host structure\n");
+		rc = -ENOMEM;
+		goto out_put_dev;
+	}
+
+	lp = shost_priv(shost);
+	rc = lport_config(lp, shost);
+	if (rc)
+		goto out_host_put;
+
+	/* Configure the fcoe_softc */
+	fc = (struct fcoe_softc *)lp->drv_priv;
+	fc->lp = lp;
+	fc->real_dev = net_dev;
+	shost_config(lp);
+
+
+	/* Add the new host to the SCSI-ml */
+	rc = scsi_add_host(lp->host, NULL);
+	if (rc) {
+		FC_DBG("error on scsi_add_host\n");
+		goto out_lp_destroy;
+	}
+
+	sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
+		FCOE_DRIVER_NAME, FCOE_VERSION,
+		ifname);
+
+	/* Configure netdev and networking properties of the lp */
+	rc = net_config(lp);
+	if (rc)
+		goto out_lp_destroy;
+
+	/*
+	 * Initialize the library
+	 * NOTE(review): if libfc_config() fails after net_config()
+	 * succeeded, the packet handler and MAC filter installed by
+	 * net_config() are not removed on this error path — confirm.
+	 */
+	rc = libfc_config(lp);
+	if (rc)
+		goto out_lp_destroy;
+
+	write_lock_bh(&fci->fcoe_hostlist_lock);
+	list_add_tail(&fc->list, &fci->fcoe_hostlist);
+	write_unlock_bh(&fci->fcoe_hostlist_lock);
+
+	lp->boot_time = jiffies;
+
+	fc_fabric_login(lp);
+
+	return rc;
+
+out_lp_destroy:
+	fc_exch_mgr_free(lp->emp);	/* Free the EM */
+out_host_put:
+	scsi_host_put(lp->host);
+out_put_dev:
+	dev_put(net_dev);
+	return rc;
+}
+
+/* Drop every skb still parked on the lport's pending transmit queue. */
+void fcoe_clean_pending_queue(struct fc_lport *lp)
+{
+	struct fcoe_softc *fc = lp->drv_priv;
+	struct sk_buff *skb;
+
+	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
+		/* release the lock while freeing each skb */
+		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+		kfree_skb(skb);
+		spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	}
+	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+}
diff --git a/drivers/scsi/fcoe/fcoeinit.c b/drivers/scsi/fcoe/fcoeinit.c
new file mode 100644
index 0000000..375699f
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoeinit.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/ctype.h>
+
+#include <scsi/libfc/libfc.h>
+
+#include "fcoe_def.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("FCoE");
+MODULE_LICENSE("GPL");
+
+/*
+ * Static function and variable definitions
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
+#endif /* CONFIG_HOTPLUG_CPU */
+static int fcoe_device_notification(struct notifier_block *, ulong, void *);
+static void fcoe_dev_setup(void);
+static void fcoe_dev_cleanup(void);
+
+struct scsi_transport_template *fcoe_transport_template;
+
+/*
+ * fcoe_reset() - fc_host LIP handler.
+ * @shost: SCSI host whose private area is the libfc local port.
+ *
+ * Wired into fcoe_transport_function as .issue_fc_host_lip; implements
+ * a LIP by resetting the local port state machine. Always returns 0.
+ */
+static int fcoe_reset(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_lport_enter_reset(lport);
+ return 0;
+}
+
+/*
+ * FC transport template for FCoE hosts: enables the standard fc_host and
+ * fc_rport sysfs attributes, with the getters supplied by libfc and the
+ * host LIP ("reset") operation handled locally by fcoe_reset().
+ */
+struct fc_function_template fcoe_transport_function = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_active_fc4s = 1,
+
+ .get_host_port_id = fc_get_host_port_id,
+ .show_host_port_id = 1,
+ .get_host_speed = fc_get_host_speed,
+ .show_host_speed = 1,
+ .get_host_port_type = fc_get_host_port_type,
+ .show_host_port_type = 1,
+ .show_host_symbolic_name = 1,
+
+ /* per-rport private area reserved for libfc */
+ .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .get_host_fabric_name = fc_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+ .show_starget_port_id = 1,
+ .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+ .get_fc_host_stats = fc_get_host_stats,
+ .issue_fc_host_lip = fcoe_reset, /* LIP == local port reset */
+};
+
+struct fcoe_percpu_s *fcoe_percpu[NR_CPUS];
+
+#ifdef CONFIG_HOTPLUG_CPU
+static struct notifier_block fcoe_cpu_notifier = {
+ .notifier_call = fcoe_cpu_callback,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * notification function from net device
+ */
+static struct notifier_block fcoe_notifier = {
+ .notifier_call = fcoe_device_notification,
+};
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * create percpu stats block
+ * called by cpu add/remove notifier
+ */
+/*
+ * fcoe_create_percpu_data() - allocate the per-CPU stats block of every
+ * registered local port for a CPU that just came online.
+ * @cpu: index of the CPU being brought up.
+ *
+ * Called from the CPU hotplug notifier (fcoe_cpu_callback, CPU_ONLINE).
+ * An allocation failure is tolerated: the slot simply stays NULL, so
+ * all readers of dev_stats[cpu] must check for NULL.
+ */
+static void fcoe_create_percpu_data(int cpu)
+{
+ struct fc_lport *lp;
+ struct fcoe_softc *fc;
+ struct fcoe_dev_stats *p;
+ struct fcoe_info *fci = &fcoei;
+
+ write_lock_bh(&fci->fcoe_hostlist_lock);
+ list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
+ lp = fc->lp;
+ if (lp->dev_stats[cpu] == NULL) {
+ /*
+ * GFP_ATOMIC, not GFP_KERNEL: fcoe_hostlist_lock is
+ * held with bottom halves disabled here, so the
+ * allocation must not sleep.
+ */
+ p = kzalloc(sizeof(struct fcoe_dev_stats), GFP_ATOMIC);
+ if (p)
+ lp->dev_stats[cpu] = p;
+ }
+ }
+ write_unlock_bh(&fci->fcoe_hostlist_lock);
+}
+
+/*
+ * destroy percpu stats block
+ * called by cpu add/remove notifier
+ */
+/*
+ * fcoe_destroy_percpu_data() - free the per-CPU stats block of every
+ * registered local port when a CPU goes away.
+ * @cpu: index of the CPU that went offline.
+ *
+ * Counterpart of fcoe_create_percpu_data(); called from the CPU
+ * hotplug notifier (CPU_DEAD).
+ */
+static void fcoe_destroy_percpu_data(int cpu)
+{
+ struct fcoe_dev_stats *stats;
+ struct fcoe_softc *fc;
+ struct fcoe_info *fci = &fcoei;
+
+ write_lock_bh(&fci->fcoe_hostlist_lock);
+ list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
+ stats = fc->lp->dev_stats[cpu];
+ fc->lp->dev_stats[cpu] = NULL;
+ kfree(stats); /* kfree(NULL) is a no-op */
+ }
+ write_unlock_bh(&fci->fcoe_hostlist_lock);
+}
+
+/*
+ * Get notified when a cpu comes on/off. Be hotplug friendly.
+ */
+/*
+ * fcoe_cpu_callback() - CPU hotplug notifier hook.
+ * @nfb: the notifier block (unused).
+ * @action: hotplug event (CPU_ONLINE, CPU_DEAD, ...).
+ * @hcpu: the CPU number, passed cast to a pointer.
+ *
+ * Keeps the per-CPU statistics blocks in step with the set of online
+ * CPUs. Always returns NOTIFY_OK.
+ */
+static int fcoe_cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if (action == CPU_ONLINE)
+ fcoe_create_percpu_data(cpu);
+ else if (action == CPU_DEAD)
+ fcoe_destroy_percpu_data(cpu);
+
+ return NOTIFY_OK;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * function to setup link change notification interface
+ */
+/*
+ * fcoe_dev_setup() - hook this driver into netdevice event delivery.
+ *
+ * Registers fcoe_notifier so that link up/down and MTU changes on the
+ * underlying Ethernet devices reach fcoe_device_notification().
+ */
+static void fcoe_dev_setup(void)
+{
+ register_netdevice_notifier(&fcoe_notifier);
+}
+
+/*
+ * function to cleanup link change notification interface
+ */
+/*
+ * fcoe_dev_cleanup() - undo fcoe_dev_setup(): stop receiving
+ * netdevice events.
+ */
+static void fcoe_dev_cleanup(void)
+{
+ unregister_netdevice_notifier(&fcoe_notifier);
+}
+
+/*
+ * This function is called by the ethernet driver
+ * this is called in case of link change event
+ */
+/*
+ * fcoe_device_notification() - netdevice event handler.
+ * @notifier: the notifier block (unused).
+ * @event: NETDEV_* event code.
+ * @ptr: the net_device the event applies to.
+ *
+ * Translates link state changes on the underlying Ethernet device into
+ * libfc link up/down transitions, and re-derives the FC maximum frame
+ * size when the MTU changes. Returns NOTIFY_DONE for devices that have
+ * no FCoE instance, NOTIFY_OK otherwise.
+ *
+ * NOTE(review): readers here take read_lock() while the list writers
+ * use write_lock_bh(); confirm no writer runs in BH context on the
+ * same CPU as a reader.
+ */
+static int fcoe_device_notification(struct notifier_block *notifier,
+ ulong event, void *ptr)
+{
+ struct fc_lport *lp = NULL;
+ struct net_device *real_dev = ptr;
+ struct fcoe_softc *fc;
+ struct fcoe_dev_stats *stats;
+ struct fcoe_info *fci = &fcoei;
+ u16 new_status;
+ u32 mfs;
+ int rc = NOTIFY_OK;
+
+ /* Find the FCoE instance (if any) bound to this netdevice. */
+ read_lock(&fci->fcoe_hostlist_lock);
+ list_for_each_entry(fc, &fci->fcoe_hostlist, list) {
+ if (fc->real_dev == real_dev) {
+ lp = fc->lp;
+ break;
+ }
+ }
+ read_unlock(&fci->fcoe_hostlist_lock);
+ if (lp == NULL) {
+ rc = NOTIFY_DONE;
+ goto out;
+ }
+
+ new_status = lp->link_status;
+ switch (event) {
+ case NETDEV_DOWN:
+ case NETDEV_GOING_DOWN:
+ new_status &= ~FC_LINK_UP;
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ new_status &= ~FC_LINK_UP;
+ if (!fcoe_link_ok(lp))
+ new_status |= FC_LINK_UP;
+ break;
+ case NETDEV_CHANGEMTU:
+ /* FC payload budget = MTU minus FCoE header and trailer. */
+ mfs = fc->real_dev->mtu -
+ (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+ if (fc->user_mfs && fc->user_mfs < mfs)
+ mfs = fc->user_mfs;
+ if (mfs >= FC_MIN_MAX_FRAME)
+ fc_set_mfs(lp, mfs);
+ new_status &= ~FC_LINK_UP;
+ if (!fcoe_link_ok(lp))
+ new_status |= FC_LINK_UP;
+ break;
+ case NETDEV_REGISTER:
+ break;
+ default:
+ FC_DBG("unknown event %ld call", event);
+ }
+ if (lp->link_status != new_status) {
+ lp->link_status = new_status;
+ if ((new_status & FC_LINK_UP) == FC_LINK_UP) {
+ fc_linkup(lp);
+ } else {
+ /*
+ * The per-CPU stats block may be missing if its
+ * allocation failed (fcoe_create_percpu_data leaves
+ * the slot NULL on failure), so check before
+ * bumping LinkFailureCount.
+ */
+ stats = lp->dev_stats[smp_processor_id()];
+ if (stats)
+ stats->LinkFailureCount++;
+ fc_linkdown(lp);
+ fcoe_clean_pending_queue(lp);
+ }
+ }
+out:
+ return rc;
+}
+
+/*
+ * trimstr() - overwrite trailing newline characters with NULs, in place.
+ * @str: NUL-terminated string to trim.
+ * @len: length of @str, excluding the terminator.
+ */
+static void trimstr(char *str, int len)
+{
+ char *cp;
+
+ for (cp = str + len - 1; cp >= str && *cp == '\n'; cp--)
+ *cp = '\0';
+}
+
+/*
+ * fcoe_destroy() - sysfs "destroy" attribute store handler.
+ * @kobj/@attr: sysfs plumbing (unused).
+ * @buffer: user-written interface name, possibly newline-terminated.
+ * @size: number of bytes written.
+ *
+ * Strips trailing newlines from the given name and tears down the FCoE
+ * instance on that interface. Always consumes the whole write.
+ */
+static ssize_t fcoe_destroy(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buffer, size_t size)
+{
+ char ifname[40];
+
+ /*
+ * The buffer is user-controlled and can be far longer than
+ * ifname; strcpy() would overflow the stack buffer. snprintf()
+ * bounds the copy and always NUL-terminates.
+ */
+ snprintf(ifname, sizeof(ifname), "%s", buffer);
+ trimstr(ifname, strlen(ifname));
+ fcoe_destroy_interface(ifname);
+ return size;
+}
+
+/*
+ * fcoe_create() - sysfs "create" attribute store handler.
+ * @kobj/@attr: sysfs plumbing (unused).
+ * @buffer: user-written interface name, possibly newline-terminated.
+ * @size: number of bytes written.
+ *
+ * Strips trailing newlines from the given name and creates an FCoE
+ * instance on that interface. Always consumes the whole write.
+ */
+static ssize_t fcoe_create(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buffer, size_t size)
+{
+ char ifname[40];
+
+ /*
+ * The buffer is user-controlled and can be far longer than
+ * ifname; strcpy() would overflow the stack buffer. snprintf()
+ * bounds the copy and always NUL-terminates.
+ */
+ snprintf(ifname, sizeof(ifname), "%s", buffer);
+ trimstr(ifname, strlen(ifname));
+ fcoe_create_interface(ifname);
+ return size;
+}
+
+static const struct kobj_attribute fcoe_destroyattr = \
+ __ATTR(destroy, S_IWUSR, NULL, fcoe_destroy);
+static const struct kobj_attribute fcoe_createattr = \
+ __ATTR(create, S_IWUSR, NULL, fcoe_create);
+
+/*
+ * Initialization routine
+ * 1. Will create fc transport software structure
+ * 2. initialize the link list of port information structure
+ */
+/*
+ * fcoeinit() - module entry point.
+ *
+ * Attaches the FC transport, publishes the create/destroy sysfs
+ * controls, registers the CPU and netdevice notifiers, starts one
+ * receive thread per online CPU and arms the transmit watchdog timer.
+ *
+ * NOTE(review): fci->fcoe_hostlist is assumed to be initialized
+ * statically elsewhere; only its lock is initialized here — confirm.
+ */
+static int __init fcoeinit(void)
+{
+ int cpu;
+ struct fcoe_percpu_s *p;
+ struct fcoe_info *fci = &fcoei;
+
+ /*
+ * Attach the FC transport first: it is the only step that can
+ * fail, so failing here needs no unwinding of threads, timers or
+ * notifiers, and the sysfs "create" hook can never run before
+ * the transport exists.
+ */
+ fcoe_transport_template =
+ fc_attach_transport(&fcoe_transport_function);
+ if (fcoe_transport_template == NULL) {
+ FC_DBG("fail to attach fc transport");
+ return -ENODEV;
+ }
+
+ /* Sysfs failures are non-fatal: the driver still works without
+ * the create/destroy controls, so only log them. */
+ if (sysfs_create_file(&THIS_MODULE->mkobj.kobj,
+ &fcoe_destroyattr.attr))
+ FC_DBG("failed to create sysfs destroy attribute\n");
+ if (sysfs_create_file(&THIS_MODULE->mkobj.kobj,
+ &fcoe_createattr.attr))
+ FC_DBG("failed to create sysfs create attribute\n");
+
+ rwlock_init(&fci->fcoe_hostlist_lock);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ register_cpu_notifier(&fcoe_cpu_notifier);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+ /*
+ * Start one receive thread per online CPU, each bound to its
+ * CPU. A failed allocation or thread creation leaves that CPU's
+ * slot NULL.
+ */
+ for_each_online_cpu(cpu) {
+ p = kzalloc(sizeof(struct fcoe_percpu_s), GFP_KERNEL);
+ if (!p)
+ continue;
+ p->thread = kthread_create(fcoe_percpu_receive_thread,
+ (void *)p,
+ "fcoethread/%d", cpu);
+ if (IS_ERR(p->thread)) {
+ fci->fcoe_percpu[cpu] = NULL;
+ kfree(p);
+ continue;
+ }
+ p->cpu = cpu;
+ fci->fcoe_percpu[cpu] = p;
+ skb_queue_head_init(&p->fcoe_rx_list);
+ kthread_bind(p->thread, cpu);
+ wake_up_process(p->thread);
+ }
+
+ /*
+ * setup link change notification
+ */
+ fcoe_dev_setup();
+
+ /* Arm the transmit watchdog; first expiry in 10 seconds. */
+ init_timer(&fci->timer);
+ fci->timer.data = (ulong) fci;
+ fci->timer.function = fcoe_watchdog;
+ fci->timer.expires = (jiffies + (10 * HZ));
+ add_timer(&fci->timer);
+
+ return 0;
+}
+
+/*
+ * fcoe_exit() - module unload: tear everything down in reverse order.
+ *
+ * Unregisters the CPU and netdevice notifiers, stops the watchdog
+ * timer, destroys every FCoE instance, stops each per-CPU receive
+ * thread (draining its skb queue), then releases the FC transport.
+ */
+static void __exit fcoe_exit(void)
+{
+ u32 idx;
+ struct fcoe_softc *fc, *tmp;
+ struct fc_lport *lp;
+ struct fcoe_info *fci = &fcoei;
+ struct fcoe_percpu_s *p;
+ struct sk_buff *skb;
+
+ /*
+ * Stop all callback interfaces first so no new events arrive
+ * during teardown.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+ unregister_cpu_notifier(&fcoe_cpu_notifier);
+#endif /* CONFIG_HOTPLUG_CPU */
+ fcoe_dev_cleanup();
+
+ /*
+ * stop timer
+ */
+ del_timer_sync(&fci->timer);
+
+ /*
+ * Assuming no ioctl can be in progress at this point, the host
+ * list is walked without taking fcoe_hostlist_lock.
+ */
+ list_for_each_entry_safe(fc, tmp, &fci->fcoe_hostlist, list) {
+ lp = fc->lp;
+ fcoe_destroy_interface(lp->ifname);
+ }
+
+ /*
+ * Stop each receive thread, free any skbs still queued to it,
+ * and release its CRC/EOF page and control block.
+ */
+ for (idx = 0; idx < NR_CPUS; idx++) {
+ if (fci->fcoe_percpu[idx]) {
+ kthread_stop(fci->fcoe_percpu[idx]->thread);
+ p = fci->fcoe_percpu[idx];
+ spin_lock_bh(&p->fcoe_rx_list.lock);
+ while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
+ kfree_skb(skb);
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+ if (fci->fcoe_percpu[idx]->crc_eof_page)
+ put_page(fci->fcoe_percpu[idx]->crc_eof_page);
+ kfree(fci->fcoe_percpu[idx]);
+ }
+ }
+
+ fc_release_transport(fcoe_transport_template);
+}
+
+module_init(fcoeinit);
+module_exit(fcoe_exit);
^ permalink raw reply related [flat|nested] 4+ messages in thread