From mboxrd@z Thu Jan 1 00:00:00 1970 From: sebastien dugue Subject: [PATCH 2/3] opensm/qos_policy: Add a new service ID and keyword for Lustre QoS Date: Wed, 13 Jan 2010 15:54:42 +0100 Message-ID: <20100113155442.28930e5c@frecb007965> References: <20100113154952.0f01aa1d@frecb007965> Mime-Version: 1.0 Content-Type: text/plain; charset=US-ASCII Content-Transfer-Encoding: 7bit Return-path: In-Reply-To: <20100113154952.0f01aa1d@frecb007965> Sender: linux-rdma-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org To: linux-rdma Cc: Roland Dreier , Sasha Khapyorsky List-Id: linux-rdma@vger.kernel.org This patch adds a new service ID for the Lustre ULP as well as the lexer and parser bits for the policy definition simplified syntax. The QoS policy parser now supports the following 'qos-ulps' keywords: lustre : 0 #default SL for Lustre lustre, target-port-guid 0x1234 : 1 #SL for Lustre with target port guid lustre, port-num 10000-20000 : 2 #SL for Lustre port range This, along with patches to the kernel rdma cma and Lustre, allows defining a specific QoS for Lustre. 
Signed-off-by: Sebastien Dugue --- opensm/include/opensm/osm_qos_policy.h | 11 ++-- opensm/opensm/osm_qos_parser_l.l | 8 +++ opensm/opensm/osm_qos_parser_y.y | 108 ++++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+), 5 deletions(-) diff --git a/opensm/include/opensm/osm_qos_policy.h b/opensm/include/opensm/osm_qos_policy.h index 03ee891..ea3998a 100644 --- a/opensm/include/opensm/osm_qos_policy.h +++ b/opensm/include/opensm/osm_qos_policy.h @@ -54,11 +54,12 @@ #define OSM_QOS_POLICY_MAX_PORTS_ON_SWITCH 128 #define OSM_QOS_POLICY_DEFAULT_LEVEL_NAME "default" -#define OSM_QOS_POLICY_ULP_SDP_SERVICE_ID 0x0000000000010000ULL -#define OSM_QOS_POLICY_ULP_RDS_SERVICE_ID 0x0000000001060000ULL -#define OSM_QOS_POLICY_ULP_RDS_PORT 0x48CA -#define OSM_QOS_POLICY_ULP_ISER_SERVICE_ID 0x0000000001060000ULL -#define OSM_QOS_POLICY_ULP_ISER_PORT 0x0CBC +#define OSM_QOS_POLICY_ULP_SDP_SERVICE_ID 0x0000000000010000ULL +#define OSM_QOS_POLICY_ULP_RDS_SERVICE_ID 0x0000000001060000ULL +#define OSM_QOS_POLICY_ULP_RDS_PORT 0x48CA +#define OSM_QOS_POLICY_ULP_ISER_SERVICE_ID 0x0000000001060000ULL +#define OSM_QOS_POLICY_ULP_ISER_PORT 0x0CBC +#define OSM_QOS_POLICY_ULP_LUSTRE_SERVICE_ID 0x0000000001530000ULL #define OSM_QOS_POLICY_NODE_TYPE_CA (((uint8_t)1)<pkey_range_len = range_len; } qos_ulp_sl + + | qos_ulp_type_lustre_default { + /* "lustre : sl" - default SL for Lustre */ + uint64_t ** range_arr = + (uint64_t **)malloc(sizeof(uint64_t *)); + range_arr[0] = (uint64_t *)malloc(2*sizeof(uint64_t)); + range_arr[0][0] = OSM_QOS_POLICY_ULP_LUSTRE_SERVICE_ID; + range_arr[0][1] = OSM_QOS_POLICY_ULP_LUSTRE_SERVICE_ID + 0xFFFF; + + p_current_qos_match_rule->service_id_range_arr = range_arr; + p_current_qos_match_rule->service_id_range_len = 1; + + } qos_ulp_sl + + | qos_ulp_type_lustre_port list_of_ranges TK_DOTDOT { + /* Lustre with port numbers */ + uint64_t ** range_arr; + unsigned range_len; + unsigned i; + + if (!cl_list_count(&tmp_parser_struct.num_pair_list)) + { + 
yyerror("Lustre ULP rule doesn't have port numbers"); + return 1; + } + + /* get all the port ranges */ + __rangelist2rangearr( &tmp_parser_struct.num_pair_list, + &range_arr, + &range_len ); + /* now translate these port numbers into service ids */ + for (i = 0; i < range_len; i++) + { + if (range_arr[i][0] > 0xFFFF || range_arr[i][1] > 0xFFFF) + { + yyerror("Lustre port number out of range"); + return 1; + } + range_arr[i][0] += OSM_QOS_POLICY_ULP_LUSTRE_SERVICE_ID; + range_arr[i][1] += OSM_QOS_POLICY_ULP_LUSTRE_SERVICE_ID; + } + + p_current_qos_match_rule->service_id_range_arr = range_arr; + p_current_qos_match_rule->service_id_range_len = range_len; + + } qos_ulp_sl + + | qos_ulp_type_lustre_port_guid list_of_ranges TK_DOTDOT { + /* lustre, port-guid ... : sl */ + uint64_t ** range_arr; + unsigned range_len; + + if (!cl_list_count(&tmp_parser_struct.num_pair_list)) + { + yyerror("Lustre ULP rule doesn't have port guids"); + return 1; + } + + /* create a new port group with these ports */ + __parser_port_group_start(); + + p_current_port_group->name = strdup("_Lustre_Targets_"); + p_current_port_group->use = strdup("Generated from ULP rules"); + + __rangelist2rangearr( &tmp_parser_struct.num_pair_list, + &range_arr, + &range_len ); + + __parser_add_guid_range_to_port_map( + &p_current_port_group->port_map, + range_arr, + range_len); + + /* add this port group to the destination + groups of the current match rule */ + cl_list_insert_tail(&p_current_qos_match_rule->destination_group_list, + p_current_port_group); + + __parser_port_group_end(); + + /* setup ranges as in qos_ulp_type_lustre_default */ + range_arr = (uint64_t **)malloc(sizeof(uint64_t *)); + range_arr[0] = (uint64_t *)malloc(2*sizeof(uint64_t)); + range_arr[0][0] = OSM_QOS_POLICY_ULP_LUSTRE_SERVICE_ID; + range_arr[0][1] = OSM_QOS_POLICY_ULP_LUSTRE_SERVICE_ID + 0xFFFF; + + p_current_qos_match_rule->service_id_range_arr = range_arr; + p_current_qos_match_rule->service_id_range_len = 1; + + } qos_ulp_sl 
; qos_ulp_type_any_service: TK_ULP_ANY_SERVICE_ID @@ -989,6 +1088,15 @@ qos_ulp_type_ipoib_default: TK_ULP_IPOIB_DEFAULT qos_ulp_type_ipoib_pkey: TK_ULP_IPOIB_PKEY { __parser_ulp_match_rule_start(); }; +qos_ulp_type_lustre_default: TK_ULP_LUSTRE_DEFAULT + { __parser_ulp_match_rule_start(); }; + +qos_ulp_type_lustre_port: TK_ULP_LUSTRE_PORT + { __parser_ulp_match_rule_start(); }; + +qos_ulp_type_lustre_port_guid: TK_ULP_LUSTRE_PORT_GUID + { __parser_ulp_match_rule_start(); }; + qos_ulp_sl: single_number { /* get the SL for ULP rules */ -- 1.6.0.4 -- To unsubscribe from this list: send the line "unsubscribe linux-rdma" in the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org More majordomo info at http://vger.kernel.org/majordomo-info.html