From: rmccabe@sourceware.org <rmccabe@sourceware.org>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
Date: 20 Oct 2006 21:59:55 -0000
Message-ID: <20061020215955.1458.qmail@sourceware.org>
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2006-10-20 21:59:54
Modified files:
luci/site/luci/Extensions: cluster_adapters.py
Log message:
more logging for debug
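
The pattern applied in each hunk below is the same: look a value up on the request, fall back to the submitted form, and emit a luci_log debug message before returning a safe default instead of letting the exception escape. A minimal sketch of that shape, for illustration only (the helper name and signature are not part of cluster_adapters.py; luci_log is the module logger the patch uses):

    def get_request_field(request, key):
        # Try the request mapping first, then the POSTed form.
        try:
            return request[key]
        except KeyError:
            pass
        try:
            return request.form[key]
        except (AttributeError, KeyError):
            # Log at debug level; the caller returns its own default.
            luci_log.debug_verbose('%s is missing from request' % key)
            return None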
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.122&r2=1.123
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2006/10/18 23:12:31 1.122
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2006/10/20 21:59:54 1.123
@@ -2594,13 +2594,21 @@
except:
return "Unable to resolve node name %s to retrieve logging information" % nodename_resolved
+ if not rc.authed():
+ try:
+ snode = getStorageNode(self, nodename)
+ setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+ except:
+ pass
+ return "Luci is not authenticated to node %s. Please reauthenticate first." % nodename
+
return getNodeLogs(rc)
def processXenVM(self, req):
model = req.SESSION.get('model')
isNew = False
try:
- xenvmname = req ['servicename']
+ xenvmname = req['servicename']
except KeyError, e:
isNew = True
@@ -2623,14 +2631,27 @@
def getXenVMInfo(self, model, request):
- try:
- xenvmname = request['servicename']
- except KeyError, e:
- return {}
-
- xenvm = model.retrieveXenVMsByName(xenvmname)
- map = xenvm.getAttributes()
- return map
+ try:
+ xenvmname = request['servicename']
+ except KeyError, e:
+ try:
+ xenvmname = request.form['servicename']
+ except:
+ luci_log.debug_verbose('servicename is missing from request')
+ return {}
+ except:
+ luci_log.debug_verbose('servicename is missing from request')
+ return {}
+
+ try:
+ xenvm = model.retrieveXenVMsByName(xenvmname)
+ except:
+ luci_log.debug('An error occurred while attempting to get VM %s' \
+ % xenvmname)
+ return {}
+
+ map = xenvm.getAttributes()
+ return map
def isClusterBusy(self, req):
items = None
@@ -2661,9 +2682,10 @@
try:
items = clusterfolder.objectItems('ManagedSystem')
- if len(items) == 0:
+ if not items or len(items) < 1:
return map #This returns an empty map, and should indicate not busy
except:
+ luci_log.debug('An error occurred while looking for cluster %s flags' % cluname)
return map
map['busy'] = "true"
@@ -2760,7 +2782,10 @@
node_report['statusmessage'] = "Node created successfully" + REDIRECT_MSG
node_report['statusindex'] = creation_status
nodereports.append(node_report)
- clusterfolder.manage_delObjects(item[0])
+ try:
+ clusterfolder.manage_delObjects(item[0])
+ except Exception, e:
+ luci_log.info('Unable to delete %s: %s' % (item[0], str(e)))
continue
else:
map['busy'] = "true"
@@ -2783,13 +2808,17 @@
if finished == True:
node_report['desc'] = item[1].getProperty(FLAG_DESC) + REDIRECT_MSG
nodereports.append(node_report)
- clusterfolder.manage_delObjects(item[0])
+ try:
+ clusterfolder.manage_delObjects(item[0])
+ except Exception, e:
+ luci_log.info('Unable to delete %s: %s' % (item[0], str(e)))
else:
node_report = {}
map['busy'] = "true"
isBusy = True
node_report['desc'] = item[1].getProperty(FLAG_DESC)
nodereports.append(node_report)
+
if isBusy:
part1 = req['ACTUAL_URL']
part2 = req['QUERY_STRING']
@@ -2809,12 +2838,14 @@
def getClusterOS(self, rc):
map = {}
+
try:
os_str = resolveOSType(rc.os())
map['os'] = os_str
map['isVirtualized'] = rc.dom0()
except:
# default to rhel5 if something crazy happened.
+ luci_log.debug('An error occurred while attempting to get OS/Virt info for %s -- defaulting to rhel5/False' % rc.hostname())
map['os'] = 'rhel5'
map['isVirtualized'] = False
return map
@@ -2829,8 +2860,10 @@
try:
cluname = request.form['clustername']
except:
+ luci_log.debug_verbose('getResourcesInfo missing cluster name')
return resList
except:
+ luci_log.debug_verbose('getResourcesInfo missing cluster name')
return resList
for item in modelb.getResources():
@@ -2850,8 +2883,10 @@
try:
name = request.form['resourcename']
except:
+ luci_log.debug_verbose('getResourceInfo missing res name')
return {}
except:
+ luci_log.debug_verbose('getResourceInfo missing res name')
return {}
try:
@@ -2860,19 +2895,22 @@
try:
cluname = request.form['clustername']
except:
+ luci_log.debug_verbose('getResourceInfo missing cluster name')
return {}
except:
+ luci_log.debug_verbose('getResourceInfo missing cluster name')
return {}
try:
baseurl = request['URL']
except:
+ luci_log.debug_verbose('getResourceInfo missing URL')
return {}
for res in modelb.getResources():
if res.getName() == name:
- resMap = {}
try:
+ resMap = {}
resMap['name'] = res.getName()
resMap['type'] = res.resource_type
resMap['tag_name'] = res.TAG_NAME
@@ -2880,7 +2918,7 @@
resMap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + res.getName() + "&pagetype=" + RESOURCE_CONFIG
return resMap
except:
- return {}
+ continue
def delResource(self, rc, request):
errstr = 'An error occurred in while attempting to set the cluster.conf'
@@ -2888,11 +2926,19 @@
try:
modelb = request.SESSION.get('model')
except:
+ luci_log.debug_verbose('delResource unable to extract model from SESSION')
return errstr
try:
name = request['resourcename']
except KeyError, e:
+ try:
+ name = request.form['resourcename']
+ except:
+ luci_log.debug_verbose('delResource missing resname %s' % str(e))
+ return errstr + ': ' + str(e)
+ except:
+ luci_log.debug_verbose('delResource missing resname')
return errstr + ': ' + str(e)
try:
@@ -2901,6 +2947,7 @@
try:
clustername = request.form['clustername']
except:
+ luci_log.debug_verbose('delResource missing cluster name')
return errstr + ': could not determine the cluster name.'
try:
@@ -2921,6 +2968,7 @@
break
if not found:
+ luci_log.debug_verbose('delresource cant find res %s' % name)
return errstr + ': the specified resource was not found.'
try:
@@ -2928,10 +2976,12 @@
if not conf:
raise
except:
+ luci_log.debug_verbose('exportModelAsString failed')
return errstr
batch_number, result = setClusterConf(str(conf))
if batch_number is None or result is None:
+ luci_log.debug_verbose('missing batch and/or result from setClusterConf')
return errstr
modelstr = ""
@@ -2939,13 +2989,20 @@
clusterfolder = self.restrictedTraverse(path)
batch_id = str(batch_number)
objname = str(ragent) + '____flag'
- clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #Now we need to annotate the new DB object
objpath = str(path + '/' + objname)
- flag = self.restrictedTraverse(objpath)
- flag.manage_addProperty(BATCH_ID, batch_id, "string")
- flag.manage_addProperty(TASKTYPE, RESOURCE_REMOVE, "string")
- flag.manage_addProperty(FLAG_DESC, "Removing Resource \'" + request['resourcename'] + "\'", "string")
+
+ try:
+ clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+ #Now we need to annotate the new DB object
+ flag = self.restrictedTraverse(objpath)
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, RESOURCE_REMOVE, "string")
+ flag.manage_addProperty(FLAG_DESC, "Removing Resource \'" + request['resourcename'] + "\'", "string")
+ except Exception, e:
+ luci_log.debug('An error occurred while setting flag %s: %s' \
+ % (objname, str(e)))
+ except:
+ luci_log.debug('An error occurred while setting flag %s' % objname)
response = request.RESPONSE
response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
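
The cluster-flag bookkeeping gets the same treatment: creating the ManagedSystem flag object and annotating it are now wrapped so a failure is logged rather than raised back to the page. A hedged sketch of that shape (the method name and the lower-case property ids stand in for the module's BATCH_ID, TASKTYPE and FLAG_DESC constants; luci_log is the module logger as above):

    def set_cluster_flag(self, path, objname, batch_id, task_type, desc):
        # Create the busy flag and annotate it; log and continue on failure.
        try:
            clusterfolder = self.restrictedTraverse(path)
            clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
            flag = self.restrictedTraverse(path + '/' + objname)
            flag.manage_addProperty('batch_id', batch_id, 'string')
            flag.manage_addProperty('task_type', task_type, 'string')
            flag.manage_addProperty('flag_desc', desc, 'string')
        except Exception, e:
            luci_log.debug('An error occurred while setting flag %s: %s' \
                % (objname, str(e)))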
@@ -2953,8 +3010,8 @@
def addIp(request, form=None):
if form is None:
form = request.form
- modelb = request.SESSION.get('model')
+ modelb = request.SESSION.get('model')
if not modelb or not form:
return None
@@ -3069,6 +3126,7 @@
def addGfs(request, form=None):
if form is None:
form = request.form
+
modelb = request.SESSION.get('model')
if not modelb:
return None
@@ -3079,13 +3137,21 @@
if not oldname:
raise KeyError('oldname is blank.')
res = getResourceForEdit(modelb, oldname)
+ if not res:
+ luci_log.debug('resource %s was not found for editing' % oldname)
+ return None
except KeyError, e:
+ luci_log.debug('resource %s was not found for editing: %s' \
+ % (oldname, str(e)))
return None
else:
- res = apply(Clusterfs)
-
- if not res:
- return None
+ try:
+ res = apply(Clusterfs)
+ if not res:
+ raise
+ except:
+ luci_log.debug('Error creating node Clusterfs resource')
+ return None
# XXX: sanity check these fields
try:
@@ -3094,30 +3160,35 @@
raise
res.attr_hash['name'] = name
except:
+ luci_log.debug_verbose('name is missing in clusterfs res')
return None
try:
mountpoint = form['mountpoint'].strip()
res.attr_hash['mountpoint'] = mountpoint
except:
+ luci_log.debug_verbose('mountpoint is missing in clusterfs res')
return None
try:
device = form['device'].strip()
res.attr_hash['device'] = device
except:
+ luci_log.debug_verbose('device is missing in clusterfs res')
return None
try:
options = form['options'].strip()
res.attr_hash['options'] = options
except:
+ luci_log.debug_verbose('options is missing in clusterfs res')
return None
try:
fsid = form['fsid'].strip()
res.attr_hash['fsid'] = fsid
except:
+ luci_log.debug_verbose('fsid is missing in clusterfs res')
return None
if form.has_key('forceunmount'):
@@ -3373,16 +3444,20 @@
try:
mb_nodes = modelb.getNodes()
if not mb_nodes or not len(mb_nodes):
- raise
- except:
- return 'Unable to find cluster nodes for ' + clusterName
+ raise Exception, 'node list is empty'
+ except Exception, e:
+ luci_log.debug_verbose('no model builder nodes found for %s: %s' \
+ % (str(e), clusterName))
+ return 'Unable to find cluster nodes for %s' % clusterName
try:
cluster_node = self.restrictedTraverse(PLONE_ROOT + '/systems/cluster/' + clusterName)
if not cluster_node:
- raise
- except:
- return 'Unable to find an entry for ' + clusterName + ' in the Luci database.'
+ raise Exception, 'cluster node is none'
+ except Exception, e:
+ luci_log.debug('cant find cluster node for %s: %s'
+ % (clusterName, str(e)))
+ return 'Unable to find an entry for %s in the Luci database.' % clusterName
try:
db_nodes = map(lambda x: x[0], cluster_node.objectItems('Folder'))
@@ -3469,9 +3544,11 @@
try:
ragent = rc.hostname()
if not ragent:
+ luci_log.debug('missing hostname')
raise
batch_number, result = setClusterConf(str(conf))
if batch_number is None or result is None:
+ luci_log.debug('missing batch_number or result')
raise
except:
return "Some error occured in setClusterConf\n"
@@ -3480,17 +3557,24 @@
clusterfolder = self.restrictedTraverse(path)
batch_id = str(batch_number)
objname = str(ragent + '____flag')
- clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
- #Now we need to annotate the new DB object
objpath = str(path + '/' + objname)
- flag = self.restrictedTraverse(objpath)
- flag.manage_addProperty(BATCH_ID, batch_id, "string")
- flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
- if type != 'ip':
- flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
- else:
- flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
+ try:
+ clusterfolder.manage_addProduct['ManagedSystem'].addManagedSystem(objname)
+ #Now we need to annotate the new DB object
+ flag = self.restrictedTraverse(objpath)
+ flag.manage_addProperty(BATCH_ID, batch_id, "string")
+ flag.manage_addProperty(TASKTYPE, RESOURCE_ADD, "string")
+
+ if type != 'ip':
+ flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + request.form['resourceName'] + "\'", "string")
+ else:
+ flag.manage_addProperty(FLAG_DESC, "Creating New Resource \'" + res.attr_hash['address'] + "\'", "string")
+ except Exception, e:
+ try:
+ luci_log.info('Unable to create flag %s: %s' % (objpath, str(e)))
+ except:
+ pass
response = request.RESPONSE
response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
@@ -3503,24 +3587,33 @@
if res.getName() == name:
resPtr.removeChild(res)
return res
+
+ luci_log.debug_verbose('unable to find resource \"%s\"' % name)
raise KeyError, name
def appendModel(request, model):
try:
request.SESSION.set('model', model)
except:
- pass
-
- return False
+ luci_log.debug_verbose('Appending model to request failed')
+ return False
def resolve_nodename(self, clustername, nodename):
- path = CLUSTER_FOLDER_PATH + clustername
- clusterfolder = self.restrictedTraverse(path)
- objs = clusterfolder.objectItems('Folder')
+ path = str(CLUSTER_FOLDER_PATH + clustername)
+
+ try:
+ clusterfolder = self.restrictedTraverse(path)
+ objs = clusterfolder.objectItems('Folder')
+ except Exception, e:
+ luci_log.info('resolve_nodename failed for %s/%s: %s' \
+ % (nodename, clustername, str(e)))
+
for obj in objs:
if obj[0].find(nodename) != (-1):
return obj[0]
- raise
+
+ luci_log.info('resolve_nodename failed for %s/%s' % (nodename, clustername))
+ return None
def noNodeFlagsPresent(self, nodefolder, flagname, hostname):
try:
@@ -3536,15 +3629,26 @@
#a flag already exists... try to delete it
try:
rc = RicciCommunicator(hostname)
- except:
- luci_log.info('Unable to connect to the ricci daemon on host ' + hostname)
+ except RicciError, e:
+ luci_log.info('Unable to connect to the ricci daemon: %s' % str(e))
return False
+ if not rc.authed():
+ try:
+ snode = getStorageNode(self, hostname)
+ setNodeFlag(snode, CLUSTER_NODE_NEED_AUTH)
+ except:
+ pass
+ luci_log.info('Node %s is not authenticated' % item[0])
+ return None
+
finished = checkBatch(rc, item[1].getProperty(BATCH_ID))
if finished == True:
try:
nodefolder.manage_delObjects(item[0])
- except:
+ except Exception, e:
+ luci_log.info('manage_delObjects for %s failed: %s' \
+ % (item[0], str(e)))
return False
return True
else:
@@ -3552,8 +3656,22 @@
return False
return True
-def getModelBuilder(rc,isVirtualized):
- cluster_conf_node = getClusterConf(rc)
- modelb = ModelBuilder(0, None, None, cluster_conf_node)
+def getModelBuilder(rc, isVirtualized):
+ try:
+ cluster_conf_node = getClusterConf(rc)
+ if not cluster_conf_node:
+ raise;
+ except:
+ luci_log.debug('unable to get cluster_conf_node in getModelBuilder')
+ return None
+
+ try:
+ modelb = ModelBuilder(0, None, None, cluster_conf_node)
+ except Exception, e:
+ try:
+ luci_log.debug('An error occurred while trying to get modelb for conf \"%s\": %s' % (cluster_conf_node.toxml(), str(e)))
+ except:
+ pass
+
modelb.setIsVirtualized(isVirtualized)
return modelb
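
With this change getModelBuilder returns None when cluster.conf cannot be retrieved instead of raising, so callers are expected to check the result before using it. A brief usage sketch; the error string is a placeholder, not a message taken from the page code:

    modelb = getModelBuilder(rc, isVirtualized)
    if modelb is None:
        luci_log.debug('unable to build a model from %s' % rc.hostname())
        return 'Unable to retrieve the cluster configuration'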