From: rmccabe@sourceware.org <rmccabe@sourceware.org>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] conga/luci cluster/form-macros site/luci/Exten ...
Date: 11 Dec 2006 22:42:35 -0000
Message-ID: <20061211224235.16846.qmail@sourceware.org>
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2006-12-11 22:42:35
Modified files:
luci/cluster : form-macros
luci/site/luci/Extensions: cluster_adapters.py
Log message:
more fixes for bz219156
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/cluster/form-macros.diff?cvsroot=cluster&r1=1.127&r2=1.128
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.179&r2=1.180
--- conga/luci/cluster/form-macros 2006/12/11 21:51:13 1.127
+++ conga/luci/cluster/form-macros 2006/12/11 22:42:34 1.128
@@ -2781,11 +2781,19 @@
<div metal:define-macro="nodeprocess-form">
<tal:block
- tal:define="result python: here.nodeTaskProcess(modelb, request)"/>
+ tal:define="result python: here.nodeTaskProcess(modelb, request)">
- <div>
- <span tal:replace="result | nothing" />
- </div>
+ <div id="errmsgsdiv" class="errmsgs"
+ tal:condition="python: result and len(result) > 1 and 'errors' in result[1]">
+ <p class="errmsgs">The following errors occurred:</p>
+
+ <ul class="errmsgs">
+ <tal:block tal:repeat="e python: result[1]['errors']">
+ <li class="errmsgs" tal:content="python:e" />
+ </tal:block>
+ </ul>
+ </div>
+ </tal:block>
</div>
<div metal:define-macro="services-form">
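The nodeprocess-form macro above renders the second element of the tuple
that nodeTaskProcess now returns when a task fails (see the
cluster_adapters.py diff below). A minimal sketch of that failure value
and of the check the macro performs; the message text is illustrative only:

    # Sketch only: an illustrative failure value, not taken from the patch.
    result = (False, {'errors': ['No cluster name was given.']})

    # Mirrors the tal:condition and tal:repeat in the macro above.
    if result and len(result) > 1 and 'errors' in result[1]:
        for e in result[1]['errors']:
            print e  # the macro emits one <li class="errmsgs"> per entry

On success nodeTaskProcess issues a redirect and returns nothing, so the
condition is not met and the error div is not rendered.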
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2006/12/11 21:51:14 1.179
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2006/12/11 22:42:34 1.180
@@ -269,9 +269,8 @@
% (key, str(e)))
def validateAddClusterNode(self, request):
- errors = list()
- messages = list()
requestResults = {}
+ errors = list()
try:
sessionData = request.SESSION.get('checkRet')
@@ -411,7 +410,7 @@
except Exception, e:
luci_log.debug_verbose('vACN7: %s' % str(e))
pass
- next_node_id = 1;
+ next_node_id = 1
for i in nodeList:
next_node_id += 1
new_node = ClusterNode()
@@ -2662,9 +2661,7 @@
if len(clulist) < 1:
return {}
clu = clulist[0]
- cluerror = False
if 'error' in clu:
- cluerror = True
map['error'] = True
clustername = clu['name']
if clu['alias'] != "":
@@ -2702,7 +2699,6 @@
map['currentservices'] = svc_dict_list
node_dict_list = list()
for item in nodelist:
- node_error = 'error' in item
nmap = {}
name = item['name']
nmap['nodename'] = name
@@ -3034,30 +3030,30 @@
def nodeTaskProcess(self, model, request):
try:
clustername = request['clustername']
- except KeyError, e:
+ except:
try:
clustername = request.form['clustername']
except:
- luci_log.debug('missing cluster name for NTP')
- return None
+ luci_log.debug('NTP0: missing cluster name')
+ return (False, {'errors': [ 'No cluster name was given.' ]})
try:
nodename = request['nodename']
- except KeyError, e:
+ except:
try:
nodename = request.form['nodename']
except:
- luci_log.debug('missing nodename name for NTP')
- return None
+ luci_log.debug('NTP1: missing node name')
+ return (False, {'errors': [ 'No node name was given.' ]})
try:
task = request['task']
- except KeyError, e:
+ except:
try:
task = request.form['task']
except:
- luci_log.debug('missing task for NTP')
- return None
+ luci_log.debug('NTP2: missing task')
+ return (False, {'errors': [ 'No node task was given.' ]})
nodename_resolved = resolve_nodename(self, clustername, nodename)
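Each of the three lookups above follows the same pattern: read the value
from the request, fall back to the POSTed form, and return a failure tuple
if the key is found in neither. A hypothetical helper (not part of this
patch) illustrating that shape:

    # Hypothetical sketch only -- not in the patch.
    # Returns request[key], falling back to request.form[key],
    # or None when the key is present in neither place.
    def get_req_var(request, key):
        try:
            return request[key]
        except:
            try:
                return request.form[key]
            except:
                return None

With such a helper each parameter becomes one lookup plus one error
return when the result is None.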
@@ -3067,24 +3063,27 @@
# to be performed.
try:
rc = RicciCommunicator(nodename_resolved)
+ if not rc:
+ raise Exception, 'rc is None'
except RicciError, e:
- luci_log.debug('ricci error from %s: %s' \
+ luci_log.debug('NTP3: ricci error from %s: %s' \
% (nodename_resolved, str(e)))
- return None
+ return (False, {'errors': [ 'Unable to connect to the ricci agent on %s.' % nodename_resolved ]})
except:
- return None
+ luci_log.debug('NTP4: ricci error from %s: %s' \
+ % (nodename_resolved, str(e)))
+ return (False, {'errors': [ 'Unable to connect to the ricci agent on %s.' % nodename_resolved ]})
cluinfo = rc.cluster_info()
if not cluinfo[0] and not cluinfo[1]:
- luci_log.debug('host %s not in a cluster (expected %s)' \
+ luci_log.debug('NTP5: node %s not in a cluster (expected %s)' \
% (nodename_resolved, clustername))
- return None
+ return (False, {'errors': [ 'Node %s reports it is not in a cluster.' % nodename_resolved ]})
cname = lower(clustername)
if cname != lower(cluinfo[0]) and cname != lower(cluinfo[1]):
- luci_log.debug('host %s in unknown cluster %s:%s (expected %s)' \
- % (nodename_resolved, cluinfo[0], cluinfo[1], clustername))
- return None
+ luci_log.debug('NTP6: node %s in unknown cluster %s:%s (expected %s)' % (nodename_resolved, cluinfo[0], cluinfo[1], clustername))
+ return (False, {'errors': [ 'Node %s reports it in cluster \"%s\". We expect it to be a member of cluster \"%s\"' % (nodename_resolved, cluinfo[0], clustername) ]})
if not rc.authed():
rc = None
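The hunk above wraps the RicciCommunicator call so that a connection
failure is reported back to the caller instead of silently returning None.
A condensed sketch of that step, assuming the same RicciCommunicator and
luci_log names and that RicciError derives from Exception; it binds the
exception in a single handler (the bare except: branch in the patch does
not bind e):

    # Sketch only: same error tuple as the patch, handlers condensed.
    try:
        rc = RicciCommunicator(nodename_resolved)
        if not rc:
            raise Exception, 'rc is None'
    except Exception, e:
        # Assumes RicciError subclasses Exception, so both cases land here.
        luci_log.debug('ricci error from %s: %s' % (nodename_resolved, str(e)))
        return (False, {'errors': ['Unable to connect to the ricci agent on %s.' % nodename_resolved]})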
@@ -3103,40 +3102,45 @@
pass
if rc is None:
- return None
+ luci_log.debug('NTP7: node %s is not authenticated' \
+ % nodename_resolved)
+ return (False, {'errors': [ 'Node %s is not authenticated' % nodename_resolved ]})
if task == NODE_LEAVE_CLUSTER:
if nodeLeave(self, rc, clustername, nodename_resolved) is None:
- luci_log.debug_verbose('NTP: nodeLeave failed')
- return None
+ luci_log.debug_verbose('NTP8: nodeLeave failed')
+ return (False, {'errors': [ 'Node %s failed to leave cluster %s' % (nodename_resolved, clustername) ]})
response = request.RESPONSE
response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
elif task == NODE_JOIN_CLUSTER:
if nodeJoin(self, rc, clustername, nodename_resolved) is None:
- luci_log.debug_verbose('NTP: nodeJoin failed')
- return None
+ luci_log.debug_verbose('NTP9: nodeJoin failed')
+ return (False, {'errors': [ 'Node %s failed to join cluster %s' % (nodename_resolved, clustername) ]})
response = request.RESPONSE
response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
elif task == NODE_REBOOT:
if forceNodeReboot(self, rc, clustername, nodename_resolved) is None:
- luci_log.debug_verbose('NTP: nodeReboot failed')
- return None
+ luci_log.debug_verbose('NTP10: nodeReboot failed')
+ return (False, {'errors': [ 'Node %s failed to reboot' \
+ % nodename_resolved ]})
response = request.RESPONSE
response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
elif task == NODE_FENCE:
if forceNodeFence(self, clustername, nodename, nodename_resolved) is None:
- luci_log.debug_verbose('NTP: nodeFencefailed')
- return None
+ luci_log.debug_verbose('NTP11: nodeFencefailed')
+ return (False, {'errors': [ 'Fencing of node %s failed.' \
+ % nodename_resolved]})
response = request.RESPONSE
response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
elif task == NODE_DELETE:
if nodeDelete(self, rc, model, clustername, nodename, nodename_resolved) is None:
- luci_log.debug_verbose('NTP: nodeDelete failed')
- return None
+ luci_log.debug_verbose('NTP12: nodeDelete failed')
+ return (False, {'errors': [ 'Deletion of node %s from cluster %s failed.' % (nodename_resolved, clustername) ]})
+
response = request.RESPONSE
response.redirect(request['URL'] + "?pagetype=" + NODES + "&clustername=" + clustername + '&busyfirst=true')
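All of the task branches above share the same failure shape and the same
redirect on success. A hypothetical table-driven sketch of that pattern,
limited to the two handlers that take identical arguments (the table itself
is not part of the patch):

    # Hypothetical sketch only -- the patch uses an if/elif chain.
    task_table = {
        NODE_LEAVE_CLUSTER: (nodeLeave, 'Node %s failed to leave cluster %s'),
        NODE_JOIN_CLUSTER:  (nodeJoin,  'Node %s failed to join cluster %s'),
    }

    entry = task_table.get(task)
    if entry is not None:
        handler, errfmt = entry
        if handler(self, rc, clustername, nodename_resolved) is None:
            return (False, {'errors': [errfmt % (nodename_resolved, clustername)]})
        request.RESPONSE.redirect(request['URL'] + '?pagetype=' + NODES
            + '&clustername=' + clustername + '&busyfirst=true')

The reboot, fence and delete branches take different arguments (nodename,
and for delete also model), so they do not fit this table directly.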