From: rmccabe@sourceware.org <rmccabe@sourceware.org>
To: cluster-devel.redhat.com
Subject: [Cluster-devel] conga/luci/site/luci/Extensions cluster_adapte ...
Date: 2 Oct 2006 20:53:37 -0000
Message-ID: <20061002205337.16975.qmail@sourceware.org>
CVSROOT: /cvs/cluster
Module name: conga
Changes by: rmccabe at sourceware.org 2006-10-02 20:53:37
Modified files:
luci/site/luci/Extensions: cluster_adapters.py
Log message:
fix some of the resource backend code to accept POST as well as GET
Patches:
http://sourceware.org/cgi-bin/cvsweb.cgi/conga/luci/site/luci/Extensions/cluster_adapters.py.diff?cvsroot=cluster&r1=1.76&r2=1.77
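In Zope, GET query variables are typically reachable directly off the
REQUEST object (request[key]), while POSTed values arrive in
request.form. The change below makes the resource lookups tolerant of
both styles: each variable is first read off the request and, on
KeyError, re-read from request.form. A minimal sketch of that lookup
pattern, assuming that request model (the helper name is hypothetical,
not part of this patch); a usage illustration follows the diff:

    # Sketch only: "request" stands in for a Zope REQUEST object.
    # GET query variables are assumed reachable via request[key];
    # POSTed values are assumed to live in request.form.
    def get_request_var(request, key, default=None):
        # Flat lookup first (the GET path).
        try:
            return request[key]
        except KeyError:
            pass
        # Fall back to the form namespace (the POST path).
        try:
            return request.form[key]
        except (KeyError, AttributeError):
            return default

The patched getResourceInfo() inlines this fallback for 'resourcename'
and 'clustername' rather than factoring out a shared helper.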
--- conga/luci/site/luci/Extensions/cluster_adapters.py 2006/09/28 22:04:27 1.76
+++ conga/luci/site/luci/Extensions/cluster_adapters.py 2006/10/02 20:53:37 1.77
@@ -21,7 +21,7 @@
#folder, then only the admin user may see this menu, and
#the configure option should not be displayed.
#2)If there are clusters in the ManagedClusterSystems,
-#then only display chooser if the current user has
+#then only display chooser if the current user has
#permissions on at least one. If the user is admin, show ALL clusters
from homebase_adapters import nodeAuth, nodeUnauth, manageCluster, createClusterSystems
@@ -187,11 +187,11 @@
flag.manage_addProperty(BATCH_ID,batch_id, "string")
flag.manage_addProperty(TASKTYPE,CLUSTER_ADD, "string")
flag.manage_addProperty(FLAG_DESC,"Creating node " + key + " for cluster " + clusterName, "string")
-
+
def validateAddClusterNode(self, request):
errors = list()
- messages = list()
+ messages = list()
requestResults = {}
try:
@@ -542,7 +542,7 @@
def createCluChooser(self, request, systems):
dummynode = {}
-
+
if request.REQUEST_METHOD == 'POST':
ret = validatePost(self, request)
try:
@@ -553,7 +553,7 @@
try: request.SESSION.set('checkRet', {})
except: pass
- #First, see if a cluster is chosen, then
+ #First, see if a cluster is chosen, then
#check that the current user can access that system
cname = None
try:
@@ -581,7 +581,7 @@
cldata['currentItem'] = True
else:
cldata['currentItem'] = False
-
+
cladd = {}
cladd['Title'] = "Create"
cladd['cfg_type'] = "clusteradd"
@@ -591,7 +591,7 @@
cladd['currentItem'] = True
else:
cladd['currentItem'] = False
-
+
clcfg = {}
clcfg['Title'] = "Configure"
clcfg['cfg_type'] = "clustercfg"
@@ -609,8 +609,8 @@
clcfg['show_children'] = True
else:
clcfg['show_children'] = False
-
- #loop through all clusters
+
+ #loop through all clusters
syslist= list()
for system in systems:
clsys = {}
@@ -629,7 +629,7 @@
syslist.append(clsys)
clcfg['children'] = syslist
-
+
mylist = list()
mylist.append(cldata)
mylist.append(cladd)
@@ -674,7 +674,7 @@
nd['currentItem'] = True
else:
nd['currentItem'] = False
-
+
ndadd = {}
ndadd['Title'] = "Add a Node"
@@ -685,7 +685,7 @@
ndadd['currentItem'] = True
else:
ndadd['currentItem'] = False
-
+
ndcfg = {}
ndcfg['Title'] = "Configure"
ndcfg['cfg_type'] = "nodecfg"
@@ -699,11 +699,11 @@
ndcfg['currentItem'] = True
else:
ndcfg['currentItem'] = False
-
+
nodes = model.getNodes()
nodenames = list()
for node in nodes:
- nodenames.append(node.getName())
+ nodenames.append(node.getName())
cfgablenodes = list()
for nodename in nodenames:
@@ -723,9 +723,9 @@
cfg['currentItem'] = False
else:
cfg['currentItem'] = False
-
+
cfgablenodes.append(cfg)
-
+
#Now add nodename structs as children of the config element
ndcfg['children'] = cfgablenodes
@@ -749,7 +749,7 @@
sv['currentItem'] = True
else:
sv['currentItem'] = False
-
+
svadd = {}
svadd['Title'] = "Add a Service"
svadd['cfg_type'] = "serviceadd"
@@ -759,7 +759,7 @@
svadd['currentItem'] = True
else:
svadd['currentItem'] = False
-
+
svcfg = {}
svcfg['Title'] = "Configure a Service"
svcfg['cfg_type'] = "servicecfg"
@@ -794,16 +794,16 @@
svc['currentItem'] = False
else:
svc['currentItem'] = False
-
+
serviceable.append(svc)
- svcfg['children'] = serviceable
-
+ svcfg['children'] = serviceable
+
kids = list()
kids.append(svadd)
kids.append(svcfg)
- sv['children'] = kids
+ sv['children'] = kids
#############################################################
rv = {}
rv['Title'] = "Resources"
@@ -818,7 +818,7 @@
rv['currentItem'] = True
else:
rv['currentItem'] = False
-
+
rvadd = {}
rvadd['Title'] = "Add a Resource"
rvadd['cfg_type'] = "resourceadd"
@@ -828,7 +828,7 @@
rvadd['currentItem'] = True
else:
rvadd['currentItem'] = False
-
+
rvcfg = {}
rvcfg['Title'] = "Configure a Resource"
rvcfg['cfg_type'] = "resourcecfg"
@@ -863,17 +863,17 @@
rvc['currentItem'] = False
else:
rvc['currentItem'] = False
-
+
resourceable.append(rvc)
- rvcfg['children'] = resourceable
-
+ rvcfg['children'] = resourceable
+
kids = list()
kids.append(rvadd)
kids.append(rvcfg)
- rv['children'] = kids
- #################################################################
+ rv['children'] = kids
+ #################################################################
fd = {}
fd['Title'] = "Failover Domains"
fd['cfg_type'] = "failoverdomains"
@@ -887,7 +887,7 @@
fd['currentItem'] = True
else:
fd['currentItem'] = False
-
+
fdadd = {}
fdadd['Title'] = "Add a Failover Domain"
fdadd['cfg_type'] = "failoverdomainadd"
@@ -897,7 +897,7 @@
fdadd['currentItem'] = True
else:
fdadd['currentItem'] = False
-
+
fdcfg = {}
fdcfg['Title'] = "Configure a Failover Domain"
fdcfg['cfg_type'] = "failoverdomaincfg"
@@ -932,16 +932,16 @@
fdc['currentItem'] = False
else:
fdc['currentItem'] = False
-
+
fdomable.append(fdc)
- fdcfg['children'] = fdomable
-
+ fdcfg['children'] = fdomable
+
kids = list()
kids.append(fdadd)
kids.append(fdcfg)
- fd['children'] = kids
+ fd['children'] = kids
#############################################################
fen = {}
fen['Title'] = "Fence Devices"
@@ -956,7 +956,7 @@
fen['currentItem'] = True
else:
fen['currentItem'] = False
-
+
fenadd = {}
fenadd['Title'] = "Add a Fence Device"
fenadd['cfg_type'] = "fencedeviceadd"
@@ -966,7 +966,7 @@
fenadd['currentItem'] = True
else:
fenadd['currentItem'] = False
-
+
fencfg = {}
fencfg['Title'] = "Configure a Fence Device"
fencfg['cfg_type'] = "fencedevicecfg"
@@ -1001,16 +1001,16 @@
fenc['currentItem'] = False
else:
fenc['currentItem'] = False
-
+
fenceable.append(fenc)
- fencfg['children'] = fenceable
-
+ fencfg['children'] = fenceable
+
kids = list()
kids.append(fenadd)
kids.append(fencfg)
- fen['children'] = kids
+ fen['children'] = kids
#############################################################
mylist = list()
@@ -1026,10 +1026,10 @@
def getClusterName(self, model):
- return model.getClusterName()
+ return model.getClusterName()
def getClusterAlias(self, model):
- alias = model.getClusterAlias()
+ alias = model.getClusterAlias()
if alias == None:
return model.getClusterName()
else:
@@ -1060,16 +1060,16 @@
base2 = req['HTTP_HOST'] + req['SERVER_PORT']
htab = { 'Title':"homebase",
- 'Description':"Home base for this luci server",
+ 'Description':"Home base for this luci server",
'Taburl':"/luci/homebase"}
if selectedtab == "homebase":
htab['isSelected'] = True
else:
htab['isSelected'] = False
-
+
ctab = { 'Title':"cluster",
- 'Description':"Cluster configuration page",
+ 'Description':"Cluster configuration page",
'Taburl':"/luci/cluster?pagetype=3"}
if selectedtab == "cluster":
ctab['isSelected'] = True
@@ -1077,16 +1077,16 @@
ctab['isSelected'] = False
stab = { 'Title':"storage",
- 'Description':"Storage configuration page",
+ 'Description':"Storage configuration page",
'Taburl':"/luci/storage"}
if selectedtab == "storage":
stab['isSelected'] = True
else:
stab['isSelected'] = False
- portaltabs.append(htab)
- portaltabs.append(ctab)
- portaltabs.append(stab)
+ portaltabs.append(htab)
+ portaltabs.append(ctab)
+ portaltabs.append(stab)
return portaltabs
@@ -1123,7 +1123,7 @@
return None
else:
return None
-
+
def getRicciAgent(self, clustername):
#Check cluster permission here! return none if false
path = CLUSTER_FOLDER_PATH + clustername[0]
@@ -1137,7 +1137,7 @@
return ""
else:
return ""
-
+
def getClusterStatus(self, ricci_name):
rb = ricci_bridge(ricci_name)
@@ -1177,8 +1177,8 @@
vals['failed'] = node.getAttribute('failed')
vals['autostart'] = node.getAttribute('autostart')
results.append(vals)
-
- return results
+
+ return results
def getServicesInfo(self, status, modelb, req):
map = {}
@@ -1190,7 +1190,7 @@
itemmap = {}
itemmap['name'] = item['name']
if item['running'] == "true":
- itemmap['running'] = "true"
+ itemmap['running'] = "true"
itemmap['nodename'] = item['nodename']
itemmap['autostart'] = item['autostart']
itemmap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&servicename=" + item['name'] + "&pagetype=" + SERVICE
@@ -1206,7 +1206,7 @@
map['services'] = maplist
return map
-
+
def getServiceInfo(self,status,modelb,req):
#set up struct for service config page
baseurl = req['URL']
@@ -1269,7 +1269,7 @@
children = svc.getChildren()
for child in children:
recurse_resources(root_uuid, child, resource_list, indent_ctr)
-
+
hmap['resource_list'] = resource_list
return hmap
@@ -1289,7 +1289,7 @@
rc_map['type'] = child.getResourceType()
rc_map['indent_ctr'] = indent_ctr
-
+
#Note: Final version needs all resource attrs
rc_map['attrs'] = child.getAttributes()
rc_map['uuid'] = make_uuid('resource')
@@ -1305,7 +1305,7 @@
rc_map['max_depth'] = child_depth
return child_depth + 1
-
+
def serviceStart(self, ricci_agent, req):
rb = ricci_bridge(ricci_agent)
svcname = req['servicename']
@@ -1356,7 +1356,7 @@
flag = self.restrictedTraverse(objpath)
#flag[BATCH_ID] = batch_id
#flag[TASKTYPE] = SERVICE_RESTART
- #flag[FLAG_DESC] = "Restarting service " + svcname
+ #flag[FLAG_DESC] = "Restarting service " + svcname
flag.manage_addProperty(BATCH_ID,batch_id, "string")
flag.manage_addProperty(TASKTYPE,SERVICE_RESTART, "string")
flag.manage_addProperty(FLAG_DESC,"Restarting service " + svcname, "string")
@@ -1427,7 +1427,7 @@
for nitem in nlist:
if nitem['name'] == ndname:
break
- nodesmap['nodename'] = ndname
+ nodesmap['nodename'] = ndname
nodesmap['nodecfgurl'] = baseurl + "?clustername=" + clustername + "&nodename=" + ndname + "&pagetype=" + NODE
if nitem['clustered'] == "true":
nodesmap['status'] = NODE_ACTIVE
@@ -1460,7 +1460,7 @@
fdom_map['svclist'] = svclist
fdomlist.append(fdom_map)
return fdomlist
-
+
def processClusterProps(self, ricci_agent, request):
#First, retrieve cluster.conf from session
conf = request.SESSION.get('conf')
@@ -1502,8 +1502,8 @@
else:
return
-
-
+
+
def getClusterInfo(self, model, req):
cluname = req[CLUNAME]
baseurl = req['URL'] + "?" + PAGETYPE + "=" + CLUSTER_PROCESS + "&" + CLUNAME + "=" + cluname + "&"
@@ -1546,7 +1546,7 @@
else:
map['is_mcast'] = "False"
map['mcast_addr'] = "1.2.3.4"
-
+
#-------------
#quorum disk params
quorumd_url = baseurl + ACTIONTYPE + "=" + QUORUMD
@@ -1710,7 +1710,7 @@
flag.manage_addProperty(BATCH_ID,batch_id, "string")
flag.manage_addProperty(TASKTYPE,NODE_LEAVE_CLUSTER, "string")
flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' leaving cluster", "string")
-
+
response = request.RESPONSE
#Is this correct? Should we re-direct to the cluster page?
response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1730,7 +1730,7 @@
flag.manage_addProperty(BATCH_ID,batch_id, "string")
flag.manage_addProperty(TASKTYPE,NODE_JOIN_CLUSTER, "string")
flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' joining cluster", "string")
-
+
response = request.RESPONSE
#Once again, is this correct? Should we re-direct to the cluster page?
response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1751,7 +1751,7 @@
flag.manage_addProperty(BATCH_ID,batch_id, "string")
flag.manage_addProperty(TASKTYPE,NODE_REBOOT, "string")
flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being rebooted", "string")
-
+
response = request.RESPONSE
#Once again, is this correct? Should we re-direct to the cluster page?
response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1789,7 +1789,7 @@
flag.manage_addProperty(BATCH_ID,batch_id, "string")
flag.manage_addProperty(TASKTYPE,NODE_FENCE, "string")
flag.manage_addProperty(FLAG_DESC,"Node \'" + nodename + "\' is being fenced", "string")
-
+
response = request.RESPONSE
#Once again, is this correct? Should we re-direct to the cluster page?
response.redirect(request['URL'] + "?pagetype=" + CLUSTER_CONFIG + "&clustername=" + clustername)
@@ -1799,7 +1799,7 @@
#We need to get a node name other than the node
#to be deleted, then delete the node from the cluster.conf
#and propagate it. We will need two ricci agents for this task.
-
+
#First, delete cluster.conf from node to be deleted.
#next, have node leave cluster.
@@ -1853,7 +1853,7 @@
flag.manage_addProperty(FLAG_DESC,"Deleting node \'" + nodename + "\'", "string")
response = request.RESPONSE
response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
-
+
def getNodeInfo(self, model, status, request):
infohash = {}
@@ -1882,17 +1882,17 @@
nodestate = NODE_ACTIVE
else:
nodestate = NODE_INACTIVE
-
+
infohash['nodestate'] = nodestate
infohash['nodename'] = nodename
-
+
#set up drop down links
if nodestate == NODE_ACTIVE:
infohash['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
infohash['reboot_url'] = baseurl + "?pagetype=" +NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
infohash['fence_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + nodename + "&clustername=" + clustername
infohash['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + nodename + "&clustername=" + clustername
-
+
if nodestate == NODE_INACTIVE:
infohash['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_JOIN_CLUSTER + "&nodename=" + nodename + "&clustername=" + clustername
infohash['reboot_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + nodename + "&clustername=" + clustername
@@ -1909,7 +1909,7 @@
svc_dict['servicename'] = svcname
svc_dict['svcurl'] = svcurl
svc_dict_list.append(svc_dict)
-
+
infohash['currentservices'] = svc_dict_list
#next is faildoms
@@ -1921,9 +1921,9 @@
fdomurl = baseurl + "?" + PAGETYPE + "=" + FDOM_CONFIG + "&" + CLUNAME + "=" + clustername + "&fdomname=" + fdom.getName()
fdom_dict['fdomurl'] = fdomurl
fdom_dict_list.append(fdom_dict)
-
+
infohash['fdoms'] = fdom_dict_list
-
+
#return infohash
infohash['d_states'] = None
if nodestate == NODE_ACTIVE or nodestate == NODE_INACTIVE:
@@ -1936,10 +1936,10 @@
dlist.append("rgmanager")
states = rb.getDaemonStates(dlist)
infohash['d_states'] = states
-
- infohash['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + nodename + "&clustername=" + clustername
+
+ infohash['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + nodename + "&clustername=" + clustername
return infohash
- #get list of faildoms for node
+ #get list of faildoms for node
def getNodesInfo(self, model,status,req):
resultlist = list()
@@ -1973,14 +1973,14 @@
map['status'] = NODE_INACTIVE
map['status_str'] = NODE_INACTIVE_STR
- map['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + name + "&clustername=" + clustername
+ map['logurl'] = baseurl + "?pagetype=" + NODE_LOGS + "&nodename=" + name + "&clustername=" + clustername
#set up URLs for dropdown menu...
if map['status'] == NODE_ACTIVE:
map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_LEAVE_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
map['reboot_url'] = baseurl + "?pagetype=" +NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + name + "&clustername=" + clustername
map['fence_it_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_FENCE + "&nodename=" + name + "&clustername=" + clustername
map['delete_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_DELETE + "&nodename=" + name + "&clustername=" + clustername
-
+
if map['status'] == NODE_INACTIVE:
map['jl_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_JOIN_CLUSTER + "&nodename=" + name + "&clustername=" + clustername
map['reboot_url'] = baseurl + "?pagetype=" + NODE_PROCESS + "&task=" + NODE_REBOOT + "&nodename=" + name + "&clustername=" + clustername
@@ -2028,7 +2028,7 @@
cluname = req['clustername']
path = CLUSTER_FOLDER_PATH + cluname
clusterfolder = self.restrictedTraverse(path)
- items = clusterfolder.objectItems('ManagedSystem')
+ items = clusterfolder.objectItems('ManagedSystem')
#Ok, here is what is going on...if there is an item,
#we need to call the ricci_bridge and get a batch report.
#This report will tell us one of three things:
@@ -2040,7 +2040,7 @@
#Check here for more than 1 entry (an error)
ricci = item[0].split("____") #This removes the 'flag' suffix
rb = ricci_bridge(ricci[0])
- finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
+ finished = rb.checkBatch(item[1].getProperty(BATCH_ID))
if finished == True:
clusterfolder.manage_delObjects(item[0])
map['refreshurl'] = '5; url=\".\"'
@@ -2054,7 +2054,7 @@
if dex != (-1):
tmpstr = part2[:dex] #This strips off busyfirst var
part2 = tmpstr
- ###FIXME - The above assumes that the 'busyfirst' query var is at the
+ ###FIXME - The above assumes that the 'busyfirst' query var is at the
###end of the URL...
wholeurl = part1 + "?" + part2
#map['url'] = "5, url=" + req['ACTUAL_URL'] + "?" + req['QUERY_STRING']
@@ -2086,31 +2086,45 @@
itemmap['delurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + item.getName() + "&pagetype=" + RESOURCE_REMOVE
resList.append(itemmap)
return resList
-
-def getResourceInfo(modelb, request):
- resMap = {}
+def getResourceInfo(modelb, request):
try:
name = request['resourcename']
- baseurl = request['URL']
+ except KeyError, e:
+ name = request.form['resourcename']
+ except:
+ return {}
+
+ try:
cluname = request['clustername']
+ except KeyError, e:
+ cluname = request.form['clustername']
+ except:
+ return {}
+
+ try:
+ baseurl = request['URL']
+ except:
+ return {}
- for res in modelb.getResources():
- if res.getName() == name:
+ for res in modelb.getResources():
+ if res.getName() == name:
+ resMap = {}
+ try:
resMap['name'] = res.getName()
resMap['type'] = res.resource_type
resMap['tag_name'] = res.TAG_NAME
resMap['attrs'] = res.attr_hash
resMap['cfgurl'] = baseurl + "?" + "clustername=" + cluname + "&resourcename=" + res.getName() + "&pagetype=" + RESOURCE_CONFIG
return resMap
- except: pass
- return {}
+ except:
+ return {}
def delResource(self, request, ragent):
modelb = request.SESSION.get('model')
resPtr = modelb.getResourcesPtr()
resources = resPtr.getChildren()
- name = request['resourcename']
+ name = request['resourcename']
for res in resources:
if res.getName() == name:
resPtr.removeChild(res)
@@ -2143,11 +2157,11 @@
response.redirect(request['HTTP_REFERER'] + "&busyfirst=true")
-
+
def addResource(self, request, ragent):
if not request.form:
return "Nothing submitted, no changes made."
-
+
if request.form['type'] != 'ip' and not request.form['resourceName']:
return "Please enter a name for the resource."
types = {'ip': addIp,
@@ -2158,8 +2172,8 @@
'nfsc': addNfsx,
'scr': addScr,
'smb': addSmb}
-
-
+
+
type = request.form["type"]
res = types[type](request)
modelb = request.SESSION.get('model')
@@ -2195,12 +2209,12 @@
def getResourceForEdit(modelb, name):
resPtr = modelb.getResourcesPtr()
resources = resPtr.getChildren()
-
+
for res in resources:
if res.getName() == name:
resPtr.removeChild(res)
break
-
+
return res
def addIp(request):
@@ -2236,20 +2250,20 @@
res.attr_hash["force_unmount"] = '1'
else:
res.attr_hash["force_unmount"] = '0'
-
+
if form.has_key('selffence'):
res.attr_hash["self_fence"] = '1'
else:
res.attr_hash["self_fence"] = '0'
-
+
if form.has_key('checkfs'):
res.attr_hash["force_fsck"] = '1'
else:
res.attr_hash["force_fsck"] = '0'
-
+
modelb.getResourcesPtr().addChild(res)
return res
-
+
def addGfs(request):
modelb = request.SESSION.get('model')
if request.form.has_key('edit'):
@@ -2262,12 +2276,12 @@
res.attr_hash["device"] = form["device"]
res.attr_hash["options"] = form["options"]
res.attr_hash["fsid"] = form["fsid"]
-
+
if form.has_key('forceunmount'):
res.attr_hash["force_unmount"] = '1'
else:
res.attr_hash["force_unmount"] = '0'
-
+
modelb.getResourcesPtr().addChild(res)
return res
@@ -2284,15 +2298,15 @@
res.attr_hash["options"] = form["options"]
res.attr_hash["exportpath"] = form["export"]
res.attr_hash["nfstype"] = form["fstype"]
-
+
if form.has_key('forceunmount'):
res.attr_hash["force_unmount"] = '1'
else:
res.attr_hash["force_unmount"] = '0'
-
+
modelb.getResourcesPtr().addChild(res)
return res
-
+
def addNfsc(request):
modelb = request.SESSION.get('model')
if request.form.has_key('edit'):
@@ -2303,10 +2317,10 @@
res.attr_hash["name"] = form["resourceName"]
res.attr_hash["target"] = form["target"]
res.attr_hash["options"] = form["options"]
-
+
modelb.getResourcesPtr().addChild(res)
return res
-
+
def addNfsx(request):
modelb = request.SESSION.get('model')
if request.form.has_key('edit'):
@@ -2315,7 +2329,7 @@
res = apply(NFSExport)
form = request.form
res.attr_hash["name"] = form["resourceName"]
-
+
modelb.getResourcesPtr().addChild(res)
return res
@@ -2328,10 +2342,10 @@
form = request.form
res.attr_hash["name"] = form["resourceName"]
res.attr_hash["file"] = form["file"]
-
+
modelb.getResourcesPtr().addChild(res)
return res
-
+
def addSmb(request):
modelb = request.SESSION.get('model')
if request.form.has_key('edit'):
@@ -2341,16 +2355,15 @@
form = request.form
res.attr_hash["name"] = form["resourceName"]
res.attr_hash["workgroup"] = form["workgroup"]
-
+
modelb.getResourcesPtr().addChild(res)
return res
-
+
def appendModel(request, model):
- try:
- request.SESSION.set('model', model)
- except:
- pass
- return
+ try:
+ request.SESSION.set('model', model)
+ except:
+ pass
def resolve_nodename(self, clustername, nodename):
path = CLUSTER_FOLDER_PATH + clustername
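
For illustration only (not part of the commit), a hypothetical
stand-in for Zope's REQUEST object shows the two entry paths the
patched getResourceInfo() now accepts:

    # DummyRequest is hypothetical, not a class in this codebase;
    # the resource and cluster names are made-up example values.
    class DummyRequest(dict):
        def __init__(self, query=None, form=None):
            dict.__init__(self, query or {})
            self.form = form or {}

    # GET style: variables arrive as query parameters, so
    # request['resourcename'] succeeds directly.
    get_req = DummyRequest(query={'resourcename': 'webfs',
                                  'clustername': 'c1',
                                  'URL': '/luci/cluster'})

    # POST style: request['resourcename'] raises KeyError and the
    # patched code falls back to request.form['resourcename'].
    post_req = DummyRequest(query={'URL': '/luci/cluster'},
                            form={'resourcename': 'webfs',
                                  'clustername': 'c1'})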