session_str=f.read().strip()
api = PLC(Auth(session=session_str), api_server_url)
# NOTE: What should we do if this call fails?
+ # TODO: handle dns failure here.
api.AuthCheck()
try:
--- /dev/null
+#!/usr/bin/python
+
+# One-shot maintenance script: for every host/PCU that has a history
+# record, fetch its most recent Findbad* record and, when the stored
+# version counter is out of step with the previous entry in its version
+# history, reset it to previous + 1 (printing the record first so the
+# repair is logged).
+
+from monitor.database.info.model import *
+import sys
+
+# All hostnames that have at least one history record.
+fbquery = HistoryNodeRecord.query.all()
+hostnames = [ n.hostname for n in fbquery ]
+
+if True:
+ for hn in hostnames:
+ fbrec = FindbadNodeRecord.query.filter_by(hostname=hn).order_by(FindbadNodeRecord.version.desc()).first()
+ if len(fbrec.versions) >= 2:
+ if fbrec.version != fbrec.versions[-2].version + 1:
+ print fbrec.hostname, fbrec.version, fbrec.versions[-2].version
+ fbrec.version = fbrec.versions[-2].version + 1
+ fbrec.flush()
+
+ session.flush()
+
+# Same repair for PCU records, keyed by plc_pcuid instead of hostname.
+fbquery = HistoryPCURecord.query.all()
+pcus = [ n.plc_pcuid for n in fbquery ]
+
+for pcuid in pcus:
+ fbrec = FindbadPCURecord.query.filter_by(plc_pcuid=pcuid).order_by(FindbadPCURecord.version.desc()).first()
+ if len(fbrec.versions) >= 2:
+ if fbrec.version != fbrec.versions[-2].version + 1:
+ print fbrec.plc_pcuid, fbrec.version, fbrec.versions[-2].version
+ fbrec.version = fbrec.versions[-2].version + 1
+ fbrec.flush()
+
+session.flush()
if type(host) == type(""): host = [host]
# get the node(s) info
- nodes = self.api.GetNodes(self.auth,host,["hostname","ssh_rsa_key","nodenetwork_ids"])
+ nodes = self.api.GetNodes(self.auth,host,["hostname","ssh_rsa_key","interface_ids"])
# for each node's node network, update the self.nodenetworks cache
nodenetworks = []
for node in nodes:
- for net in node["nodenetwork_ids"]:
+ for net in node["interface_ids"]:
nodenetworks.append(net)
- plcnodenetworks = self.api.GetNodeNetworks(self.auth,nodenetworks,["nodenetwork_id","ip"])
+ plcnodenetworks = self.api.GetInterfaces(self.auth,nodenetworks,["interface_id","ip"])
for n in plcnodenetworks:
- self.nodenetworks[n["nodenetwork_id"]]=n
+ self.nodenetworks[n["interface_id"]]=n
return nodes
def _record_from_node(self, node, nokey_list=None):
host = node['hostname']
key = node['ssh_rsa_key']
- nodenetworks = node['nodenetwork_ids']
+ nodenetworks = node['interface_ids']
if len(nodenetworks)==0: return (host, None, None, None)
# the [0] subscript to node['interface_ids'] means
fb = None
+# Raised when the second ssh login attempt (made after updating the
+# remote host key) also fails; lets callers handle this case separately
+# from generic session-setup exceptions, as done below.
+class ExceptionDoubleSSHError(Exception): pass
+
class NodeConnection:
def __init__(self, connection, node, config):
self.node = node
if ret != 0:
print "\tFAILED TWICE"
#sys.exit(1)
- raise Exception("Failed twice trying to login with updated ssh host key")
+ raise ExceptionDoubleSSHError("Failed twice trying to login with updated ssh host key")
t1 = time.time()
# KILL any already running servers.
self.session = PlanetLabSession(self.hostname, False, True)
else:
self.session = PlanetLabSession(self.hostname, config.nosetup, config.verbose)
- except Exception, e:
+ except ExceptionDoubleSSHError, e:
msg = "ERROR setting up session for %s" % self.hostname
print msg
+ return False
+ except Exception, e:
traceback.print_exc()
email_exception(msg)
return False
try:
time.sleep(self.session.timeout*5)
conn = self.session.get_connection(config)
+ except EOFError:
+ # failed twice... no need to report this really, it's just in a
+ # weird state...
+ return False
except:
traceback.print_exc()
email_exception(self.hostname)
]:
sequences.update({n : "restart_bootmanager_boot"})
- # conn.restart_bootmanager('rins')
+ # conn.restart_bootmanager('reinstall')
for n in [ "bminit-cfg-auth-getplc-installinit-validate-exception-modulefail-update-debug-done",
"bminit-cfg-auth-getplc-update-installinit-validate-exception-modulefail-update-debug-done",
"bminit-cfg-auth-getplc-installinit-validate-bmexceptmount-exception-noinstall-update-debug-done",
# repair_node_keys
sequences.update({"bminit-cfg-auth-bootcheckfail-authfail-exception-update-bootupdatefail-authfail-debug-done": "repair_node_keys"})
- # conn.restart_node('rins')
+ # conn.restart_node('reinstall')
for n in ["bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-exception-chrootfail-update-debug-done",
"bminit-cfg-auth-getplc-update-installinit-validate-rebuildinitrd-netcfg-update3-disk-update4-exception-chrootfail-update-debug-done",
"bminit-cfg-auth-getplc-hardware-installinit-installdisk-installbootfs-installcfg-exception-chrootfail-update-debug-done",
if type(conn) == type(False): return False
#if forced_action == "reboot":
- # conn.restart_node('rins')
+ # conn.restart_node('reinstall')
# return True
boot_state = conn.get_boot_state()
conn.restart_bootmanager('boot')
elif sequences[s] == "restart_bootmanager_rins":
print "...Restarting BootManager.py on %s "%hostname
- conn.restart_bootmanager('rins')
+ conn.restart_bootmanager('reinstall')
elif sequences[s] == "restart_node_rins":
- conn.restart_node('rins')
+ conn.restart_node('reinstall')
elif sequences[s] == "restart_node_boot":
conn.restart_node('boot')
elif sequences[s] == "repair_node_keys":
if conn.compare_and_repair_nodekeys():
# the keys either are in sync or were forced in sync.
# so try to reboot the node again.
- conn.restart_bootmanager('rins')
+ conn.restart_bootmanager('reinstall')
pass
else:
# there was some failure to synchronize the keys.
args['hostname'] = hostname
args['network_config'] = nodenet_str
- args['nodenetwork_id'] = net['nodenetwork_id']
+ args['interface_id'] = net['interface_id']
sitehist.sendMessage('baddns_notice', **args)
plc_pcu_stats = Field(PickleType,default=None)
acts_as_versioned(ignore=['date_checked'])
+
+# Flattened PCU entity: each PLC field gets its own column rather than
+# the single pickled plc_pcu_stats blob used by the older PCU entity.
+# date_checked is excluded from versioning so routine re-checks do not
+# create new versions.
+class PlcPCU2(Entity):
+ pcu_id = Field(Integer,primary_key=True)
+ date_checked = Field(DateTime,default=datetime.now)
+
+ site_id = Field(Integer, default=0)
+ protocol = Field(String, default=None)
+ node_ids = Field(PickleType,default=None)
+ ports = Field(PickleType,default=None)
+ hostname = Field(String, default=None)
+ ip = Field(String, default=None)
+ username = Field(String, default=None)
+ password = Field(String, default=None)
+ model = Field(String, default=None)
+ notes = Field(String, default=None)
+
+ acts_as_versioned(ignore=['date_checked'])
for net in node["interface_ids"]:
nodenetworks.append(net)
- plcnodenetworks = self.api.GetInterfaces(self.auth,nodenetworks,["nodenetwork_id","ip"])
+ plcnodenetworks = self.api.GetInterfaces(self.auth,nodenetworks,["interface_id","ip"])
for n in plcnodenetworks:
- self.nodenetworks[n["nodenetwork_id"]]=n
+ self.nodenetworks[n["interface_id"]]=n
return nodes
def _record_from_node(self, node, nokey_list=None):
except:
print "ERROR:"
- email_exception(nodename)
+ email_exception(str(nodename))
print traceback.print_exc()
pass
echo ' "bmlog":"'`ls /tmp/bm.log`'",'
echo ' "bootcd_version":"'`cat /mnt/cdrom/bootme/ID`'",'
echo ' "nm_status":"'`ps ax | grep nm.py | grep -v grep`'",'
- echo ' "fs_status":"'`touch /var/log/monitor 2>&1`'",'
+ echo ' "fs_status":"'`touch /var/log/monitor 2>&1 ; if [ -d /vservers/ ] ; then touch /vservers/monitor.log 2>&1 ; fi ; grep proc /proc/mounts | grep ro,`'",'
echo ' "dns_status":"'`host boot.planet-lab.org 2>&1`'",'
echo ' "princeton_comon_dir":"'`ls -d /vservers/princeton_comon`'",'
ID=`grep princeton_comon /etc/passwd | awk -F : '{if ( $3 > 500 ) { print $3}}'`
echo ' "princeton_comon_running":"'`ls -d /proc/virtual/$ID`'",'
echo ' "princeton_comon_procs":"'`vps ax | grep $ID | grep -v grep | wc -l`'",'
+ echo ' "rpm_version":"'`rpm -q NodeManager`'",'
echo "}"
EOF """)
'nm_status' : '',
'fs_status' : '',
'dns_status' : '',
+ 'rpm_version' : '',
'princeton_comon_dir' : "",
'princeton_comon_running' : "",
'princeton_comon_procs' : "", 'ssh_portused' : None})
print traceback.print_exc()
sys.exit(1)
+ print "RPMVERSION: %s %s" % (nodename, values['rpm_version'])
### RUN SSH ######################
b_getbootcd_id = True
You may update the node's network information at the link below:
- https://www.planet-lab.org/db/nodes/node_networks.php?id=%(nodenetwork_id)s
+ https://www.planet-lab.org/db/nodes/node_networks.php?id=%(interface_id)s
If you have any questions, please feel free to contact us at PlanetLab Support (support@planet-lab.org).
def __repr__(self):
return self.api.__repr__()
-api = PLC(auth.auth, auth.server)
class CachedPLC(PLC):
return run_or_returncached
+api = PLC(auth.auth, auth.server)
+cacheapi = CachedPLC(auth.auth, auth.server)
+
def getAPI(url):
return xmlrpclib.Server(url, verbose=False, allow_none=True)
l_nodes = [ s.plc_node_stats for s in dbnodes ]
print "plcpcu"
- dbpcus = PlcPCU.query.all()
- l_pcus = [ s.plc_pcu_stats for s in dbpcus ]
+ dbpcus = PlcPCU2.query.all()
+ l_pcus = []
+ for s in dbpcus:
+ pcu = {}
+ for k in ['username', 'protocol', 'node_ids', 'ip',
+ 'pcu_id', 'hostname', 'site_id', 'notes',
+ 'model', 'password', 'ports']:
+ pcu[k] = getattr(s, k)
+ l_pcus.append(pcu)
+ #l_pcus = [ s.plc_pcu_stats for s in dbpcus ]
print "dsites_from_lsites"
(d_sites,id2lb) = dsites_from_lsites(l_sites)
print "sync pcus"
for pcu in l_pcus:
- dbpcu = PlcPCU.findby_or_create(pcu_id=pcu['pcu_id'])
+ dbpcu = PlcPCU2.findby_or_create(pcu_id=pcu['pcu_id'])
dbpcu.date_checked = datetime.now()
- dbpcu.plc_pcu_stats = pcu
- deleteExtra(l_pcus, PlcPCU, 'pcu_id', 'pcu_id')
+ for key in pcu.keys():
+ print "setting %s = %s" % (key, pcu[key])
+ setattr(dbpcu, key, pcu[key])
+
+ deleteExtra(l_pcus, PlcPCU2, 'pcu_id', 'pcu_id')
deleteExtra(l_pcus, HistoryPCURecord, 'plc_pcuid', 'pcu_id')
deleteExtra(l_pcus, FindbadPCURecord, 'plc_pcuid', 'pcu_id')
session.flush()
if config.rins:
print "Setting %s to rins" % node
- api.UpdateNode(node, {'boot_state' : 'rins'})
+ api.UpdateNode(node, {'boot_state' : 'reinstall'})
if config.backoff:
print "Enabling Slices & Slice Creation for %s" % node
def logic():
- plc.nodeBootState(host, 'rins')
+ plc.nodeBootState(host, 'reinstall')
node_end_record(host)
def main(hostnames, sitenames):
'version' : node['version']})
nnets = api.GetInterfaces(node['interface_ids'])
for nnet in nnets:
- del nnet['nodenetwork_id']
+ del nnet['interface_id']
del nnet['nodenetwork_setting_ids']
api06.AddNodeNetwork(id, nnet)
print ""
for h in d_nodes:
host = d_nodes[h]
for nw_id in host['interface_ids']:
- l_nw = plc.getNodeNetworks({'nodenetwork_id': host['interface_ids']})
+ l_nw = plc.getNodeNetworks({'interface_id': host['interface_ids']})
bwlimit[h] = []
for nw in l_nw:
if nw['bwlimit'] != None and nw['bwlimit'] < 500000:
print "len of nn entries with ip: %s == %s " % ( nodenet['ip'], len(nnet2) )
for nn2 in nnet2:
n2 = api.GetNodes(nn2['node_id'])
- print "\t%d node is attached to nodenetwork %s" % ( len(n2), nn2['nodenetwork_id'] )
+ print "\t%d node is attached to nodenetwork %s" % ( len(n2), nn2['interface_id'] )
if len(n2) != 0 :
n2 = n2[0]
print
#print "host %s : %s" % (n2['hostname'], n2['node_id'])
else:
pass
- #print nn2['nodenetwork_id']
- #api.DeleteNodeNetwork(nn2['nodenetwork_id'])
+ #print nn2['interface_id']
+ #api.DeleteNodeNetwork(nn2['interface_id'])
else:
nnids = util.file.getListFromFile('nnids.txt')
nnids = [ int(i) for i in nnids]
for nn2 in nnet2:
n2 = api.GetNodes(nn2['node_id'])
if len(n2) == 0 :
- print "\t%d node is attached to nodenetwork %s %s" % ( len(n2), nn2['nodenetwork_id'] , nn2['ip']),
+ print "\t%d node is attached to nodenetwork %s %s" % ( len(n2), nn2['interface_id'] , nn2['ip']),
netlist = api.GetInterfaces({'ip' : nn2['ip']})
if len(netlist) != 1: