if not tasks:
if self.verbose:
duration = datetime.now() - begin
- print "total completer {} {}s".format(self.message,
- int(duration.total_seconds()))
+ print("total completer {} {}s".format(self.message,
+ int(duration.total_seconds())))
return True
if datetime.now() > timeout:
for task in tasks:
task.failure_epilogue()
return False
if self.verbose:
- print '{}s..'.format(period_seconds),
+ print('{}s..'.format(period_seconds), end=' ')
time.sleep(period_seconds)
# in case we're empty
return True
def run (self, silent):
result = self.actual_run()
if silent:
- print '+' if result else '.',
+ print('+' if result else '.', end=' ')
sys.stdout.flush()
else:
- print self.message(), "->", "OK" if result else "KO"
+ print(self.message(), "->", "OK" if result else "KO")
return result
def message (self):
return "you-need-to-redefine-message"
def failure_epilogue (self):
- print "you-need-to-redefine-failure_epilogue"
+ print("you-need-to-redefine-failure_epilogue")
# random result
class TaskTest (CompleterTask):
return "Task {} - delay was {}s".format(self.counter, self.delay)
def failure_epilogue (self):
- print "BOTTOM LINE: FAILURE with task ({})".format(self.counter)
+ print("BOTTOM LINE: FAILURE with task ({})".format(self.counter))
def main ():
import sys
if len(sys.argv) != 6:
- print "Usage: <command> number_tasks max_random timeout_s silent_s period_s"
+ print("Usage: <command> number_tasks max_random timeout_s silent_s period_s")
sys.exit(1)
[number, max, timeout, silent, period] = [ int(x) for x in sys.argv[1:]]
tasks = [ TaskTest(max) for i in range(number)]
success = Completer(tasks,verbose=True).run(timedelta(seconds=timeout),
timedelta(seconds=silent),
timedelta(seconds=period))
- print "OVERALL",success
+ print("OVERALL",success)
if __name__ == '__main__':
main()
-#!/usr/bin/python -u
+#!/usr/bin/python3 -u
# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
# using a hostname or an IP
import socket
-import xmlrpclib
+import xmlrpc.client
import traceback
class PlcapiUrlScanner:
def __init__ (self, auth, hostname=None, ip=None, verbose=False):
self.auth = auth
if not hostname and not ip:
- raise Exception, "PlcapiUrlScanner needs _some_ input"
+ raise Exception("PlcapiUrlScanner needs _some_ input")
if hostname:
if not ip:
try:
def try_url (self, url):
try:
- xmlrpclib.ServerProxy (url, verbose=self.verbose, allow_none=True).GetNodes(self.auth)
- print 'YES', url
+ xmlrpc.client.ServerProxy (url, verbose=self.verbose, allow_none=True).GetNodes(self.auth)
+ print('YES', url)
return True
- except xmlrpclib.ProtocolError as e:
- print '... (http error {})'.format(e.errcode), url
+ except xmlrpc.client.ProtocolError as e:
+ print('... (http error {})'.format(e.errcode), url)
return False
except Exception as e:
- print '---', type(e).__name__, url, e
+ print('---', type(e).__name__, url, e)
if self.verbose:
traceback.print_exc()
return False
import re
import traceback
import subprocess
-import commands
import socket
from optparse import OptionParser
import utils
from TestSsh import TestSsh
from TestMapper import TestMapper
+from functools import reduce
# too painful to propagate this cleanly
-verbose=None
+verbose = None
def header (message, banner=True):
if not message: return
- if banner: print "===============",
- print message
+ if banner:
+ print("===============", end=' ')
+ print(message)
sys.stdout.flush()
-def timestamp_sort(o1, o2):
- return o1.timestamp-o2.timestamp
+def timestamp_key(o): return o.timestamp
def short_hostname (hostname):
return hostname.split('.')[0]
# instances, that go undetected through sensing
class Starting:
- location='/root/starting'
+ location = '/root/starting'
+
def __init__ (self):
self.tuples=[]
def load (self):
- try: self.tuples=[line.strip().split('@')
- for line in file(Starting.location).readlines()]
- except: self.tuples=[]
+ try:
+ with open(Starting.location) as starting:
+ self.tuples = [line.strip().split('@') for line in starting.readlines()]
+ except:
+ self.tuples = []
def vnames (self) :
self.load()
- return [ x for (x,_) in self.tuples ]
+ return [ x for (x, _) in self.tuples ]
def add (self, vname, bname):
if not vname in self.vnames():
with open(Starting.location, 'a') as out:
- out.write("%s@%s\n"%(vname,bname))
+ out.write("{}@{}\n".format(vname, bname))
def delete_vname (self, vname):
self.load()
if vname in self.vnames():
- f=file(Starting.location,'w')
- for (v, b) in self.tuples:
- if v != vname: f.write("%s@%s\n"%(v,b))
- f.close()
+ with open(Starting.location, 'w') as f:
+ for (v, b) in self.tuples:
+ if v != vname:
+ f.write("{}@{}\n".format(v, b))
####################
# pool class
self.ip = None
def line(self):
- return "Pooled %s (%s) -> %s"%(self.hostname,self.userdata, self.status)
+ return "Pooled {} ({}) -> {}".format(self.hostname, self.userdata, self.status)
def char (self):
- if self.status==None: return '?'
- elif self.status=='busy': return '+'
- elif self.status=='free': return '-'
- elif self.status=='mine': return 'M'
- elif self.status=='starting': return 'S'
+ if self.status == None: return '?'
+ elif self.status == 'busy': return '+'
+ elif self.status == 'free': return '-'
+ elif self.status == 'mine': return 'M'
+ elif self.status == 'starting': return 'S'
def get_ip(self):
if self.ip: return self.ip
self.substrate = substrate
def list (self, verbose=False):
- for i in self.pool_items: print i.line()
+ for i in self.pool_items: print(i.line())
def line (self):
line = self.message
return self._item(hostname).userdata
def get_ip (self, hostname):
- try: return self._item(hostname).get_ip()
- except: return socket.gethostbyname(hostname)
+ try:
+ return self._item(hostname).get_ip()
+ except:
+ return socket.gethostbyname(hostname)
def set_mine (self, hostname):
try:
self._item(hostname).status='mine'
except:
- print 'WARNING: host %s not found in IP pool %s'%(hostname,self.message)
+ print('WARNING: host %s not found in IP pool %s'%(hostname,self.message))
def next_free (self):
for i in self.pool_items:
def add_starting (self, vname, bname):
Starting().add(vname, bname)
for i in self.pool_items:
- if i.hostname == vname: i.status='mine'
+ if i.hostname == vname:
+ i.status = 'mine'
# load the starting instances from the common file
# remember that might be ours
# return the list of (vname,bname) that are not ours
def load_starting (self):
- starting=Starting()
+ starting = Starting()
starting.load()
- new_tuples=[]
- for (v,b) in starting.tuples:
+ new_tuples = []
+ for (v, b) in starting.tuples:
for i in self.pool_items:
if i.hostname == v and i.status == 'free':
i.status = 'starting'
def release_my_starting (self):
for i in self.pool_items:
if i.status == 'mine':
- Starting().delete_vname (i.hostname)
+ Starting().delete_vname(i.hostname)
i.status = None
def _sense (self):
for item in self.pool_items:
if item.status is not None:
- print item.char(),
+ print(item.char(), end=' ')
continue
if self.check_ping (item.hostname):
item.status = 'busy'
- print '*',
+ print('*', end=' ')
else:
item.status = 'free'
- print '.',
+ print('.', end=' ')
def sense (self):
- print 'Sensing IP pool', self.message,
+ print('Sensing IP pool', self.message, end=' ')
self._sense()
- print 'Done'
+ print('Done')
for (vname,bname) in self.load_starting():
- self.substrate.add_starting_dummy (bname, vname)
- print "After having loaded 'starting': IP pool"
- print self.line()
+ self.substrate.add_starting_dummy(bname, vname)
+ print("After having loaded 'starting': IP pool")
+ print(self.line())
# OS-dependent ping option (support for macos, for convenience)
ping_timeout_option = None
# returns True when a given hostname/ip responds to ping
def check_ping (self, hostname):
if not Pool.ping_timeout_option:
- (status, osname) = commands.getstatusoutput("uname -s")
+ (status, osname) = subprocess.getstatusoutput("uname -s")
if status != 0:
- raise Exception, "TestPool: Cannot figure your OS name"
+ raise Exception("TestPool: Cannot figure your OS name")
if osname == "Linux":
- Pool.ping_timeout_option="-w"
+ Pool.ping_timeout_option = "-w"
elif osname == "Darwin":
- Pool.ping_timeout_option="-t"
+ Pool.ping_timeout_option = "-t"
- command="ping -c 1 %s 1 %s"%(Pool.ping_timeout_option, hostname)
- (status,output) = commands.getstatusoutput(command)
+ command="ping -c 1 {} 1 {}".format(Pool.ping_timeout_option, hostname)
+ (status, output) = subprocess.getstatusoutput(command)
return status == 0
####################
class Box:
- def __init__ (self,hostname):
+ def __init__ (self, hostname):
self.hostname = hostname
self._probed = None
def shortname (self):
return TestSsh(self.hostname, username='root', unknown_host=False)
def reboot (self, options):
self.test_ssh().run("shutdown -r now",
- message="Rebooting %s"%self.hostname,
+ message="Rebooting {}".format(self.hostname),
dry_run=options.dry_run)
def hostname_fedora (self, virt=None):
- result = "%s {"%self.hostname
+ # this truly is an opening bracket
+ result = "{}".format(self.hostname) + " {"
if virt:
- result += "%s-"%virt
- result += "%s %s"%(self.fedora(),self.memory())
+ result += "{}-".format(virt)
+ result += "{} {}".format(self.fedora(), self.memory())
# too painful to propagate this cleanly
global verbose
if verbose:
- result += "-%s" % self.uname()
+ result += "-{}".format(self.uname())
+ # and the matching closing bracket
result += "}"
return result
composite = self.backquote ( probe_argv, trash_err=True )
self._hostname = self._uptime = self._uname = self._fedora = self._memory = "** Unknown **"
if not composite:
- print "root@%s unreachable"%self.hostname
+ print("root@{} unreachable".format(self.hostname))
self._probed = ''
else:
try:
self._fedora = fedora.replace("Fedora release ","f").split(" ")[0]
# translate into Mb
self._memory = int(memory.split()[1])/(1024)
- except:
+ except Exception as e:
import traceback
-            print 'BEG issue with pieces',pieces
+            print('BEG issue with pieces', pieces)
traceback.print_exc()
-            print 'END issue with pieces',pieces
+            print('END issue with pieces', pieces)
self._probed = self._hostname
return self._probed
return '*unprobed* fedora'
def memory(self):
self.probe()
- if hasattr(self,'_memory') and self._memory: return "%s Mb"%self._memory
+ if hasattr(self,'_memory') and self._memory: return "{} Mb".format(self._memory)
return '*unprobed* memory'
def run(self, argv, message=None, trash_err=False, dry_run=False):
if dry_run:
- print 'DRY_RUN:',
- print " ".join(argv)
+ print('DRY_RUN:', end=' ')
+ print(" ".join(argv))
return 0
else:
header(message)
if not trash_err:
return subprocess.call(argv)
else:
- return subprocess.call(argv, stderr=file('/dev/null','w'))
+ with open('/dev/null', 'w') as null:
+ return subprocess.call(argv, stderr=null)
def run_ssh (self, argv, message, trash_err=False, dry_run=False):
ssh_argv = self.test_ssh().actual_argv(argv)
- result=self.run (ssh_argv, message, trash_err, dry_run=dry_run)
- if result!=0:
- print "WARNING: failed to run %s on %s"%(" ".join(argv),self.hostname)
+ result = self.run (ssh_argv, message, trash_err, dry_run=dry_run)
+ if result != 0:
+ print("WARNING: failed to run {} on {}".format(" ".join(argv), self.hostname))
return result
def backquote (self, argv, trash_err=False):
- # print 'running backquote',argv
+ # in python3 we need to set universal_newlines=True
if not trash_err:
- result= subprocess.Popen(argv,stdout=subprocess.PIPE).communicate()[0]
+ out_err = subprocess.Popen(argv, stdout=subprocess.PIPE,
+ universal_newlines=True).communicate()
else:
- result= subprocess.Popen(argv,stdout=subprocess.PIPE,stderr=file('/dev/null','w')).communicate()[0]
- return result
+ with open('/dev/null', 'w') as null:
+ out_err = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=null,
+ universal_newlines=True).communicate()
+ # only interested in stdout here
+ return out_err[0]
# if you have any shell-expanded arguments like *
# and if there's any chance the command is adressed to the local host
self.pids.append(pid)
def line (self):
- return "== %s == (pids=%r)"%(self.buildname,self.pids)
+ return "== {} == (pids={})".format(self.buildname, self.pids)
class BuildBox (Box):
def __init__ (self, hostname):
def list(self, verbose=False):
if not self.build_instances:
- header ('No build process on %s (%s)'%(self.hostname_fedora(), self.uptime()))
+ header ('No build process on {} ({})'.format(self.hostname_fedora(), self.uptime()))
else:
- header ("Builds on %s (%s)"%(self.hostname_fedora(), self.uptime()))
+ header ("Builds on {} ({})".format(self.hostname_fedora(), self.uptime()))
for b in self.build_instances:
header (b.line(), banner=False)
# inspect box and find currently running builds
def sense(self, options):
- print 'xb',
+ print('xb', end=' ')
pids = self.backquote_ssh(['pgrep','lbuild'], trash_err=True)
if not pids: return
command = ['ps', '-o', 'pid,command'] + [ pid for pid in pids.split("\n") if pid]
# buildname is expansed here
self.add_build(buildname, m.group('pid'))
continue
- header('BuildLxcBox.sense: command %r returned line that failed to match'%command)
- header(">>%s<<"%line)
+ header('BuildLxcBox.sense: command {} returned line that failed to match'.format(command))
+ header(">>{}<<".format(line))
############################################################
class PlcInstance:
def __init__ (self, plcbox, lxcname, pid):
PlcInstance.__init__(self, plcbox)
self.lxcname = lxcname
- self.pid = pid
+ self.pid = pid
def vplcname (self):
return self.lxcname.split('-')[-1]
return self.lxcname.rsplit('-',2)[0]
def line (self):
- msg="== %s =="%(self.vplcname())
- msg += " [=%s]"%self.lxcname
+ msg="== {} ==".format(self.vplcname())
+ msg += " [={}]".format(self.lxcname)
if self.pid==-1: msg+=" not (yet?) running"
- else: msg+=" (pid=%s)"%self.pid
- if self.timestamp: msg += " @ %s"%self.pretty_timestamp()
+ else: msg+=" (pid={})".format(self.pid)
+ if self.timestamp: msg += " @ {}".format(self.pretty_timestamp())
else: msg += " *unknown timestamp*"
return msg
def kill (self):
- command="rsync lxc-driver.sh %s:/root"%self.plc_box.hostname
- commands.getstatusoutput(command)
- msg="lxc container stopping %s on %s"%(self.lxcname, self.plc_box.hostname)
- self.plc_box.run_ssh(['/root/lxc-driver.sh', '-c', 'stop_lxc', '-n', self.lxcname], msg)
+ command="rsync lxc-driver.sh {}:/root".format(self.plc_box.hostname)
+ subprocess.getstatusoutput(command)
+ msg="lxc container stopping {} on {}".format(self.lxcname, self.plc_box.hostname)
+ self.plc_box.run_ssh(['/root/lxc-driver.sh', '-c', 'stop_lxc', '-n', self.lxcname], msg)
self.plc_box.forget(self)
##########
def list(self, verbose=False):
if not self.plc_instances:
- header ('No plc running on %s'%(self.line()))
+ header ('No plc running on {}'.format(self.line()))
else:
- header ("Active plc VMs on %s"%self.line())
- self.plc_instances.sort(timestamp_sort)
+ header ("Active plc VMs on {}".format(self.line()))
+ self.plc_instances.sort(key=timestamp_key)
for p in self.plc_instances:
header (p.line(), banner=False)
def add_lxc (self, lxcname, pid):
for plc in self.plc_instances:
if plc.lxcname == lxcname:
- header("WARNING, duplicate myplc %s running on %s"%\
- (lxcname, self.hostname), banner=False)
+ header("WARNING, duplicate myplc {} running on {}"\
+ .format(lxcname, self.hostname), banner=False)
return
self.plc_instances.append(PlcLxcInstance(self, lxcname, pid))
# a line describing the box
def line(self):
- return "%s [max=%d,free=%d] (%s)"%(self.hostname_fedora(virt="lxc"),
- self.max_plcs,self.free_slots(),
- self.uptime())
+ return "{} [max={},free={}] ({})".format(self.hostname_fedora(virt="lxc"),
+ self.max_plcs, self.free_slots(),
+ self.uptime())
def plc_instance_by_lxcname(self, lxcname):
for p in self.plc_instances:
# essentially shutdown all running containers
def soft_reboot(self, options):
- command="rsync lxc-driver.sh %s:/root"%self.hostname
- commands.getstatusoutput(command)
- self.run_ssh( ['/root/lxc-driver.sh','-c','stop_all'],
- "Stopping all running lxc containers on %s"%self.hostname,
+ command="rsync lxc-driver.sh {}:/root".format(self.hostname)
+ subprocess.getstatusoutput(command)
+ self.run_ssh( ['/root/lxc-driver.sh','-c','stop_all'],
+ "Stopping all running lxc containers on {}".format(self.hostname),
dry_run=options.dry_run)
# sense is expected to fill self.plc_instances with PlcLxcInstance's
# to describe the currently running VM's
def sense(self, options):
- print "xp",
- command="rsync lxc-driver.sh %s:/root"%self.hostname
- commands.getstatusoutput(command)
- command=['/root/lxc-driver.sh', '-c', 'sense_all']
+ print("xp", end=' ')
+ command = "rsync lxc-driver.sh {}:/root".format(self.hostname)
+ subprocess.getstatusoutput(command)
+ command = ['/root/lxc-driver.sh', '-c', 'sense_all']
lxc_stat = self.backquote_ssh (command)
- for lxc_line in lxc_stat.split("\n"):
+ for lxc_line in lxc_stat.split("\n"):
if not lxc_line: continue
lxcname = lxc_line.split(";")[0]
- pid = lxc_line.split(";")[1]
- timestamp = lxc_line.split(";")[2]
+ pid = lxc_line.split(";")[1]
+ timestamp = lxc_line.split(";")[2]
self.add_lxc(lxcname,pid)
try: timestamp = int(timestamp)
except: timestamp = 0
p = self.plc_instance_by_lxcname(lxcname)
if not p:
- print 'WARNING zombie plc',self.hostname,lxcname
- print '... was expecting',lxcname,'in',[i.lxcname for i in self.plc_instances]
+ print('WARNING zombie plc',self.hostname,lxcname)
+ print('... was expecting',lxcname,'in',[i.lxcname for i in self.plc_instances])
continue
p.set_timestamp(timestamp)
return time.strftime("%Y-%m-%d:%H-%M", time.localtime(self.timestamp))
def line (self):
- msg = "== %s =="%(short_hostname(self.nodename))
- msg += " [=%s]"%self.buildname
- if self.pid: msg += " (pid=%s)"%self.pid
+ msg = "== {} ==".format(short_hostname(self.nodename))
+ msg += " [={}]".format(self.buildname)
+ if self.pid: msg += " (pid={})".format(self.pid)
else: msg += " not (yet?) running"
- if self.timestamp: msg += " @ %s"%self.pretty_timestamp()
+ if self.timestamp: msg += " @ {}".format(self.pretty_timestamp())
else: msg += " *unknown timestamp*"
return msg
def kill(self):
if self.pid == 0:
- print "cannot kill qemu %s with pid==0"%self.nodename
+ print("cannot kill qemu {} with pid==0".format(self.nodename))
return
- msg="Killing qemu %s with pid=%s on box %s"%(self.nodename, self.pid, self.qemu_box.hostname)
- self.qemu_box.run_ssh(['kill', "%s"%self.pid], msg)
+ msg = "Killing qemu {} with pid={} on box {}".format(self.nodename, self.pid, self.qemu_box.hostname)
+ self.qemu_box.run_ssh(['kill', "{}".format(self.pid)], msg)
self.qemu_box.forget(self)
def add_node(self, nodename, pid):
for qemu in self.qemu_instances:
if qemu.nodename == nodename:
- header("WARNING, duplicate qemu %s running on %s"%\
- (nodename,self.hostname), banner=False)
+ header("WARNING, duplicate qemu {} running on {}"\
+ .format(nodename,self.hostname), banner=False)
return
self.qemu_instances.append(QemuInstance(nodename, pid, self))
self.qemu_instances.append(dummy)
def line (self):
- return "%s [max=%d,free=%d] (%s) %s"%(
- self.hostname_fedora(virt="qemu"), self.max_qemus, self.free_slots(),
- self.uptime(), self.driver())
+ return "{} [max={},free={}] ({}) {}"\
+ .format(self.hostname_fedora(virt="qemu"),
+ self.max_qemus, self.free_slots(),
+ self.uptime(), self.driver())
def list(self, verbose=False):
if not self.qemu_instances:
- header ('No qemu on %s'%(self.line()))
+ header ('No qemu on {}'.format(self.line()))
else:
- header ("Qemus on %s"%(self.line()))
- self.qemu_instances.sort(timestamp_sort)
+ header ("Qemus on {}".format(self.line()))
+ self.qemu_instances.sort(key=timestamp_key)
for q in self.qemu_instances:
header (q.line(), banner=False)
matcher=re.compile("\s*(?P<pid>[0-9]+).*-cdrom\s+(?P<nodename>[^\s]+)\.iso")
def sense(self, options):
- print 'qn',
+ print('qn', end=' ')
modules = self.backquote_ssh(['lsmod']).split('\n')
self._driver = '*NO kqemu/kvm_intel MODULE LOADED*'
for module in modules:
if m:
self.add_node(m.group('nodename'), m.group('pid'))
continue
- header('QemuBox.sense: command %r returned line that failed to match'%command)
- header(">>%s<<"%line)
+ header('QemuBox.sense: command {} returned line that failed to match'.format(command))
+ header(">>{}<<".format(line))
########## retrieve alive instances and map to build
live_builds=[]
command = ['grep', '.', '/vservers/*/*/qemu.pid', '/dev/null']
q.set_buildname(buildname)
live_builds.append(buildname)
except:
- print 'WARNING, could not parse pid line',pid_line
+ print('WARNING, could not parse pid line', pid_line)
# retrieve timestamps
if not live_builds:
return
command = ['grep','.']
- command += ['/vservers/%s/*/timestamp'%b for b in live_builds]
+ command += ['/vservers/{}/*/timestamp'.format(b) for b in live_builds]
command += ['/dev/null']
ts_lines = self.backquote_ssh(command, trash_err=True).split('\n')
for ts_line in ts_lines:
continue
q.set_timestamp(timestamp)
except:
- print 'WARNING, could not parse ts line',ts_line
+ print('WARNING, could not parse ts line',ts_line)
####################
class TestInstance:
# second letter : '=' if fine, 'W' for warnings (only ignored steps) 'B' for broken
letter2 = self.second_letter()
double += letter2
- msg = " %s %s =="%(double,self.buildname)
+ msg = " {} {} ==".format(double, self.buildname)
if not self.pids:
pass
elif len(self.pids)==1:
- msg += " (pid=%s)"%self.pids[0]
+ msg += " (pid={})".format(self.pids[0])
else:
- msg += " !!!pids=%s!!!"%self.pids
- msg += " @%s"%self.pretty_timestamp()
+ msg += " !!!pids={}!!!".format(self.pids)
+ msg += " @{}".format(self.pretty_timestamp())
if letter2 != '=':
msg2 = ( ' BROKEN' if letter2 == 'B' else ' WARNING' )
# sometimes we have an empty plcindex
- msg += " [%s="%msg2 + " ".join( [ "%s@%s"%(s,i) if i else s for (i, s) in self.broken_steps ] ) + "]"
+ msg += " [{}=".format(msg2) \
+ + " ".join(["{}@{}".format(s, i) if i else s for (i, s) in self.broken_steps]) \
+ + "]"
return msg
class TestBox(Box):
# can't reboot a vserver VM
self.run_ssh(['pkill', 'run_log'], "Terminating current runs",
dry_run=options.dry_run)
- self.run_ssh(['rm', '-f', Starting.location], "Cleaning %s"%Starting.location,
+ self.run_ssh(['rm', '-f', Starting.location], "Cleaning {}".format(Starting.location),
dry_run=options.dry_run)
def get_test(self, buildname):
self.test_instances.append(TestInstance(buildname, pid))
return
if i.pids:
- print "WARNING: 2 concurrent tests run on same build %s"%buildname
+ print("WARNING: 2 concurrent tests run on same build {}".format(buildname))
i.add_pid(pid)
def add_broken(self, buildname, plcindex, step):
matcher_grep_missing=re.compile ("grep: /root/(?P<buildname>[^/]+)/logs/trace: No such file or directory")
def sense(self, options):
- print 'tm',
+ print('tm', end=' ')
self.starting_ips = [x for x in self.backquote_ssh(['cat',Starting.location], trash_err=True).strip().split('\n') if x]
# scan timestamps on all tests
timestamp = int(timestamp)
t = self.add_timestamp(buildname, timestamp)
except:
- print 'WARNING, could not parse ts line', ts_line
+ print('WARNING, could not parse ts line', ts_line)
# let's try to be robust here -- tests that fail very early like e.g.
# "Cannot make space for a PLC instance: vplc IP pool exhausted", that occurs as part of provision
step = m.group('step')
self.add_broken(buildname, plcindex, step)
continue
- header("TestBox.sense: command %r returned line that failed to match\n%s"%(command,line))
- header(">>%s<<"%line)
+ header("TestBox.sense: command {} returned line that failed to match\n{}".format(command, line))
+ header(">>{}<<".format(line))
pids = self.backquote_ssh (['pgrep', 'run_log'], trash_err=True)
if not pids:
return
- command = ['ls','-ld'] + ["/proc/%s/cwd"%pid for pid in pids.split("\n") if pid]
+ command = ['ls','-ld'] + ["/proc/{}/cwd".format(pid) for pid in pids.split("\n") if pid]
ps_lines = self.backquote_ssh(command).split('\n')
for line in ps_lines:
if not line.strip():
buildname = m.group('buildname')
self.add_running_test(pid, buildname)
continue
- header("TestBox.sense: command %r returned line that failed to match\n%s"%(command,line))
- header(">>%s<<"%line)
+ header("TestBox.sense: command {} returned line that failed to match\n{}".format(command, line))
+ header(">>{}<<".format(line))
def line (self):
msg="running tests"
if not instances:
- header ("No %s on %s"%(msg,self.line()))
+ header ("No {} on {}".format(msg, self.line()))
else:
- header ("%s on %s"%(msg,self.line()))
- instances.sort(timestamp_sort)
+ header ("{} on {}".format(msg, self.line()))
+            instances.sort(key=timestamp_key)
for i in instances:
- print i.line()
+ print(i.line())
# show 'starting' regardless of verbose
if self.starting_ips:
- header("Starting IP addresses on %s"%self.line())
+ header("Starting IP addresses on {}".format(self.line()))
self.starting_ips.sort()
for starting in self.starting_ips:
- print starting
+ print(starting)
else:
- header("Empty 'starting' on %s"%self.line())
+ header("Empty 'starting' on {}".format(self.line()))
############################################################
class Options: pass
def summary_line (self):
msg = "["
- msg += " %d xp"%len(self.plc_lxc_boxes)
- msg += " %d tried plc boxes"%len(self.plc_boxes)
+ msg += " {} xp".format(len(self.plc_lxc_boxes))
+ msg += " {} tried plc boxes".format(len(self.plc_boxes))
msg += "]"
return msg
def fqdn (self, hostname):
if hostname.find('.') < 0:
- return "%s.%s" % (hostname, self.domain())
+ return "{}.{}".format(hostname, self.domain())
return hostname
# return True if actual sensing takes place
def sense(self, force=False):
if self._sensed and not force:
return False
- print 'Sensing local substrate...',
+ print('Sensing local substrate...', end=' ')
for b in self.default_boxes:
b.sense(self.options)
- print 'Done'
+ print('Done')
self._sensed = True
return True
plcs = [ self.localize_sfa_rspec(plc, options) for plc in plcs ]
self.list()
return plcs
- except Exception, e:
- print '* Could not provision this test on current substrate','--',e,'--','exiting'
+ except Exception as e:
+ print('* Could not provision this test on current substrate','--',e,'--','exiting')
traceback.print_exc()
sys.exit(1)
all_plc_instances = reduce(lambda x, y: x+y,
[ pb.plc_instances for pb in self.plc_boxes ],
[])
- all_plc_instances.sort(timestamp_sort)
+ all_plc_instances.sort(key=timestamp_key)
try:
plc_instance_to_kill = all_plc_instances[0]
except:
msg += " PLC boxes are full"
if not vplc_hostname:
msg += " vplc IP pool exhausted"
- msg += " %s"%self.summary_line()
- raise Exception,"Cannot make space for a PLC instance:" + msg
+ msg += " {}".format(self.summary_line())
+ raise Exception("Cannot make space for a PLC instance:" + msg)
freed_plc_boxname = plc_instance_to_kill.plc_box.hostname
freed_vplc_hostname = plc_instance_to_kill.vplcname()
- message = 'killing oldest plc instance = %s on %s' % (plc_instance_to_kill.line(),
- freed_plc_boxname)
+ message = 'killing oldest plc instance = {} on {}'\
+ .format(plc_instance_to_kill.line(), freed_plc_boxname)
plc_instance_to_kill.kill()
# use this new plcbox if that was the problem
if not plc_boxname:
#### compute a helpful vserver name
# remove domain in hostname
vplc_short = short_hostname(vplc_hostname)
- vservername = "%s-%d-%s" % (options.buildname, plc['index'], vplc_short)
- plc_name = "%s_%s" % (plc['name'], vplc_short)
+ vservername = "{}-{}-{}".format(options.buildname, plc['index'], vplc_short)
+ plc_name = "{}_{}".format(plc['name'], vplc_short)
- utils.header('PROVISION plc %s in box %s at IP %s as %s' % \
- (plc['name'], plc_boxname, vplc_hostname, vservername))
+ utils.header('PROVISION plc {} in box {} at IP {} as {}'\
+ .format(plc['name'], plc_boxname, vplc_hostname, vservername))
#### apply in the plc_spec
# # informative
all_qemu_instances = reduce(lambda x, y: x+y,
[ qb.qemu_instances for qb in self.qemu_boxes ],
[])
- all_qemu_instances.sort(timestamp_sort)
+ all_qemu_instances.sort(key=timestamp_key)
try:
qemu_instance_to_kill = all_qemu_instances[0]
except:
msg += " QEMU boxes are full"
if not vnode_hostname:
msg += " vnode IP pool exhausted"
- msg += " %s"%self.summary_line()
- raise Exception,"Cannot make space for a QEMU instance:"+msg
+ msg += " {}".format(self.summary_line())
+ raise Exception("Cannot make space for a QEMU instance:"+msg)
freed_qemu_boxname = qemu_instance_to_kill.qemu_box.hostname
freed_vnode_hostname = short_hostname(qemu_instance_to_kill.nodename)
# kill it
- message = 'killing oldest qemu node = %s on %s' % (qemu_instance_to_kill.line(),
- freed_qemu_boxname)
+ message = 'killing oldest qemu node = {} on {}'.format(qemu_instance_to_kill.line(),
+ freed_qemu_boxname)
qemu_instance_to_kill.kill()
# use these freed resources where needed
if not qemu_boxname:
nodemap.update(self.network_settings())
maps.append( (nodename, nodemap) )
- utils.header("PROVISION node %s in box %s at IP %s with MAC %s" % \
- (nodename, qemu_boxname, vnode_hostname, mac))
+ utils.header("PROVISION node {} in box {} at IP {} with MAC {}"\
+ .format(nodename, qemu_boxname, vnode_hostname, mac))
return test_mapper.map({'node':maps})[0]
plc['sfa']['settings']['SFA_AGGREGATE_HOST'] = plc['settings']['PLC_DB_HOST']
plc['sfa']['settings']['SFA_SM_HOST'] = plc['settings']['PLC_DB_HOST']
plc['sfa']['settings']['SFA_DB_HOST'] = plc['settings']['PLC_DB_HOST']
- plc['sfa']['settings']['SFA_PLC_URL'] = 'https://%s:443/PLCAPI/' % plc['settings']['PLC_API_HOST']
- return plc
+ plc['sfa']['settings']['SFA_PLC_URL'] = 'https://{}:443/PLCAPI/'.format(plc['settings']['PLC_API_HOST'])
+ return plc
#################### release:
def release(self, options):
return b
except:
pass
- print "Could not find box %s" % boxname
+ print("Could not find box {}".format(boxname))
return None
# deal with the mix of boxes and names and stores the current focus
if not isinstance(box, Box):
box = self.get_box(box)
if not box:
- print 'Warning - could not handle box',box
+ print('Warning - could not handle box',box)
self.focus_all.append(box)
# elaborate by type
self.focus_build = [ x for x in self.focus_all if isinstance(x, BuildBox) ]
self.focus_qemu = [ x for x in self.focus_all if isinstance(x, QemuBox) ]
def list_boxes(self):
- print 'Sensing',
+ print('Sensing', end=' ')
for box in self.focus_all:
box.sense(self.options)
- print 'Done'
+ print('Done')
for box in self.focus_all:
box.list(self.options.verbose)
box.reboot(self.options)
def sanity_check(self):
- print 'Sanity check'
+ print('Sanity check')
self.sanity_check_plc()
self.sanity_check_qemu()
if node not in hash:
hash[node] = 0
hash[node]+=1
- for (node,count) in hash.items():
+ for (node,count) in list(hash.items()):
if count!=1:
- print 'WARNING - duplicate node', node
+ print('WARNING - duplicate node', node)
####################
#
# wrapper to xmlrpc server, that support dry-run commands
# we dont want to have to depend on PLCAPI, so:
-import xmlrpclib
+import xmlrpc.client
# the default value is for the dry run mode
server_methods = [ ('GetNodes' , []),
class TestApiserver:
class Callable:
- def __init__(self,server,dry_run,method,defaults):
- self.server=server
- self.dry_run=dry_run
- self.method=method
- self.defaults=defaults
+ def __init__(self, server, dry_run, method, defaults):
+ self.server = server
+ self.dry_run = dry_run
+ self.method = method
+ self.defaults = defaults
def __call__ (self, *args):
if self.dry_run:
- print "dry_run:",self.method,
- if len(args)>0 and type(args[0])==type({}) and args[0].has_key('AuthMethod'):
- print '<auth>',
- args=args[1:]
- print '(',args,')'
+ print("dry_run:",self.method, end=' ')
+ if len(args) > 0 and type(args[0]) == type({}) and 'AuthMethod' in args[0]:
+ print('<auth>', end=' ')
+ args = args[1:]
+ print('(', args, ')')
return self.defaults
else:
- actual_method=getattr(self.server,self.method)
+ actual_method = getattr(self.server, self.method)
return actual_method(*args)
- def __init__(self,url,dry_run=False):
- self.apiserver = xmlrpclib.ServerProxy(url,allow_none=True)
- self.dry_run=dry_run
- for (method,defaults) in server_methods:
- setattr(self,method,TestApiserver.Callable(self.apiserver,dry_run,method,defaults))
+ def __init__(self, url, dry_run=False):
+ self.apiserver = xmlrpc.client.ServerProxy(url, allow_none=True,
+ use_builtin_types=True)
+ self.dry_run = dry_run
+ for method, defaults in server_methods:
+ setattr(self, method, TestApiserver.Callable(self.apiserver, dry_run, method, defaults))
def set_dry_run (self, dry_run):
- self.dry_run=dry_run
- for (method,defaults) in server_methods:
- getattr(self,method).dry_run = dry_run
+ self.dry_run = dry_run
+ for (method, defaults) in server_methods:
+ getattr(self, method).dry_run = dry_run
def has_method (self, methodname):
return methodname in self.apiserver.system.listMethods()
class TestAuthSfa:
def __init__ (self, test_plc, auth_sfa_spec):
- self.test_plc = test_plc
- self.auth_sfa_spec = auth_sfa_spec
+ self.test_plc = test_plc
+ self.auth_sfa_spec = auth_sfa_spec
self.test_ssh = TestSsh(self.test_plc.test_ssh)
# # shortcuts
self.login_base = self.auth_sfa_spec['login_base']
fileconf.close()
utils.header ("(Over)wrote {}".format(file_name))
#
- file_name=dir_name + os.sep + 'sfi_config'
+ file_name=dir_name + os.sep + 'sfi_config'
fileconf=open(file_name,'w')
- SFI_AUTH=self.auth_hrn()
+ SFI_AUTH=self.auth_hrn()
fileconf.write ("SFI_AUTH='{}'".format(SFI_AUTH))
- fileconf.write('\n')
+ fileconf.write('\n')
# default is to run as a PI
- SFI_USER=self.obj_hrn(self.auth_sfa_spec['pi_spec']['name'])
+ SFI_USER=self.obj_hrn(self.auth_sfa_spec['pi_spec']['name'])
fileconf.write("SFI_USER='{}'".format(SFI_USER))
- fileconf.write('\n')
- SFI_REGISTRY='http://{}:{}/'.format(sfa_spec['settings']['SFA_REGISTRY_HOST'], 12345)
+ fileconf.write('\n')
+ SFI_REGISTRY='http://{}:{}/'.format(sfa_spec['settings']['SFA_REGISTRY_HOST'], 12345)
fileconf.write("SFI_REGISTRY='{}'".format(SFI_REGISTRY))
- fileconf.write('\n')
- SFI_SM='http://{}:{}/'.format(sfa_spec['settings']['SFA_SM_HOST'], sfa_spec['sfi-connects-to-port'])
+ fileconf.write('\n')
+ SFI_SM='http://{}:{}/'.format(sfa_spec['settings']['SFA_SM_HOST'], sfa_spec['sfi-connects-to-port'])
fileconf.write("SFI_SM='{}'".format(SFI_SM))
- fileconf.write('\n')
+ fileconf.write('\n')
fileconf.close()
utils.header ("(Over)wrote {}".format(file_name))
def sfi_list (self, options):
"run (as regular user) sfi list (on Registry)"
- return \
+ return \
self.test_plc.run_in_guest(self.sfi_user("list -r {}".format(self.root_hrn()))) == 0 and \
self.test_plc.run_in_guest(self.sfi_user("list {}".format(self.auth_hrn()))) == 0
def sfi_show_site (self, options):
"run (as regular user) sfi show (on Registry)"
- return \
+ return \
self.test_plc.run_in_guest(self.sfi_user("show {}".format(self.auth_hrn()))) == 0
test_plc = TestPlc (test_plc_spec)
test_plc.show()
- print test_plc.host_box
+ print(test_plc.host_box)
from argparse import ArgumentParser
parser = ArgumentParser()
class TestKey:
def __init__ (self, test_plc, key_spec):
- self.test_plc = test_plc
- self.key_spec = key_spec
+ self.test_plc = test_plc
+ self.key_spec = key_spec
self.test_ssh = TestSsh(self.test_plc.test_ssh)
def name(self):
f.write(self.key_spec['key_fields']['key'])
with open(priv,"w") as f:
f.write(self.key_spec['private'])
- os.chmod(priv,0400)
- os.chmod(pub,0444)
+ os.chmod(priv,0o400)
+ os.chmod(pub,0o444)
-#!/usr/bin/python -u
+#!/usr/bin/python3 -u
# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
else:
try:
self.substeps = sequences[self.internal()]
- except Exception,e:
- print "macro step {} not found in macros.py ({}) - exiting".format(self.display(),e)
+ except Exception as e:
+ print("macro step {} not found in macros.py ({}) - exiting".format(self.display(),e))
raise
def print_doc (self, level=0):
width = tab - level - 2
format = "%%-%ds" % width
line = start + format % self.display()
- print line,
+ print(line, end=' ')
try:
- print self.method.__doc__
+ print(self.method.__doc__)
except:
- print "*** no doc found"
+ print("*** no doc found")
else:
beg_start = level*' ' + '>>> '
end_start = level*' ' + '<<< '
format = "%%-%ds" % width
beg_line = beg_start + format % self.display() + trail*'>'
end_line = end_start + format % self.display() + trail*'<'
- print beg_line
+ print(beg_line)
for step in self.substeps:
Step(step).print_doc(level+1)
- print end_line
+ print(end_line)
# return a list of (name, method) for all native steps involved
def tuples (self):
# just do a listdir, hoping we're in the right directory...
@staticmethod
def list_macros ():
- names= sequences.keys()
+ names= list(sequences.keys())
names.sort()
return names
default_build_url = "git://git.onelab.eu/tests"
def __init__(self):
- self.path = os.path.dirname(sys.argv[0]) or "."
+ self.path = os.path.dirname(sys.argv[0]) or "."
os.chdir(self.path)
def show_env(self, options, message):
def list_steps(self):
if not self.options.verbose:
- print self.steps_message
+ print(self.steps_message)
else:
# steps mentioned on the command line
if self.options.steps:
# try to list macro steps as well
scopes.append ( ("Macro steps", Step.list_macros()) )
for (scope, steps) in scopes:
- print '--------------------', scope
+ print('--------------------', scope)
for step in [step for step in steps if TestPlc.valid_step(step)]:
try:
(step, qualifier) = step.split('@')
def flatten (x):
result = []
for el in x:
- if hasattr(el, "__iter__") and not isinstance(el, basestring):
+ if hasattr(el, "__iter__") and not isinstance(el, str):
result.extend(flatten(el))
else:
result.append(el)
('pldistro', 'arg-pldistro', "onelab", None),
('fcdistro', 'arg-fcdistro', 'f14', None),
):
-# print 'handling',recname
+# print('handling', recname)
path = filename
is_list = isinstance(default, list)
is_bool = isinstance(default, bool)
if not getattr(self.options, recname):
try:
- parsed = file(path).readlines()
+ with open(path) as file:
+ parsed = file.readlines()
if is_list: # lists
parsed = [x.strip() for x in parsed]
else: # strings and booleans
if len(parsed) != 1:
- print "{} - error when parsing {}".format(sys.argv[1], path)
+ print("{} - error when parsing {}".format(sys.argv[1], path))
sys.exit(1)
parsed = parsed[0].strip()
if is_bool:
parsed = parsed.lower() == 'true'
setattr(self.options, recname, parsed)
- except:
+ except Exception as e:
if default != "":
setattr(self.options, recname, default)
else:
- print "Cannot determine", recname
- print "Run {} --help for help".format(sys.argv[0])
+ print("Cannot determine", recname, e)
+ print("Run {} --help for help".format(sys.argv[0]))
sys.exit(1)
# save for next run
all_plc_specs = m.config(all_plc_specs, self.options)
except :
traceback.print_exc()
- print 'Cannot load config {} -- ignored'.format(modulename)
+ print('Cannot load config {} -- ignored'.format(modulename))
raise
# provision on local substrate
all_plc_specs = LocalSubstrate.local_substrate.provision(all_plc_specs, self.options)
# remember substrate IP address(es) for next run
- ips_bplc_file = open('arg-ips-bplc', 'w')
- for plc_spec in all_plc_specs:
- ips_bplc_file.write("{}\n".format(plc_spec['host_box']))
- ips_bplc_file.close()
- ips_vplc_file = open('arg-ips-vplc', 'w')
- for plc_spec in all_plc_specs:
- ips_vplc_file.write("{}\n".format(plc_spec['settings']['PLC_API_HOST']))
- ips_vplc_file.close()
+ with open('arg-ips-bplc', 'w') as ips_bplc_file:
+ for plc_spec in all_plc_specs:
+ ips_bplc_file.write("{}\n".format(plc_spec['host_box']))
+ with open('arg-ips-vplc', 'w') as ips_vplc_file:
+ for plc_spec in all_plc_specs:
+ ips_vplc_file.write("{}\n".format(plc_spec['settings']['PLC_API_HOST']))
# ditto for nodes
- ips_bnode_file = open('arg-ips-bnode', 'w')
- for plc_spec in all_plc_specs:
- for site_spec in plc_spec['sites']:
- for node_spec in site_spec['nodes']:
- ips_bnode_file.write("{}\n".format(node_spec['host_box']))
- ips_bnode_file.close()
- ips_vnode_file = open('arg-ips-vnode','w')
- for plc_spec in all_plc_specs:
- for site_spec in plc_spec['sites']:
- for node_spec in site_spec['nodes']:
- # back to normal (unqualified) form
- stripped = node_spec['node_fields']['hostname'].split('.')[0]
- ips_vnode_file.write("{}\n".format(stripped))
- ips_vnode_file.close()
+ with open('arg-ips-bnode', 'w') as ips_bnode_file:
+ for plc_spec in all_plc_specs:
+ for site_spec in plc_spec['sites']:
+ for node_spec in site_spec['nodes']:
+ ips_bnode_file.write("{}\n".format(node_spec['host_box']))
+ with open('arg-ips-vnode','w') as ips_vnode_file:
+ for plc_spec in all_plc_specs:
+ for site_spec in plc_spec['sites']:
+ for node_spec in site_spec['nodes']:
+ # back to normal (unqualified) form
+ stripped = node_spec['node_fields']['hostname'].split('.')[0]
+ ips_vnode_file.write("{}\n".format(stripped))
# build a TestPlc object from the result, passing options
for spec in all_plc_specs:
while prompting:
msg="{:d} Run step {} on {} [r](un)/d(ry_run)/p(roceed)/s(kip)/q(uit) ? "\
.format(plc_counter, stepname, plcname)
- answer = raw_input(msg).strip().lower() or "r"
+ answer = input(msg).strip().lower() or "r"
answer = answer[0]
if answer in ['s','n']: # skip/no/next
- print '{} on {} skipped'.format(stepname, plcname)
+ print('{} on {} skipped'.format(stepname, plcname))
prompting = False
skip_step = True
elif answer in ['q','b']: # quit/bye
- print 'Exiting'
+ print('Exiting')
return 'FAILURE'
elif answer in ['d']: # dry_run
dry_run = self.options.dry_run
step_result=method(plc_obj)
else:
step_result=method(plc_obj, across_plcs)
- print 'dry_run step ->', step_result
+ print('dry_run step ->', step_result)
self.options.dry_run = dry_run
plc_obj.options.dry_run = dry_run
plc_obj.apiserver.set_dry_run(dry_run)
seconds = int(delay.total_seconds())
duration = str(delay)
# always do this on stdout
- print TRACE_FORMAT.format(**locals())
+ print(TRACE_FORMAT.format(**locals()))
# duplicate on trace_file if provided
if self.options.trace_file:
trace.write(TRACE_FORMAT.format(**locals()))
else:
return 1
except SystemExit:
- print 'Caught SystemExit'
+ print('Caught SystemExit')
return 3
except:
traceback.print_exc()
if __name__ == "__main__":
exit_code = TestMain().main()
- print "TestMain exit code", exit_code
+ print("TestMain exit code", exit_code)
sys.exit(exit_code)
if self.options.verbose:
utils.header("TestMapper/{} : applying rules '{}' on {}"\
.format(type, map_pattern, name))
- for (k,v) in rename_dict.iteritems():
+ for (k,v) in rename_dict.items():
# apply : separator
path = k.split(':')
# step down but last step in path
o = obj
for step in path[:-1]:
- if not o.has_key(step):
+ if step not in o:
o[step] = {}
if self.options.verbose:
utils.header ("WARNING : created step {} in path {} on {} {}"\
# last step is the one for side-effect
step = path[-1]
if self.options.verbose:
- if not o.has_key(step):
+ if step not in o:
utils.header ("WARNING : inserting key {} for path {} on {} {}"\
.format(step, path, type, name))
# apply formatting if '%s' found in the value
if v is None:
- if self.options.verbose: print "TestMapper WARNING - None value - ignored, key=",k
+ if self.options.verbose: print("TestMapper WARNING - None value - ignored, key=",k)
continue
if v.find('%s') >= 0:
v = v % obj[k]
if self.options.verbose:
- print("TestMapper, rewriting {}: {} into {}"\
- .format(name, k, v))
+ print(("TestMapper, rewriting {}: {} into {}"\
+ .format(name, k, v)))
o[step] = v
# only apply first rule
return
# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
-import sys, os, os.path, time, base64
+import sys
+import os, os.path
+import time
+import base64
import utils
from TestUser import TestUser
from TestSsh import TestSsh
from Completer import CompleterTask
-class CompleterTaskNodeSsh (CompleterTask):
- def __init__ (self, hostname, qemuname, local_key, command=None,
+class CompleterTaskNodeSsh(CompleterTask):
+ def __init__(self, hostname, qemuname, local_key, command=None,
boot_state="boot", expected=True, dry_run=False):
self.hostname = hostname
self.qemuname = qemuname
self.command = command if command is not None else "hostname;uname -a"
self.expected = expected
self.dry_run = dry_run
- self.test_ssh = TestSsh (self.hostname, key=self.local_key)
- def run (self, silent):
+ self.test_ssh = TestSsh(self.hostname, key=self.local_key)
+ def run(self, silent):
command = self.test_ssh.actual_command(self.command)
- retcod = utils.system (command, silent=silent, dry_run=self.dry_run)
+ retcod = utils.system(command, silent=silent, dry_run=self.dry_run)
if self.expected:
return retcod == 0
else:
return retcod != 0
- def failure_epilogue (self):
- print "Cannot reach {} in {} mode".format(self.hostname, self.boot_state)
+ def failure_epilogue(self):
+ print("Cannot reach {} in {} mode".format(self.hostname, self.boot_state))
class TestNode:
- def __init__ (self, test_plc, test_site, node_spec):
- self.test_plc = test_plc
- self.test_site = test_site
- self.node_spec = node_spec
+ def __init__(self, test_plc, test_site, node_spec):
+ self.test_plc = test_plc
+ self.test_site = test_site
+ self.node_spec = node_spec
def name(self):
return self.node_spec['node_fields']['hostname']
- def dry_run (self):
+ def dry_run(self):
return self.test_plc.options.dry_run
@staticmethod
- def is_qemu_model (model):
+ def is_qemu_model(model):
return model.find("qemu") >= 0
- def is_qemu (self):
+ def is_qemu(self):
return TestNode.is_qemu_model(self.node_spec['node_fields']['model'])
@staticmethod
- def is_real_model (model):
+ def is_real_model(model):
return not TestNode.is_qemu_model(model)
- def is_real (self):
- return TestNode.is_real_model (self.node_spec['node_fields']['model'])
+ def is_real(self):
+ return TestNode.is_real_model(self.node_spec['node_fields']['model'])
def buildname(self):
return self.test_plc.options.buildname
- def nodedir (self):
+ def nodedir(self):
if self.is_qemu():
return "qemu-{}".format(self.name())
else:
return "real-{}".format(self.name())
# this returns a hostname
- def host_box (self):
- if self.is_real ():
+ def host_box(self):
+ if self.is_real():
return 'localhost'
else:
try:
return 'localhost'
# this returns a TestBoxQemu instance - cached in .test_box_value
- def test_box (self):
+ def test_box(self):
try:
return self.test_box_value
except:
- self.test_box_value = TestBoxQemu (self.host_box(),self.buildname())
+ self.test_box_value = TestBoxQemu(self.host_box(),self.buildname())
return self.test_box_value
- def create_node (self):
+ def create_node(self):
ownername = self.node_spec['owner']
user_spec = self.test_site.locate_user(ownername)
test_user = TestUser(self.test_plc,self.test_site,user_spec)
else:
# print 'USING NEW INTERFACE with separate ip addresses'
# this is for setting the 'dns' stuff that now goes with the node
- server.UpdateNode (userauth, self.name(), self.node_spec['node_fields_nint'])
- interface_id = server.AddInterface (userauth, self.name(),self.node_spec['interface_fields_nint'])
- server.AddIpAddress (userauth, interface_id, self.node_spec['ipaddress_fields'])
+ server.UpdateNode(userauth, self.name(), self.node_spec['node_fields_nint'])
+ interface_id = server.AddInterface(userauth, self.name(),self.node_spec['interface_fields_nint'])
+ server.AddIpAddress(userauth, interface_id, self.node_spec['ipaddress_fields'])
route_fields = self.node_spec['route_fields']
route_fields['interface_id'] = interface_id
- server.AddRoute (userauth, node_id, self.node_spec['route_fields'])
+ server.AddRoute(userauth, node_id, self.node_spec['route_fields'])
pass
# populate network interfaces - others
- if self.node_spec.has_key('extra_interfaces'):
+ if 'extra_interfaces' in self.node_spec:
for interface in self.node_spec['extra_interfaces']:
server.AddInterface(userauth,self.name(), interface['interface_fields'])
- if interface.has_key('settings'):
- for (attribute,value) in interface['settings'].iteritems():
+ if 'settings' in interface:
+ for attribute, value in interface['settings'].items():
# locate node network
interface = server.GetInterfaces(userauth,{'ip':interface['interface_fields']['ip']})[0]
interface_id = interface['interface_id']
# attach value
server.AddInterfaceTag(userauth,interface_id,attribute,value)
- def delete_node (self):
+ def delete_node(self):
# uses the right auth as far as poss.
try:
ownername = self.node_spec['owner']
def bootcd(self):
"all nodes: invoke GetBootMedium and store result locally"
utils.header("Calling GetBootMedium for {}".format(self.name()))
+ # this would clearly belong in the config but, well ..
options = []
if self.is_qemu():
options.append('serial')
options.append('systemd-debug')
encoded = self.test_plc.apiserver.GetBootMedium(self.test_plc.auth_root(),
self.name(), 'node-iso', '', options)
- if (encoded == ''):
- raise Exception, 'GetBootmedium failed'
+ if encoded == '':
+ raise Exception('GetBootmedium failed')
filename = "{}/{}.iso".format(self.nodedir(), self.name())
utils.header('Storing boot medium into {}'.format(filename))
+
+ # xxx discovered with python3, but a long stading issue:
+ # encoded at this point is a str instead of a bytes
+ # Quick & dirty : we convert this explicitly to a bytearray
+ # Longer run : clearly it seems like the plcapi server side should
+ # tag its result with <base64></base64> rather than as a string
+ bencoded = str.encode(encoded)
if self.dry_run():
- print "Dry_run: skipped writing of iso image"
+ print("Dry_run: skipped writing of iso image")
return True
else:
- file(filename,'w').write(base64.b64decode(encoded))
+                # with python3 we need to call decodebytes here
+                with open(filename,'wb') as storage:
+                    storage.write(base64.decodebytes(bencoded))
return True
- def nodestate_reinstall (self):
+ def nodestate_reinstall(self):
"all nodes: mark PLCAPI boot_state as reinstall"
self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
self.name(),{'boot_state':'reinstall'})
return True
- def nodestate_safeboot (self):
+ def nodestate_safeboot(self):
"all nodes: mark PLCAPI boot_state as safeboot"
self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
self.name(),{'boot_state':'safeboot'})
return True
- def nodestate_boot (self):
+ def nodestate_boot(self):
"all nodes: mark PLCAPI boot_state as boot"
self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
self.name(),{'boot_state':'boot'})
return True
- def nodestate_show (self):
+ def nodestate_show(self):
"all nodes: show PLCAPI boot_state"
if self.dry_run():
- print "Dry_run: skipped getting current node state"
+ print("Dry_run: skipped getting current node state")
return True
state = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), self.name(), ['boot_state'])[0]['boot_state']
- print self.name(),':',state
+ print(self.name(),':',state)
return True
def qemu_local_config(self):
target_arch = self.test_plc.apiserver.GetPlcRelease(auth)['build']['target-arch']
conf_filename = "{}/qemu.conf".format(self.nodedir())
if self.dry_run():
- print "dry_run: skipped actual storage of qemu.conf"
+ print("dry_run: skipped actual storage of qemu.conf")
return True
utils.header('Storing qemu config for {} in {}'.format(self.name(), conf_filename))
with open(conf_filename,'w') as file:
file.write('TARGET_ARCH={}\n'.format(target_arch))
return True
- def qemu_clean (self):
+ def qemu_clean(self):
utils.header("Cleaning up qemu for host {} on box {}"\
.format(self.name(),self.test_box().hostname()))
dry_run = self.dry_run()
self.test_box().rmdir(self.nodedir(), dry_run=dry_run)
return True
- def qemu_export (self):
+ def qemu_export(self):
"all nodes: push local node-dep directory on the qemu box"
# if relevant, push the qemu area onto the host box
if self.test_box().is_local():
return True
dry_run = self.dry_run()
- utils.header ("Cleaning any former sequel of {} on {}"\
- .format(self.name(), self.host_box()))
- utils.header ("Transferring configuration files for node {} onto {}"\
- .format(self.name(), self.host_box()))
+ utils.header("Cleaning any former sequel of {} on {}"\
+ .format(self.name(), self.host_box()))
+ utils.header("Transferring configuration files for node {} onto {}"\
+ .format(self.name(), self.host_box()))
return self.test_box().copy(self.nodedir(), recursive=True, dry_run=dry_run) == 0
- def qemu_start (self):
+ def qemu_start(self):
"all nodes: start the qemu instance (also runs qemu-bridge-init start)"
model = self.node_spec['node_fields']['model']
#starting the Qemu nodes before
.format(self.name(), model))
return True
- def qemu_timestamp (self):
+ def qemu_timestamp(self):
"all nodes: start the qemu instance (also runs qemu-bridge-init start)"
test_box = self.test_box()
test_box.run_in_buildname("mkdir -p {}".format(self.nodedir()), dry_run=self.dry_run())
return test_box.run_in_buildname("echo {:d} > {}/timestamp"\
.format(now, self.nodedir()), dry_run=self.dry_run()) == 0
- def start_qemu (self):
+ def start_qemu(self):
test_box = self.test_box()
utils.header("Starting qemu node {} on {}".format(self.name(), test_box.hostname()))
test_box.run_in_buildname("{}/qemu-start-node 2>&1 >> {}/log.txt"\
.format(self.nodedir(), self.nodedir()))
- def list_qemu (self):
+ def list_qemu(self):
utils.header("Listing qemu for host {} on box {}"\
.format(self.name(), self.test_box().hostname()))
command = "{}/qemu-kill-node -l {}".format(self.nodedir(), self.name())
self.test_box().run_in_buildname(command, dry_run=self.dry_run())
return True
- def kill_qemu (self):
+ def kill_qemu(self):
#Prepare the log file before killing the nodes
test_box = self.test_box()
# kill the right processes
self.test_box().run_in_buildname(command, dry_run=self.dry_run())
return True
- def gather_qemu_logs (self):
+ def gather_qemu_logs(self):
if not self.is_qemu():
return True
remote_log = "{}/log.txt".format(self.nodedir())
local_log = "logs/node.qemu.{}.txt".format(self.name())
self.test_box().test_ssh.fetch(remote_log,local_log,dry_run=self.dry_run())
- def keys_clear_known_hosts (self):
+ def keys_clear_known_hosts(self):
"remove test nodes entries from the local known_hosts file"
TestSsh(self.name()).clear_known_hosts()
return True
key = "keys/key_admin.rsa"
return TestSsh(self.name(), buildname=self.buildname(), key=key)
- def check_hooks (self):
+ def check_hooks(self):
extensions = [ 'py','pl','sh' ]
path = 'hooks/node'
- scripts = utils.locate_hooks_scripts ('node '+self.name(), path,extensions)
+ scripts = utils.locate_hooks_scripts('node '+self.name(), path,extensions)
overall = True
for script in scripts:
- if not self.check_hooks_script (script):
+ if not self.check_hooks_script(script):
overall = False
return overall
- def check_hooks_script (self,local_script):
+ def check_hooks_script(self,local_script):
# push the script on the node's root context
script_name = os.path.basename(local_script)
- utils.header ("NODE hook {} ({})".format(script_name, self.name()))
+ utils.header("NODE hook {} ({})".format(script_name, self.name()))
test_ssh = self.create_test_ssh()
test_ssh.copy_home(local_script)
if test_ssh.run("./"+script_name) != 0:
- utils.header ("WARNING: node hooks check script {} FAILED (ignored)"\
- .format(script_name))
+ utils.header("WARNING: node hooks check script {} FAILED (ignored)"\
+ .format(script_name))
#return False
return True
else:
- utils.header ("SUCCESS: node hook {} OK".format(script_name))
+ utils.header("SUCCESS: node hook {} OK".format(script_name))
return True
- def has_libvirt (self):
+ def has_libvirt(self):
test_ssh = self.create_test_ssh()
- return test_ssh.run ("rpm -q --quiet libvirt-client") == 0
+ return test_ssh.run("rpm -q --quiet libvirt-client") == 0
- def _check_system_slice (self, slicename, dry_run=False):
+ def _check_system_slice(self, slicename, dry_run=False):
sitename = self.test_plc.plc_spec['settings']['PLC_SLICE_PREFIX']
vservername = "{}_{}".format(sitename, slicename)
test_ssh = self.create_test_ssh()
return test_ssh.run("virsh --connect lxc:// list | grep -q ' {} '".format(vservername),
dry_run = dry_run) == 0
else:
- (retcod,output) = \
+ retcod, output = \
utils.output_of(test_ssh.actual_command("cat /vservers/{}/etc/slicefamily 2> /dev/null")\
.format(vservername))
# get last line only as ssh pollutes the output
import traceback
import socket
from datetime import datetime, timedelta
-from types import StringTypes
import utils
from Completer import Completer, CompleterTask
def standby(minutes, dry_run):
utils.header('Entering StandBy for {:d} mn'.format(minutes))
if dry_run:
- print 'dry_run'
+ print('dry_run')
else:
time.sleep(60*minutes)
return True
ref_name = method.__name__.replace('_ignore', '').replace('force_', '')
ref_method = TestPlc.__dict__[ref_name]
result = ref_method(self)
- print "Actual (but ignored) result for {ref_name} is {result}".format(**locals())
+ print("Actual (but ignored) result for {ref_name} is {result}".format(**locals()))
return Ignored(result)
name = method.__name__.replace('_ignore', '').replace('force_', '')
ignoring.__name__ = name
'populate', SEP,
'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
- 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
+ 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
'sfa_get_expires', SEPSFA,
'plc_db_dump' , 'plc_db_restore', SEP,
@staticmethod
def _has_sfa_cached(rpms_url):
if os.path.isfile(has_sfa_cache_filename):
- cached = file(has_sfa_cache_filename).read() == "yes"
+ with open(has_sfa_cache_filename) as cache:
+ cached = cache.read() == "yes"
utils.header("build provides SFA (cached):{}".format(cached))
return cached
# warning, we're now building 'sface' so let's be a bit more picky
utils.header("Checking if build provides SFA package...")
retcod = os.system("curl --silent {}/ | grep -q sfa-".format(rpms_url)) == 0
encoded = 'yes' if retcod else 'no'
- with open(has_sfa_cache_filename,'w')as out:
- out.write(encoded)
+ with open(has_sfa_cache_filename,'w') as cache:
+ cache.write(encoded)
return retcod
@staticmethod
TestPlc.default_steps.remove(step)
def __init__(self, plc_spec, options):
- self.plc_spec = plc_spec
+ self.plc_spec = plc_spec
self.options = options
- self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
+ self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
self.vserverip = plc_spec['vserverip']
self.vservername = plc_spec['vservername']
self.url = "https://{}:443/PLCAPI/".format(plc_spec['vserverip'])
- self.apiserver = TestApiserver(self.url, options.dry_run)
+ self.apiserver = TestApiserver(self.url, options.dry_run)
(self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
(self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
# define the API methods on this object through xmlrpc
# would help, but not strictly necessary
def connect(self):
- pass
+ pass
def actual_command_in_guest(self,command, backslash=False):
raw1 = self.host_to_guest(command)
return self.yum_check_installed(rpms)
def auth_root(self):
- return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
- 'AuthMethod' : 'password',
- 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
+ return {'Username' : self.plc_spec['settings']['PLC_ROOT_USER'],
+ 'AuthMethod' : 'password',
+ 'AuthString' : self.plc_spec['settings']['PLC_ROOT_PASSWORD'],
'Role' : self.plc_spec['role'],
}
return site
if site['site_fields']['login_base'] == sitename:
return site
- raise Exception,"Cannot locate site {}".format(sitename)
+ raise Exception("Cannot locate site {}".format(sitename))
def locate_node(self, nodename):
for site in self.plc_spec['sites']:
for node in site['nodes']:
if node['name'] == nodename:
return site, node
- raise Exception, "Cannot locate node {}".format(nodename)
+ raise Exception("Cannot locate node {}".format(nodename))
def locate_hostname(self, hostname):
for site in self.plc_spec['sites']:
for node in site['nodes']:
if node['node_fields']['hostname'] == hostname:
return(site, node)
- raise Exception,"Cannot locate hostname {}".format(hostname)
+ raise Exception("Cannot locate hostname {}".format(hostname))
def locate_key(self, key_name):
for key in self.plc_spec['keys']:
if key['key_name'] == key_name:
return key
- raise Exception,"Cannot locate key {}".format(key_name)
+ raise Exception("Cannot locate key {}".format(key_name))
def locate_private_key_from_key_names(self, key_names):
# locate the first avail. key
for slice in self.plc_spec['slices']:
if slice['slice_fields']['name'] == slicename:
return slice
- raise Exception,"Cannot locate slice {}".format(slicename)
+ raise Exception("Cannot locate slice {}".format(slicename))
def all_sliver_objs(self):
result = []
# transform into a dict { 'host_box' -> [ test_node .. ] }
result = {}
for (box,node) in tuples:
- if not result.has_key(box):
+ if box not in result:
result[box] = [node]
else:
result[box].append(node)
# a step for checking this stuff
def show_boxes(self):
'print summary of nodes location'
- for box,nodes in self.get_BoxNodes().iteritems():
- print box,":"," + ".join( [ node.name() for node in nodes ] )
+ for box,nodes in self.get_BoxNodes().items():
+ print(box,":"," + ".join( [ node.name() for node in nodes ] ))
return True
# make this a valid step
def qemu_kill_all(self):
'kill all qemu instances on the qemu boxes involved by this setup'
# this is the brute force version, kill all qemus on that host box
- for (box,nodes) in self.get_BoxNodes().iteritems():
+ for (box,nodes) in self.get_BoxNodes().items():
# pass the first nodename, as we don't push template-qemu on testboxes
nodedir = nodes[0].nodedir()
TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
# make this a valid step
def qemu_list_all(self):
'list all qemu instances on the qemu boxes involved by this setup'
- for box,nodes in self.get_BoxNodes().iteritems():
+ for box,nodes in self.get_BoxNodes().items():
# this is the brute force version, kill all qemus on that host box
TestBoxQemu(box, self.options.buildname).qemu_list_all()
return True
# kill only the qemus related to this test
def qemu_list_mine(self):
'list qemu instances for our nodes'
- for (box,nodes) in self.get_BoxNodes().iteritems():
+ for (box,nodes) in self.get_BoxNodes().items():
# the fine-grain version
for node in nodes:
node.list_qemu()
# kill only the qemus related to this test
def qemu_clean_mine(self):
'cleanup (rm -rf) qemu instances for our nodes'
- for box,nodes in self.get_BoxNodes().iteritems():
+ for box,nodes in self.get_BoxNodes().items():
# the fine-grain version
for node in nodes:
node.qemu_clean()
# kill only the right qemus
def qemu_kill_mine(self):
'kill the qemu instances for our nodes'
- for box,nodes in self.get_BoxNodes().iteritems():
+ for box,nodes in self.get_BoxNodes().items():
# the fine-grain version
for node in nodes:
node.kill_qemu()
"print cut'n paste-able stuff to export env variables to your shell"
# guess local domain from hostname
if TestPlc.exported_id > 1:
- print "export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername'])
+ print("export GUESTHOSTNAME{:d}={}".format(TestPlc.exported_id, self.plc_spec['vservername']))
return True
TestPlc.exported_id += 1
domain = socket.gethostname().split('.',1)[1]
fqdn = "{}.{}".format(self.plc_spec['host_box'], domain)
- print "export BUILD={}".format(self.options.buildname)
- print "export PLCHOSTLXC={}".format(fqdn)
- print "export GUESTNAME={}".format(self.plc_spec['vservername'])
+ print("export BUILD={}".format(self.options.buildname))
+ print("export PLCHOSTLXC={}".format(fqdn))
+ print("export GUESTNAME={}".format(self.plc_spec['vservername']))
vplcname = self.plc_spec['vservername'].split('-')[-1]
- print "export GUESTHOSTNAME={}.{}".format(vplcname, domain)
+ print("export GUESTHOSTNAME={}.{}".format(vplcname, domain))
# find hostname of first node
hostname, qemubox = self.all_node_infos()[0]
- print "export KVMHOST={}.{}".format(qemubox, domain)
- print "export NODE={}".format(hostname)
+ print("export KVMHOST={}.{}".format(qemubox, domain))
+ print("export NODE={}".format(hostname))
return True
# entry point
always_display_keys=['PLC_WWW_HOST', 'nodes', 'sites']
def show_pass(self, passno):
- for (key,val) in self.plc_spec.iteritems():
+ for (key,val) in self.plc_spec.items():
if not self.options.verbose and key not in TestPlc.always_display_keys:
continue
if passno == 2:
self.display_key_spec(key)
elif passno == 1:
if key not in ['sites', 'initscripts', 'slices', 'keys']:
- print '+ ', key, ':', val
+ print('+ ', key, ':', val)
def display_site_spec(self, site):
- print '+ ======== site', site['site_fields']['name']
- for k,v in site.iteritems():
+ print('+ ======== site', site['site_fields']['name'])
+ for k,v in site.items():
if not self.options.verbose and k not in TestPlc.always_display_keys:
continue
if k == 'nodes':
if v:
- print '+ ','nodes : ',
+ print('+ ','nodes : ', end=' ')
for node in v:
- print node['node_fields']['hostname'],'',
- print ''
+ print(node['node_fields']['hostname'],'', end=' ')
+ print('')
elif k == 'users':
if v:
- print '+ users : ',
+ print('+ users : ', end=' ')
for user in v:
- print user['name'],'',
- print ''
+ print(user['name'],'', end=' ')
+ print('')
elif k == 'site_fields':
- print '+ login_base', ':', v['login_base']
+ print('+ login_base', ':', v['login_base'])
elif k == 'address_fields':
pass
else:
- print '+ ',
+ print('+ ', end=' ')
utils.pprint(k, v)
def display_initscript_spec(self, initscript):
- print '+ ======== initscript', initscript['initscript_fields']['name']
+ print('+ ======== initscript', initscript['initscript_fields']['name'])
def display_key_spec(self, key):
- print '+ ======== key', key['key_name']
+ print('+ ======== key', key['key_name'])
def display_slice_spec(self, slice):
- print '+ ======== slice', slice['slice_fields']['name']
- for k,v in slice.iteritems():
+ print('+ ======== slice', slice['slice_fields']['name'])
+ for k,v in slice.items():
if k == 'nodenames':
if v:
- print '+ nodes : ',
+ print('+ nodes : ', end=' ')
for nodename in v:
- print nodename,'',
- print ''
+ print(nodename,'', end=' ')
+ print('')
elif k == 'usernames':
if v:
- print '+ users : ',
+ print('+ users : ', end=' ')
for username in v:
- print username,'',
- print ''
+ print(username,'', end=' ')
+ print('')
elif k == 'slice_fields':
- print '+ fields',':',
- print 'max_nodes=',v['max_nodes'],
- print ''
+ print('+ fields',':', end=' ')
+ print('max_nodes=',v['max_nodes'], end=' ')
+ print('')
else:
- print '+ ',k,v
+ print('+ ',k,v)
def display_node_spec(self, node):
- print "+ node={} host_box={}".format(node['name'], node['host_box']),
- print "hostname=", node['node_fields']['hostname'],
- print "ip=", node['interface_fields']['ip']
+ print("+ node={} host_box={}".format(node['name'], node['host_box']), end=' ')
+ print("hostname=", node['node_fields']['hostname'], end=' ')
+ print("ip=", node['interface_fields']['ip'])
if self.options.verbose:
utils.pprint("node details", node, depth=3)
@staticmethod
def display_mapping_plc(plc_spec):
- print '+ MyPLC',plc_spec['name']
+ print('+ MyPLC',plc_spec['name'])
# WARNING this would not be right for lxc-based PLC's - should be harmless though
- print '+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername'])
- print '+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip'])
+ print('+\tvserver address = root@{}:/vservers/{}'.format(plc_spec['host_box'], plc_spec['vservername']))
+ print('+\tIP = {}/{}'.format(plc_spec['settings']['PLC_API_HOST'], plc_spec['vserverip']))
for site_spec in plc_spec['sites']:
for node_spec in site_spec['nodes']:
TestPlc.display_mapping_node(node_spec)
@staticmethod
def display_mapping_node(node_spec):
- print '+ NODE {}'.format(node_spec['name'])
- print '+\tqemu box {}'.format(node_spec['host_box'])
- print '+\thostname={}'.format(node_spec['node_fields']['hostname'])
+ print('+ NODE {}'.format(node_spec['name']))
+ print('+\tqemu box {}'.format(node_spec['host_box']))
+ print('+\thostname={}'.format(node_spec['node_fields']['hostname']))
# write a timestamp in /vservers/<>.timestamp
# cannot be inside the vserver, that causes vserver .. build to cough
# with the last step (i386) removed
repo_url = self.options.arch_rpms_url
for level in [ 'arch' ]:
- repo_url = os.path.dirname(repo_url)
+ repo_url = os.path.dirname(repo_url)
# invoke initvm (drop support for vs)
script = "lbuild-initvm.sh"
vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
script_options += " -n {}".format(vserver_hostname)
except:
- print "Cannot reverse lookup {}".format(self.vserverip)
- print "This is considered fatal, as this might pollute the test results"
+ print("Cannot reverse lookup {}".format(self.vserverip))
+ print("This is considered fatal, as this might pollute the test results")
return False
create_vserver="{build_dir}/{script} {script_options} {vserver_name}".format(**locals())
return self.run_in_host(create_vserver) == 0
elif self.options.personality == "linux64":
arch = "x86_64"
else:
- raise Exception, "Unsupported personality {}".format(self.options.personality)
+ raise Exception("Unsupported personality {}".format(self.options.personality))
nodefamily = "{}-{}-{}".format(self.options.pldistro, self.options.fcdistro, arch)
pkgs_list=[]
"run plc-config-tty"
tmpname = '{}.plc-config-tty'.format(self.name())
with open(tmpname,'w') as fileconf:
- for (var,value) in self.plc_spec['settings'].iteritems():
+ for (var,value) in self.plc_spec['settings'].items():
fileconf.write('e {}\n{}\n'.format(var, value))
fileconf.write('w\n')
fileconf.write('q\n')
def keys_store(self):
"stores test users ssh keys in keys/"
for key_spec in self.plc_spec['keys']:
- TestKey(self,key_spec).store_key()
+ TestKey(self,key_spec).store_key()
return True
def keys_clean(self):
def delete_all_sites(self):
"Delete all sites in PLC, and related objects"
- print 'auth_root', self.auth_root()
+ print('auth_root', self.auth_root())
sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
for site in sites:
# keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
if site['login_base'] == self.plc_spec['settings']['PLC_SLICE_PREFIX']:
continue
site_id = site['site_id']
- print 'Deleting site_id', site_id
+ print('Deleting site_id', site_id)
self.apiserver.DeleteSite(self.auth_root(), site_id)
return True
"create leases (on reservable nodes only, use e.g. run -c default -c resa)"
now = int(time.time())
grain = self.apiserver.GetLeaseGranularity(self.auth_root())
- print 'API answered grain=', grain
+ print('API answered grain=', grain)
start = (now/grain)*grain
start += grain
# find out all nodes that are reservable
test_site = TestSite(self,site_spec)
for node_spec in site_spec['nodes']:
test_node = TestNode(self, test_site, node_spec)
- if node_spec.has_key('nodegroups'):
+ if 'nodegroups' in node_spec:
nodegroupnames = node_spec['nodegroups']
- if isinstance(nodegroupnames, StringTypes):
+ if isinstance(nodegroupnames, str):
nodegroupnames = [ nodegroupnames ]
for nodegroupname in nodegroupnames:
- if not groups_dict.has_key(nodegroupname):
+ if nodegroupname not in groups_dict:
groups_dict[nodegroupname] = []
groups_dict[nodegroupname].append(test_node.name())
auth = self.auth_root()
overall = True
- for (nodegroupname,group_nodes) in groups_dict.iteritems():
+ for (nodegroupname,group_nodes) in groups_dict.items():
if action == "add":
- print 'nodegroups:', 'dealing with nodegroup',\
- nodegroupname, 'on nodes', group_nodes
+ print('nodegroups:', 'dealing with nodegroup',\
+ nodegroupname, 'on nodes', group_nodes)
# first, check if the nodetagtype is here
tag_types = self.apiserver.GetTagTypes(auth, {'tagname':nodegroupname})
if tag_types:
{'tagname' : nodegroupname,
'description' : 'for nodegroup {}'.format(nodegroupname),
'category' : 'test'})
- print 'located tag (type)', nodegroupname, 'as', tag_type_id
+ print('located tag (type)', nodegroupname, 'as', tag_type_id)
# create nodegroup
nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname' : nodegroupname})
if not nodegroups:
self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
- print 'created nodegroup', nodegroupname, \
- 'from tagname', nodegroupname, 'and value', 'yes'
+ print('created nodegroup', nodegroupname, \
+ 'from tagname', nodegroupname, 'and value', 'yes')
# set node tag on all nodes, value='yes'
for nodename in group_nodes:
try:
self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
except:
traceback.print_exc()
- print 'node', nodename, 'seems to already have tag', nodegroupname
+ print('node', nodename, 'seems to already have tag', nodegroupname)
# check anyway
try:
expect_yes = self.apiserver.GetNodeTags(auth,
'tagname' : nodegroupname},
['value'])[0]['value']
if expect_yes != "yes":
- print 'Mismatch node tag on node',nodename,'got',expect_yes
+ print('Mismatch node tag on node',nodename,'got',expect_yes)
overall = False
except:
if not self.options.dry_run:
- print 'Cannot find tag', nodegroupname, 'on node', nodename
+ print('Cannot find tag', nodegroupname, 'on node', nodename)
overall = False
else:
try:
- print 'cleaning nodegroup', nodegroupname
+ print('cleaning nodegroup', nodegroupname)
self.apiserver.DeleteNodeGroup(auth, nodegroupname)
except:
traceback.print_exc()
def nodes_check_boot_state(self, target_boot_state, timeout_minutes,
silent_minutes, period_seconds = 15):
if self.options.dry_run:
- print 'dry_run'
+ print('dry_run')
return True
class CompleterTaskBootState(CompleterTask):
def message(self):
return "CompleterTaskBootState with node {}".format(self.hostname)
def failure_epilogue(self):
- print "node {} in state {} - expected {}"\
- .format(self.hostname, self.last_boot_state, target_boot_state)
+ print("node {} in state {} - expected {}"\
+ .format(self.hostname, self.last_boot_state, target_boot_state))
timeout = timedelta(minutes=timeout_minutes)
graceout = timedelta(minutes=silent_minutes)
command="ping -c 1 -w 1 {} >& /dev/null".format(self.hostname)
return utils.system(command, silent=silent) == 0
def failure_epilogue(self):
- print "Cannot ping node with name {}".format(self.hostname)
+ print("Cannot ping node with name {}".format(self.hostname))
timeout = timedelta(seconds = timeout_seconds)
graceout = timeout
period = timedelta(seconds = period_seconds)
else:
message = "boot"
completer_message = 'ssh_node_boot'
- local_key = "keys/key_admin.rsa"
+ local_key = "keys/key_admin.rsa"
utils.header("checking ssh access to nodes (expected in {} mode)".format(message))
node_infos = self.all_node_infos()
tasks = [ CompleterTaskNodeSsh(nodename, qemuname, local_key,
def message(self):
return "initscript checker for {}".format(self.test_sliver.name())
def failure_epilogue(self):
- print "initscript stamp {} not found in sliver {}"\
- .format(self.stamp, self.test_sliver.name())
+ print("initscript stamp {} not found in sliver {}"\
+ .format(self.stamp, self.test_sliver.name()))
tasks = []
for slice_spec in self.plc_spec['slices']:
- if not slice_spec.has_key('initscriptstamp'):
+ if 'initscriptstamp' not in slice_spec:
continue
stamp = slice_spec['initscriptstamp']
slicename = slice_spec['slice_fields']['name']
for nodename in slice_spec['nodenames']:
- print 'nodename', nodename, 'slicename', slicename, 'stamp', stamp
+ print('nodename', nodename, 'slicename', slicename, 'stamp', stamp)
site,node = self.locate_node(nodename)
# xxx - passing the wrong site - probably harmless
test_site = TestSite(self, site)
tasks.append(CompleterTaskInitscript(test_sliver, stamp))
return Completer(tasks, message='check_initscripts').\
run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
-
+
def check_initscripts(self):
"check that the initscripts have triggered"
return self.do_check_initscripts()
"delete initscripts with PLCAPI"
for initscript in self.plc_spec['initscripts']:
initscript_name = initscript['initscript_fields']['name']
- print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
+        print('Attempting to delete {} in plc {}'.format(initscript_name, self.plc_spec['name']))
try:
self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
- print initscript_name, 'deleted'
+ print(initscript_name, 'deleted')
except:
- print 'deletion went wrong - probably did not exist'
+ print('deletion went wrong - probably did not exist')
return True
### manage slices
return plc.locate_sliver_obj(nodename, slicename)
except:
pass
- raise Exception, "Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename)
+ raise Exception("Cannot locate sliver {}@{} among all PLCs".format(nodename, slicename))
# implement this one as a cross step so that we can take advantage of different nodes
# in multi-plcs mode
def message(self):
return "network ready checker for {}".format(self.test_sliver.name())
def failure_epilogue(self):
- print "could not bind port from sliver {}".format(self.test_sliver.name())
+ print("could not bind port from sliver {}".format(self.test_sliver.name()))
sliver_specs = {}
tasks = []
def message(self):
return "System slice {} @ {}".format(slicename, self.test_node.name())
def failure_epilogue(self):
- print "COULD not find system slice {} @ {}".format(slicename, self.test_node.name())
+ print("COULD not find system slice {} @ {}".format(slicename, self.test_node.name()))
timeout = timedelta(minutes=timeout_minutes)
silent = timedelta(0)
period = timedelta(seconds=period_seconds)
try:
self.apiserver.DeleteSite(self.auth_root(),login_base)
except:
- print "Site {} already absent from PLC db".format(login_base)
+ print("Site {} already absent from PLC db".format(login_base))
for spec_name in ['pi_spec','user_spec']:
user_spec = auth_sfa_spec[spec_name]
#print "User {} already absent from PLC db".format(username)
pass
- print "REMEMBER TO RUN sfa_import AGAIN"
+ print("REMEMBER TO RUN sfa_import AGAIN")
return True
def sfa_uninstall(self):
if not os.path.isdir(dirname):
utils.system("mkdir -p {}".format(dirname))
if not os.path.isdir(dirname):
- raise Exception,"Cannot create config dir for plc {}".format(self.name())
+ raise Exception("Cannot create config dir for plc {}".format(self.name()))
return dirname
def conffile(self, filename):
"run sfa-config-tty"
tmpname = self.conffile("sfa-config-tty")
with open(tmpname,'w') as fileconf:
- for (var,value) in self.plc_spec['sfa']['settings'].iteritems():
+ for (var,value) in self.plc_spec['sfa']['settings'].items():
fileconf.write('e {}\n{}\n'.format(var, value))
fileconf.write('w\n')
fileconf.write('R\n')
# (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
# (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
# (1.a)
- print "-------------------- TestPlc.gather_logs : PLC's /var/log"
+ print("-------------------- TestPlc.gather_logs : PLC's /var/log")
self.gather_var_logs()
# (1.b)
- print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
+ print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
self.gather_pgsql_logs()
# (1.c)
- print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
+ print("-------------------- TestPlc.gather_logs : PLC's /root/sfi/")
self.gather_root_sfi()
# (2)
- print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
+ print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
for site_spec in self.plc_spec['sites']:
test_site = TestSite(self,site_spec)
for node_spec in site_spec['nodes']:
test_node = TestNode(self, test_site, node_spec)
test_node.gather_qemu_logs()
# (3)
- print "-------------------- TestPlc.gather_logs : nodes's /var/log"
+ print("-------------------- TestPlc.gather_logs : nodes's /var/log")
self.gather_nodes_var_logs()
# (4)
- print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
+ print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
self.gather_slivers_var_logs()
return True
# uses options.dbname if it is found
try:
name = self.options.dbname
- if not isinstance(name, StringTypes):
+ if not isinstance(name, str):
raise Exception
except:
t = datetime.now()
class TestSite:
def __init__ (self, test_plc, site_spec):
- self.test_plc = test_plc
- self.site_spec = site_spec
+ self.test_plc = test_plc
+ self.site_spec = site_spec
def name(self):
return self.site_spec['site_fields']['login_base']
def create_site (self):
- print self.test_plc.auth_root()
+ print(self.test_plc.auth_root())
self.test_plc.apiserver.AddSite(self.test_plc.auth_root(),
self.site_spec['site_fields'])
self.test_plc.apiserver.AddSiteAddress(self.test_plc.auth_root(),
test_user.add_keys()
def delete_site (self):
- print self.test_plc.auth_root()
+ print(self.test_plc.auth_root())
self.test_plc.apiserver.DeleteSite(self.test_plc.auth_root(), self.name())
return True
return user
if user['user_fields']['email'] == username:
return user
- raise Exception,"Cannot locate user {}".format(username)
+ raise Exception("Cannot locate user {}".format(username))
def locate_node (self, nodename):
for node in self.site_spec['nodes']:
if node['name'] == nodename:
return node
- raise Exception,"Cannot locate node {}".format(nodename)
+ raise Exception("Cannot locate node {}".format(nodename))
def failure_epilogue (self):
if self.expected:
- print "Could not ssh into sliver {}@{}".format(self.slicename, self.hostname)
+ print("Could not ssh into sliver {}@{}".format(self.slicename, self.hostname))
else:
- print "Could still ssh into sliver{}@{} (that was expected to be down)"\
- .format(self.slicename, self.hostname)
+ print("Could still ssh into sliver{}@{} (that was expected to be down)"\
+ .format(self.slicename, self.hostname))
class TestSlice:
def __init__ (self, test_plc, test_site, slice_spec):
- self.test_plc = test_plc
+ self.test_plc = test_plc
self.test_site = test_site
- self.slice_spec = slice_spec
+ self.slice_spec = slice_spec
self.test_ssh = TestSsh(self.test_plc.test_ssh)
def name(self):
test_user = TestUser(self,self.test_site,user_spec)
self.test_plc.apiserver.AddPersonToSlice(auth, test_user.name(), slice_name)
# add initscript code or name as appropriate
- if self.slice_spec.has_key('initscriptcode'):
+ if 'initscriptcode' in self.slice_spec:
iscode = self.slice_spec['initscriptcode']
utils.header("Adding initscript code {} in {}".format(iscode, slice_name))
self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
'initscript_code', iscode)
- elif self.slice_spec.has_key('initscriptname'):
+ elif 'initscriptname' in self.slice_spec:
isname = self.slice_spec['initscriptname']
utils.header("Adding initscript name {} in {}".format(isname, slice_name))
self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
expected = self.test_plc.plc_spec['expected_vsys_tags']
result = set(values) == set(expected)
if not result:
- print 'Check vsys defaults with slice {}'.format(slice_name)
- print 'Expected {}'.format(expected)
- print 'Got {}'.format(values)
+ print('Check vsys defaults with slice {}'.format(slice_name))
+ print('Expected {}'.format(expected))
+ print('Got {}'.format(values))
return result
# just add the nodes and handle tags
def delete_nodes (self):
auth = self.owner_auth()
slice_name = self.slice_name()
- print 'retrieving slice {}'.format(slice_name)
+ print('retrieving slice {}'.format(slice_name))
slice=self.test_plc.apiserver.GetSlices(auth,slice_name)[0]
node_ids=slice['node_ids']
utils.header ("Deleting {} nodes from slice {}"\
command="ls -d {}".format(rootfs))
def failure_epilogue (self):
if expected:
- print "Could not stat {} - was expected to be present".format(rootfs)
+ print("Could not stat {} - was expected to be present".format(rootfs))
else:
- print "Sliver rootfs {} still present - this is unexpected".format(rootfs)
+ print("Sliver rootfs {} still present - this is unexpected".format(rootfs))
utils.system(self.test_ssh.actual_command("ls -l {rootfs}; du -hs {rootfs}".format(**locals()),
dry_run=self.dry_run))
return [ CompleterTaskRootfs (nodename, qemuname) for (nodename,qemuname) in node_infos ]
sfi_command += " --xrn {}".format(self.hrn())
for opt in self.slice_spec['register_options']:
sfi_command += " {}".format(opt)
- return self.test_plc.run_in_guest(self.sfi_pi(sfi_command))==0
+ return self.test_plc.run_in_guest(self.sfi_pi(sfi_command))==0
def sfa_renew_slice(self, options):
"run sfi renew (on Aggregates)"
if self.test_plc.run_in_guest (self.sfi_user(sfi_command)) !=0: return
if self.test_plc.test_ssh.fetch(inbox_filename,filename)!=0: return
try:
- with file(filename) as f:
+ with open(filename) as f:
status = json.loads(f.read())
value = status['value']
sliver = value['geni_slivers'][0]
expires = sliver['geni_expires']
- print " * expiration for {} (first sliver) -> {}".format(self.hrn(), expires)
+ print(" * expiration for {} (first sliver) -> {}".format(self.hrn(), expires))
return expires
except:
traceback.print_exc()
# run as pi
def sfa_delete_slice(self,options):
- "run sfi delete"
- self.test_plc.run_in_guest(self.sfi_pi("delete {}".format(self.hrn())))
- return self.test_plc.run_in_guest(self.sfi_pi("remove -t slice {}".format(self.hrn()))) == 0
+ "run sfi delete"
+ self.test_plc.run_in_guest(self.sfi_pi("delete {}".format(self.hrn())))
+ return self.test_plc.run_in_guest(self.sfi_pi("remove -t slice {}".format(self.hrn()))) == 0
def locate_private_key(self):
return self.test_plc.locate_private_key_from_key_names ( [ self.slice_spec['key_name'] ] )
# check the resulting sliver
def ssh_slice_sfa(self, options, timeout_minutes=40, silent_minutes=0, period_seconds=15):
- "tries to ssh-enter the SFA slice"
+ "tries to ssh-enter the SFA slice"
timeout = timedelta(minutes=timeout_minutes)
graceout = timedelta(minutes=silent_minutes)
period = timedelta(seconds=period_seconds)
def __init__(self, test_plc, test_node, test_slice):
self.test_plc = test_plc
- self.test_node = test_node
+ self.test_node = test_node
self.test_slice = test_slice
self.test_ssh = self.create_test_ssh()
try:
(found, privatekey) = self.test_slice.locate_key()
return (found, privatekey)
- except Exception,e:
- print str(e)
+ except Exception as e:
+ print(str(e))
def create_test_ssh(self):
private_key = self.test_slice.locate_private_key()
if not private_key:
- raise Exception,"Cannot find the private key for slice {}".format(self.test_slice.name())
+ raise Exception("Cannot find the private key for slice {}".format(self.test_slice.name()))
return TestSsh (self.test_node.name(), key=private_key, username=self.test_slice.name(),
# so that copies end up in the home dir
buildname=".")
return test_ssh.actual_command("tar -C {} -cf - .".format(dir_to_tar))
def check_hooks (self):
- print 'NOTE: slice hooks check scripts NOT (yet?) run in sudo'
+ print('NOTE: slice hooks check scripts NOT (yet?) run in sudo')
extensions = [ 'py','pl','sh' ]
path = 'hooks/slice/'
scripts = utils.locate_hooks_scripts ('sliver '+self.name(), path,extensions)
def header(self, message):
if not message:
return
- print "===============",message
+ print("===============",message)
sys.stdout.flush()
def run(self, command, message=None, background=False, dry_run=False):
# Copyright (C) 2010 INRIA
#
import os, sys, time
-import xmlrpclib
+import xmlrpc.client
import utils
class TestUser:
def __init__ (self, test_plc, test_site, user_spec):
- self.test_plc = test_plc
- self.test_site = test_site
- self.user_spec = user_spec
+ self.test_plc = test_plc
+ self.test_site = test_site
+ self.user_spec = user_spec
def name(self):
return self.user_spec['user_fields']['email']
# Copyright (C) 2010 INRIA
#
import os, sys, time
-import xmlrpclib
+import xmlrpc.client
import utils
# handle key separately because of embedded whitespace
# hack - the user's pubkey is avail from his hrn
command += " -k {}/{}.pub".format(self.sfi_path(), user_hrn)
- return self.test_plc.run_in_guest(self.sfi_pi(command)) == 0
+ return self.test_plc.run_in_guest(self.sfi_pi(command)) == 0
def sfa_update_user (self,options):
"update a user record using sfi update"
command += " --type user"
command += " --xrn {}".format(user_hrn)
command += " " + " ".join(self.user_spec['update_options'])
- return self.test_plc.run_in_guest(self.sfi_pi(command)) == 0
+ return self.test_plc.run_in_guest(self.sfi_pi(command)) == 0
def sfa_delete_user(self,options):
- "run sfi delete on user record"
+ "run sfi delete on user record"
user_hrn = self.hrn()
command = "remove -t user {}".format(user_hrn)
- return self.test_plc.run_in_guest(self.sfi_pi(command)) == 0
+ return self.test_plc.run_in_guest(self.sfi_pi(command)) == 0
'initscripts': initscripts(options,index),
'slices' : slices(options,index),
'tcp_specs' : tcp_specs(options,index),
- 'sfa' : sfa(options,index),
+ 'sfa' : sfa(options,index),
'leases' : leases (options, index),
# big distros need more time to install nodes
'ssh_node_boot_timers': (40,38),
if __name__ == '__main__':
s = sample_test_plc_spec()
- print 'Sample plc_spec has the following keys'
+ print('Sample plc_spec has the following keys')
for k in sorted(s.keys()):
- print k
+ print(k)
def remove_omf (plc_spec):
for slice in plc_spec['slices']:
if 'omf-friendly' in slice:
- print 'Turning off omf-friendly in slice',slice['slice_fields']['name']
+ print('Turning off omf-friendly in slice',slice['slice_fields']['name'])
del slice['omf-friendly']
return plc_spec
+++ /dev/null
-# Thierry Parmentelat <thierry.parmentelat@inria.fr>
-# Copyright (C) 2010 INRIA
-#
-# a configuration module is expected:
-# (*) to define a config method
-# (*) that takes two arguments
-# (**) the current set of plc_specs as output by the preceding config modules
-# (**) TestMain options field
-# (*) and that returns the new set of plc_specs
-
-# archs & vrefs :
-# current focus is to
-########## myplc
-# (*) run a 32bits myplc
-########## multi-arch
-# (*) run wlab02 as a plain 32bits node
-# (*) try and run 64bits in wlab17 (i.e. bootcd & bootstrapfs)
-# which should be achieved by simply adding this node in the 'x86_64' nodegroup
-# (*) investigate what it takes to have the slivers on wlab17 run 32bits as well
-########## multi-vref
-# (*) define slice 'plain' without secific tuning, that should result in f8-based slivers
-# (*) define slice 'centos' with its vref set to centos5
-########## manual stuff
-# all this would require to
-# (*) install bootcd f8-x86_64
-# (*) install bootstrapfs f8-x86_64
-# (*) install noderepo f8-x86_64
-# (*) install noderepo centos5-i386
-# (*) install noderepo centos5-x86_64
-# (*) install vserver centos5-i386
-# (*) and add that to yumgroups.xml
-########## unclear stuff
-# I'm pretty sure that yum.conf.php still needs hacking, at least for centos5
-########## unclear stuff
-
-onelab="onelab.eu"
-
-# these are real nodes, they dont get started by the framework
-def nodes():
- node02 = {'name':'wlab02',
- 'node_fields': {'hostname': 'wlab02.inria.fr', 'model':'Dell Latitude 830'},
- 'owner' : 'pi',
- 'nodegroups' : 'wifi',
- 'interface_fields': { 'method':'dhcp', 'type' : 'ipv4', 'ip':'138.96.250.162',},
- 'extra_interfaces' : [ { 'interface_fields' : { 'method' : 'dhcp',
- 'type' : 'ipv4',
- 'mac' : '00:1B:77:70:F4:C6',
- 'ip' : '138.96.250.192', },
- 'settings' : { 'essid' : 'guest-inria-sophia',
- 'ifname' : 'wlan0', },
- },
- ],
- }
- node17 = {'name':'wlab17',
- 'node_fields': {'hostname': 'wlab17.inria.fr', 'model':'Dell Latitude 830'},
- 'owner' : 'pi',
- 'nodegroups' : ['wifi','x86_64'] ,
- 'interface_fields': { 'method':'dhcp', 'type' : 'ipv4', 'ip':'138.96.250.177',},
- 'extra_interfaces' : [ { 'interface_fields' : { 'method' : 'dhcp',
- 'type' : 'ipv4',
- 'mac' : '00:1c:bf:51:3c:19',
- 'ip' : '138.96.250.207',},
- 'settings' : { 'essid' : 'guest-inria-sophia',
- 'ifname' : 'wlan0',},
- },
- ],
- }
- node05 = {'name':'wlab05',
- 'node_fields': {'hostname': 'wlab05.inria.fr', 'model':'Dell Latitude 830'},
- 'owner' : 'pi',
- 'nodegroups' : 'wifi',
- 'interface_fields': { 'method':'dhcp', 'type' : 'ipv4', 'ip':'138.96.250.165',},
- 'extra_interfaces' : [ { 'interface_fields' : { 'method' : 'static',
- 'type' : 'ipv4',
- 'mac' : '00:1B:77:70:FC:84',
- 'ip' : '138.96.250.215',
- 'network' : '138.96.0.0',
- 'dns1': '138.96.0.10',
- 'dns2': '138.96.0.11',
- 'broadcast' : '138.96.255.255',
- 'netmask' : '255.255.0.0',
- 'gateway' : '138.96.248.250',},
- 'settings' : { 'essid' : 'guest-inria-sophia',
- 'ifname' : 'wlan0',},
- },
- { 'interface_fields' : { 'method' : 'dhcp',
- 'type' : 'ipv4',
- 'mac' : '00:20:A6:4E:FF:E6',
- 'ip' : '138.96.250.50',
- 'hostname' : 'radio40.inria.fr', },
- 'settings' : { 'essid' : 'guest-inria-sophia',
- 'ifname' : 'wifi0',},
- },
- ],
- }
-
-
- # wlab05 not avail. for now
- return [ node02 , node17 ]
-
-def all_nodenames ():
- return [ node['name'] for node in nodes()]
-
-def users (domain=onelab) :
- return [ {'name' : 'pi', 'keynames' : [ 'key1' ],
- 'user_fields' : {'first_name':'PI', 'last_name':'PI',
- 'enabled':'True',
- 'email':'fake-pi1@%s'%domain,
- 'password':'testpi'},
- 'roles':['pi']},
- {'name' : 'tech', 'keynames' : [ 'key1' ],
- 'user_fields' : {'first_name':'Tech', 'last_name':'Tech',
- 'enabled':'true',
- 'email':'fake-tech1@%s'%domain,
- 'password':'testtech'},
- 'roles':['tech']},
- {'name':'user', 'keynames' : [ 'key1' ],
- 'user_fields' : {'first_name':'User', 'last_name':'User',
- 'enabled':'true',
- 'email':'fake-user1@%s'%domain,
- 'password':'testuser'},
- 'roles':['user']},
- {'name':'techuser', 'keynames' : [ 'key1' ],
- 'user_fields' : {'first_name':'UserTech', 'last_name':'UserTech',
- 'enabled':'true',
- 'email':'fake-tech2@%s'%domain,
- 'password':'testusertech'},
- 'roles':['tech','user']},
- {'name':'pitech', 'keynames' : [ 'key1' ],
- 'user_fields' : {'first_name':'PiTech',
- 'last_name':'PiTech',
- 'enabled':'true',
- 'email':'fake-pi2@%s'%domain,
- 'password':'testusertech'},
- 'roles':['pi','tech']},
- ]
-
-def all_usernames ():
- return [ user['name'] for user in users()]
-
-def sites ():
- return [ {'site_fields' : {'name':'wifisite',
- 'login_base':'wifi',
- 'abbreviated_name':'wifi',
- 'max_slices':100,
- 'url':'http://test.onelab.eu',
- },
- 'address_fields' : {'line1':'route des lucioles',
- 'city':'sophia',
- 'state':'fr',
- 'postalcode':'06902',
- 'country':'france',
- },
- 'users' : users(),
- 'nodes': nodes(),
- }]
-
-##########
-public_key="""ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4jNj8yT9ieEc6nSJz/ESu4fui9WrJ2y/MCfqIZ5WcdVKhBFUYyIenmUaeTduMcSqvoYRQ4QnFR1BFdLG8XR9D6FWZ5zTKUgpkew22EVNeqai4IXeWYKyt1Qf3ehaz9E3o1PG/bmQNIM6aQay6TD1Y4lqXI+eTVXVQev4K2fixySjFQpp9RB4UHbeA8c28yoa/cgAYHqCqlvm9uvpGMjgm/Qa4M+ZeO7NdjowfaF/wF4BQIzVFN9YRhvQ/d8WDz84B5Pr0J7pWpaX7EyC4bvdskxl6kmdNIwIRcIe4OcuIiX5Z9oO+7h/chsEVJWF4vqNIYlL9Zvyhnr0hLLhhuk2bw== root@test.onelab.eu
-"""
-private_key="""-----BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEA4jNj8yT9ieEc6nSJz/ESu4fui9WrJ2y/MCfqIZ5WcdVKhBFU
-YyIenmUaeTduMcSqvoYRQ4QnFR1BFdLG8XR9D6FWZ5zTKUgpkew22EVNeqai4IXe
-WYKyt1Qf3ehaz9E3o1PG/bmQNIM6aQay6TD1Y4lqXI+eTVXVQev4K2fixySjFQpp
-9RB4UHbeA8c28yoa/cgAYHqCqlvm9uvpGMjgm/Qa4M+ZeO7NdjowfaF/wF4BQIzV
-FN9YRhvQ/d8WDz84B5Pr0J7pWpaX7EyC4bvdskxl6kmdNIwIRcIe4OcuIiX5Z9oO
-+7h/chsEVJWF4vqNIYlL9Zvyhnr0hLLhhuk2bwIBIwKCAQATY32Yf4NyN93oNd/t
-QIyTuzG0NuLI3W95J/4gI4PAnUDmv6glwRiRO92ynlnnAjqFW/LZ5sGFd4k8YoYU
-sjaa8JJgpwrJBi9y6Fx47/9Tp+ITPqyoliyTXvtqysX0jkaY+I1mNHoTITDkbknZ
-eTma0UOhiKcrMz4qOMwg+kajWsAhIplJXyf0Mio/XuyqjMT4wI/NyGZQ4bGuUjO7
-gj3p+9psOvONsRBW4MV27W5ts9c7HEXg+VJ2PSCEMs+uyzXcdnMJcTb4zQ/+tVxR
-5IMeEuR9ZzDNkDtNF6Nnw5kYcTBNoayzZbUfjcuSmsMklMXr0qJ4qcW9/ONKgBQ9
-6qhDAoGBAPkvSYuF/bxwatEiUKyF97oGDe025h/58aqK1VBD5/BBVqqvbQOeNtR5
-/LerGfFa5D9Jm+6U97gDdq3tH0j95Mo0F00LWq2+vp7U4DTQsiddepzNdbcvSrzT
-NVZ2cnOAlKTHO4hGggShm04n/M5LOzkHtI5TLcIJjw4b5iiIw9EtAoGBAOhjLTds
-Zz8UbXVTeGv8yBGhnjAeHQ5WISN6V5KenB4GIyaYCCcQUOUGqc5nCttlnPLv/GHp
-4DOJ2/0KbwDEwk7HbAtXG2Tv1OkmfcOq9RH19V9lyqynA+zvI6taisCEaMvBlafd
-k+RgXsR+NdLs96RakKt4BtgpuuADoSIryQ+LAoGBAKremNSzpq0Z4tiMjxc3Ssiz
-scc7lnxTnmZQkkWq6C+3xmZpqdaYYByra3ahNlxblTK2IcgroozPLM8I/4KCNnwG
-dmC3VB9eOZF8B3SsXOfLEj+i1GBa8WuJg6kAw4JmzFO70Qz9JfSMErk//c9Jh7IT
-6YYqaIUN3nATIXrhcFTrAoGAVlC5BfUQZ+MEFaKpEiqwthd1RRJ/0h/9rhd/nNvT
-lh+JZhs2OmUlXGGPhy2WUX2DcC1AfCOrC9Qego7YxcVsvizQW/vIWLDaDXSyXp6V
-ilQKrmejDO2Tvmdzpguv4Rs83fdyGcdUMEENQas4kCwhd49aTlEnHRbQYdV2XSY0
-vKECgYEAlhYzfSswIF2h5/hGDLETxgNJ2kD0HIZYh7aud6X6aEYNdJopbfbEhifU
-vTbf8GtvERDoxWEsk9Qp7km8xXfKWdcZtqIwsSmn/ri5d7iyvpIk591YIHSY0dr2
-BO+VyPNWF+kDNI8mSUwi7jLW6liMdhNOmDaSX0+0X8CHtK898xM=
------END RSA PRIVATE KEY-----
-"""
-
-def keys ():
- return [ {'name': 'key1',
- 'private' : private_key,
- 'key_fields' : {'key_type':'ssh',
- 'key': public_key}}
- ]
-
-def initscripts():
- return [ { 'initscript_fields' : { 'enabled' : True,
- 'name':'script_plain',
- 'script' : '#! /bin/sh\n (echo Starting test initscript: Stage 1; date) > /tmp/initscript_plain.log \n ',
- }},
- { 'initscript_fields' : { 'enabled' : True,
- 'name':'script_centos',
- 'script' : '#! /bin/sh\n (echo Starting test initscript: Stage 2; date) > /tmp/initscript_centos.log \n ',
- }},
- ]
-
-def slices ():
- plain= { 'slice_fields': {'name':'wifi_plain',
- 'instantiation':'plc-instantiated',
- 'url':'http://foo@foo.com',
- 'description':'plain slice',
- 'max_nodes':10,
- },
- 'usernames' : [ 'pi','tech','techuser' ],
- 'nodenames' : all_nodenames(),
- 'initscriptname' : 'script_plain',
- 'sitename' : 'wifi',
- 'owner' : 'pi',
- }
- centos= { 'slice_fields': {'name':'wifi_centos',
- 'instantiation':'plc-instantiated',
- 'url':'http://foo@foo.com',
- 'description':'centos slice',
- 'max_nodes':10,
- },
- 'usernames' : [ 'pi','tech','techuser' ],
- 'nodenames' : all_nodenames(),
- 'initscriptname' : 'script_centos',
- 'sitename' : 'wifi',
- 'owner' : 'pi',
- 'vref' : 'centos5',
- }
- ]
-
-def plc () :
- return {
- 'index' : index,
- 'name' : 'wifilab',
- # as of yet, not sure we can handle foreign hosts, but this is required though
- 'hostname' : 'wlab24.inria.fr',
- # set these two items to run within a vserver
- # 'vservername': 'somename'
- # 'vserverip': '138.96.250.131'
- 'role' : 'root',
- 'settings': {
- 'PLC_ROOT_USER' : 'root@wlab24.inria.fr',
- 'PLC_ROOT_PASSWORD' : 'test++',
- 'PLC_NAME' : 'WifiLab',
- 'PLC_MAIL_ENABLED':'true',
- 'PLC_MAIL_SUPPORT_ADDRESS' : 'thierry.parmentelat@inria.fr',
- 'PLC_DB_HOST' : 'wlab24.inria.fr',
- 'PLC_API_HOST' : 'wlab24.inria.fr',
- 'PLC_WWW_HOST' : 'wlab24.inria.fr',
- 'PLC_BOOT_HOST' : 'wlab24.inria.fr',
- 'PLC_NET_DNS1' : '138.96.0.10',
- 'PLC_NET_DNS2' : '138.96.0.11',
- 'PLC_DNS_ENABLED' : 'false',
- },
- 'sites' : sites(),
- 'keys' : keys(),
- 'initscripts': initscripts(),
- 'slices' : slices(),
- }
-
-def config (plc_specs,options):
- print "config_wifilab is obsolete .."
- import sys
- sys.exit(1)
- return plc_specs + [ plc() ]
########## cleanup: stop plc or kill qemus
function cleanup_qemus () {
for box in $qemuboxes ; do
- echo killing qemu instances in $box
- ssh root@$box pkill qemu
+ echo killing qemu instances in $box
+ ssh root@$box pkill qemu
done
if [ -f ~/tracker-qemus ] ; then
- echo ~/cleaning tracker-qemus ; rm ~/tracker-qemus
+ echo ~/cleaning tracker-qemus ; rm ~/tracker-qemus
fi
}
function cleanup_plcs () {
for box in $plcboxes ; do
- echo stopping vserver instances in $box
- ssh root@$box 'cd /vservers ; for vsname in *; do echo -n "stop $vsname y/[n] ? "; read answer ; case $answer in nN) echo skipping $vsname ;; *) vserver $vsname stop ;; esac; done'
+ echo stopping vserver instances in $box
+ ssh root@$box 'cd /vservers ; for vsname in *; do echo -n "stop $vsname y/[n] ? "; read answer ; case $answer in nN) echo skipping $vsname ;; *) vserver $vsname stop ;; esac; done'
done
if [ -f ~/tracker-plcs ] ; then
- echo ~/cleaning tracker-plcs
- rm ~/tracker-plcs
+ echo ~/cleaning tracker-plcs
+ rm ~/tracker-plcs
fi
}
alias cleanup_all="cleanup_qemus ; cleanup_plcs"
### check for util-vserver 'chkconfig on' ######################################
##################################################################################
files = [
- '/etc/rc1.d/K90util-vserver',
- '/etc/rc2.d/S10util-vserver',
- '/etc/rc3.d/S10util-vserver',
- '/etc/rc4.d/S10util-vserver',
- '/etc/rc5.d/S10util-vserver',
- '/etc/rc6.d/K90util-vserver',
+ '/etc/rc1.d/K90util-vserver',
+ '/etc/rc2.d/S10util-vserver',
+ '/etc/rc3.d/S10util-vserver',
+ '/etc/rc4.d/S10util-vserver',
+ '/etc/rc5.d/S10util-vserver',
+ '/etc/rc6.d/K90util-vserver',
]
print "[CHECKING] util-vserver init scripts : ",
for file in files:
- if os.path.exists(file):
- print ".",
- sys.stdout.flush()
- else:
- print "\n[FAILED] util-vserver does not appear to be enabled via 'chkconfig util-vserver on'."
- sys.exit(1)
+ if os.path.exists(file):
+ print ".",
+ sys.stdout.flush()
+ else:
+ print "\n[FAILED] util-vserver does not appear to be enabled via 'chkconfig util-vserver on'."
+ sys.exit(1)
print "\n[PASSED] util-vserver runlevel init script is enabled"
#!/usr/bin/python
# Module: VNET+
-# Description:
+# Description:
# Connect to the node manager
# Author: acb@cs.princeton.edu/sapanb@cs.princeton.edu
# Copyright (C) 2006 The Trustees of Princeton University
#
+# NOTE on porting to python3
+#
+# this file gets fed to plcsh on the tested myplc, so
+# it needs to remain python2 for now
+#
+
from pprint import pprint
from string import letters, digits, punctuation, whitespace
from traceback import print_exc
import time
import subprocess
import socket
-import SocketServer
+import socketserver
import threading
from optparse import OptionParser
def myprint(message, id='client'):
now = time.strftime("%H:%M:%S", time.localtime())
- print "* {now} ({id}) -- {message}".format(**locals())
+ print("* {now} ({id}) -- {message}".format(**locals()))
sys.stdout.flush()
def show_network_status(id):
myprint("ip route show", id=id)
subprocess.call(['ip', 'route', 'show'])
-class EchoRequestHandler(SocketServer.StreamRequestHandler):
+class EchoRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
-class UppercaseRequestHandler(SocketServer.StreamRequestHandler):
+class UppercaseRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line.upper())
myprint("==================== tcptest.py server", id='server')
show_network_status(id='server')
- server = SocketServer.TCPServer((options.address, options.port),
+ server = socketserver.TCPServer((options.address, options.port),
UppercaseRequestHandler)
try:
if options.timeout:
else:
server.serve_forever()
except KeyboardInterrupt as e:
- print 'Bailing out on keyboard interrupt'
+ print('Bailing out on keyboard interrupt')
sys.exit(1)
class Ready:
s.bind((options.address, options.port))
return True
except Exception as e:
- print e
+ print(e)
return False
def eth0_has_ipv4():
elif arg.find("ready") >= 0:
sys.argv.remove(arg)
Ready().main()
- print 'you must specify either --client or --server'
+ print('you must specify either --client or --server')
sys.exit(1)
# how could this accept a list again ?
def header(message):
now = time.strftime("%H:%M:%S", time.localtime())
- print "*", now, '--', message
+ print("*", now, '--', message)
def pprint(message, spec, depth=2):
now = time.strftime("%H:%M:%S", time.localtime())
- print ">", now, "--", message
+ print(">", now, "--", message)
PrettyPrinter(indent=8, depth=depth).pprint(spec)
def system(command, background=False, silent=False, dry_run=None):
dry_run = dry_run if dry_run is not None else getattr(options, 'dry_run', False)
if dry_run:
- print 'dry_run:', command
+ print('dry_run:', command)
return 0
if silent :
if background:
command += " &"
if silent:
- print '.',
+ print('.', end=' ')
sys.stdout.flush()
else:
now = time.strftime("%H:%M:%S", time.localtime())
# don't show in summary
- print "->", now, '--',
+ print("->", now, '--', end=' ')
sys.stdout.flush()
if not silent:
command = "set -x; " + command
### WARNING : this ALWAYS does its job, even in dry_run mode
def output_of (command):
- import commands
- (code, string) = commands.getstatusoutput(command)
+ import subprocess
+ (code, string) = subprocess.getstatusoutput(command)
return (code, string)
return re.compile(pattern).match(string)
def locate_hooks_scripts (message, path, extensions):
- print message, 'searching', path, 'for extensions', extensions
+ print(message, 'searching', path, 'for extensions', extensions)
scripts = []
for ext in extensions:
# skip helper programs
exclude_options_keys = [ 'ensure_value' , 'read_file', 'read_module' ]
def show_options (message, options):
now = time.strftime("%H:%M:%S", time.localtime())
- print ">", now, "--", message
+ print(">", now, "--", message)
for k in dir(options):
if k.find("_") == 0:
continue
if k in exclude_options_keys:
continue
- print " ", k, ":", getattr(options, k)
+ print(" ", k, ":", getattr(options, k))