or %s
config defaults to the last value used, as stored in arg-config,
or %r
-node-ips and plc-ips defaults to the last value used, as stored in arg-ips-node and arg-ips-plc,
+ips_node, ips_plc and ips_qemu defaults to the last value used, as stored in arg-ips-{node,plc,qemu},
default is to use IP scanning
steps refer to a method in TestPlc or to a step_* module
===
help="List known steps")
parser.add_option("-N","--nodes",action="callback", callback=TestMain.optparse_list, dest="ips_node",
nargs=1,type="string",
- help="Specify the set of IP addresses to use for nodes (scanning disabled)")
+ help="Specify the set of hostname/IP's to use for nodes")
parser.add_option("-P","--plcs",action="callback", callback=TestMain.optparse_list, dest="ips_plc",
nargs=1,type="string",
- help="Specify the set of IP addresses to use for plcs (scanning disabled)")
+ help="Specify the set of hostname/IP's to use for plcs")
+ parser.add_option("-Q","--qemus",action="callback", callback=TestMain.optparse_list, dest="ips_qemu",
+ nargs=1,type="string",
+ help="Specify the set of hostname/IP's to use for qemu boxes")
parser.add_option("-s","--size",action="store",type="int",dest="size",default=1,
help="sets test size in # of plcs - default is 1")
parser.add_option("-D","--dbname",action="store",dest="dbname",default=None,
('build_url','arg-build-url',TestMain.default_build_url) ,
('ips_node','arg-ips-node',[]) ,
('ips_plc','arg-ips-plc',[]) ,
+ ('ips_qemu','arg-ips-qemu',[]) ,
('config','arg-config',TestMain.default_config) ,
('arch_rpms_url','arg-arch-rpms-url',"") ,
('personality','arg-personality',"linux32"),
for node_spec in site_spec['nodes']:
ips_node_file.write("%s\n"%node_spec['node_fields']['hostname'])
ips_node_file.close()
+ # ditto for qemu boxes
+ ips_qemu_file=open('arg-ips-qemu','w')
+ for plc_spec in all_plc_specs:
+ for site_spec in plc_spec['sites']:
+ for node_spec in site_spec['nodes']:
+ ips_qemu_file.write("%s\n"%node_spec['host_box'])
+ ips_qemu_file.close()
# build a TestPlc object from the result, passing options
for spec in all_plc_specs:
spec['disabled'] = False
#
# allows to pick an available IP among a pool
#
-# input is expressed as a list of tuples ('hostname_or_ip',user_data)
-# can be searched iteratively
+# input is expressed as a list of tuples (hostname,ip,user_data)
+# that can be searched iteratively for a free slot
+# TestPoolIP : look for a free IP address
+# TestPoolQemu : look for a test_box with no qemu running
# e.g.
-# pool = [ (hostname1,ip1,user_data1), (hostname2,ip2,user_data2),
-# (hostname3,ip3,user_data2), (hostname4,ip4,user_data4) ]
+# pool = [ (hostname1,ip1,user_data1),
+# (hostname2,ip2,user_data2),
+# (hostname3,ip3,user_data2),
+# (hostname4,ip4,user_data4) ]
# assuming that ip1 and ip3 are taken (pingable), then we'd get
-# pool=TestPool(pool)
+# pool=TestPoolIP(pool)
# pool.next_free() -> entry2
# pool.next_free() -> entry4
# pool.next_free() -> None
class TestPool:
- def __init__ (self, pool, options):
+ def __init__ (self, pool, options,message):
self.pool=pool
self.options=options
self.busy=[]
+ self.message=message
# let's be flexible
- def locate (self, hostname_or_ip, busy=False):
+ def locate_entry (self, hostname_or_ip, busy=True):
for (h,i,u) in self.pool:
if h.find(hostname_or_ip)>=0 or i.find(hostname_or_ip)>=0 :
if busy:
return None
def next_free (self):
- # if preferred is provided, let's re-order
if self.options.quiet:
- print 'TestPool is looking for a free IP address',
- for (host,ip,user_data) in self.pool:
- if host in self.busy:
+ print 'TestPool is looking for a %s'%self.message,
+ for (hostname,ip,user_data) in self.pool:
+ if hostname in self.busy:
continue
if not self.options.quiet:
- utils.header('TestPool : checking %s'%host)
+ utils.header('TestPool : checking %s'%hostname)
if self.options.quiet:
print '.',
- if not TestPool.check_ping (host):
+ if self.free_hostname(hostname):
if not self.options.quiet:
- utils.header('%s is available'%host)
+ utils.header('%s is available'%hostname)
else:
print ''
- self.busy.append(host)
- return (host,ip,user_data)
+ self.busy.append(hostname)
+ return (hostname,ip,user_data)
else:
- self.busy.append(host)
- return None
+ self.busy.append(hostname)
+ raise Exception, "No space left in pool (%s)"%self.message
+
+class TestPoolIP (TestPool):
+
+ def __init__ (self,pool,options):
+ TestPool.__init__(self,pool,options,"free IP address")
+
+ def free_hostname (self, hostname):
+ return not TestPoolIP.check_ping(hostname)
# OS-dependent ping option (support for macos, for convenience)
ping_timeout_option = None
# checks whether a given hostname/ip responds to ping
@staticmethod
def check_ping (hostname):
- if not TestPool.ping_timeout_option:
+ if not TestPoolIP.ping_timeout_option:
(status,osname) = commands.getstatusoutput("uname -s")
if status != 0:
raise Exception, "TestPool: Cannot figure your OS name"
if osname == "Linux":
- TestPool.ping_timeout_option="-w"
+ TestPoolIP.ping_timeout_option="-w"
elif osname == "Darwin":
- TestPool.ping_timeout_option="-t"
+ TestPoolIP.ping_timeout_option="-t"
- command="ping -c 1 %s 1 %s"%(TestPool.ping_timeout_option,hostname)
+ command="ping -c 1 %s 1 %s"%(TestPoolIP.ping_timeout_option,hostname)
(status,output) = commands.getstatusoutput(command)
return status == 0
+
+class TestPoolQemu (TestPool):
+
+ def __init__ (self,pool,options):
+ TestPool.__init__(self,pool,options,"free qemu box")
+
+ def free_hostname (self, hostname):
+ return not TestPoolQemu.busy_qemu(hostname)
+
+    # is there a qemu running on that box already ?
+ @staticmethod
+ def busy_qemu (hostname):
+ command="ssh -o ConnectTimeout=5 root@%s ps -e -o cmd"%hostname
+ (status,output) = commands.getstatusoutput(command)
+ # if we fail to run that, let's assume we don't have ssh access, so
+ # we pretend the box is busy
+ if status!=0:
+ return True
+ elif output.find("qemu") >=0 :
+ return True
+ else:
+ return False
import sys
from TestMapper import TestMapper
+from TestPool import TestPoolQemu
-# using mapper to do the reallocation job
-
+# a small qemu pool for now
+onelab_qemus_pool = [
+ ( 'kruder.inria.fr', None, None),
+ ( 'estran.inria.fr', None, None),
+# cut here
+ ( 'blitz.inria.fr', None, None),
+]
+
def config (plcs, options):
- if options.personality == "linux32":
- plc_box ='speedball.inria.fr'
- node_box1 = 'testbox64_1.onelab.eu'
- node_box2 = 'testbox64_2.onelab.eu'
- label="32"
- elif options.personality == "linux64":
- plc_box = 'speedball.inria.fr'
- node_box1 = 'testbox64_1.onelab.eu'
- node_box2 = 'testbox64_2.onelab.eu'
- label="64"
- else:
- print 'Unsupported personality %s'%options.personality
- sys.exit(1)
+ # all plcs on the same vserver box
+ plc_box ='speedball.inria.fr'
+ # informative
+ label=options.personality.replace("linux","")
+
+    # all qemus on a unique pool of 64-bit boxes
+ node_map = []
+ qemu_pool = TestPoolQemu (onelab_qemus_pool,options)
+ for index in range(options.size):
+ index += 1
+ if options.ips_qemu:
+ ip_or_hostname=options.ips_qemu.pop()
+ (hostname,ip,unused)=qemu_pool.locate_entry(ip_or_hostname)
+ else:
+ (hostname,ip,unused) = qemu_pool.next_free()
+ node_map += [ ('node%d'%index, {'host_box':hostname},) ]
mapper = {'plc': [ ('*' , {'hostname':plc_box,
'PLC_DB_HOST':plc_box,
'PLC_WWW_HOST':plc_box,
'name':'%s-'+label } )
],
- 'node': [ ('node1' , {'host_box': node_box1 } ),
- ('node2' , {'host_box': node_box2 } ),
- ],
+ 'node': node_map,
}
return TestMapper(plcs,options).map(mapper)
import utils
from TestMapper import TestMapper
-from TestPool import TestPool
+from TestPool import TestPoolIP
+
+onelab_nodes_ip_pool = [
+ ( 'vnode%02d.inria.fr'%i,
+ '138.96.255.%d'%(220+i),
+ '02:34:56:00:00:%02d'%i) for i in range(1,10) ]
-onelab_plcs_pool = [
- ( 'vnode%02d.inria.fr'%i, '138.96.255.%d'%(220+i), '02:34:56:00:00:%02d'%i) for i in range(1,10) ]
site_dict = {
'interface_fields:gateway':'138.96.248.250',
'interface_fields:network':'138.96.0.0',
def config (plcs, options):
- test_pool = TestPool (onelab_plcs_pool,options)
+ ip_pool = TestPoolIP (onelab_nodes_ip_pool,options)
test_mapper = TestMapper (plcs, options)
all_nodenames = test_mapper.node_names()
maps = []
for nodename in all_nodenames:
if options.ips_node:
- ip=options.ips_node.pop()
- (hostname,ip,mac)=test_pool.locate(ip,True)
+ ip_or_hostname=options.ips_node.pop()
+ (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname)
else:
- (hostname,ip,mac) = test_pool.next_free()
+ (hostname,ip,mac) = ip_pool.next_free()
utils.header('Attaching node %s to %s (%s)'%(nodename,hostname,ip))
node_dict= {'node_fields:hostname':hostname,
'interface_fields:ip':ip,
import utils
import os.path
-from TestPool import TestPool
+from TestPool import TestPoolIP
-# the pool of IP addresses available - from 01 to 09
-#onelab_plcs_pool = [
-# ( 'vplc%02d.inria.fr'%i, '138.96.250.13%d'%i, '02:34:56:00:ee:%02d'%i) for i in range(1,10) ]
-# vplc09 is reserved for a fixed myplc - from 01 to 08
-onelab_plcs_pool = [
- ( 'vplc%02d.inria.fr'%i, '138.96.255.%d'%(200+i), '02:34:56:00:ee:%02d'%i) for i in range(1,21) ]
+onelab_plcs_ip_pool = [
+ ( 'vplc%02d.inria.fr'%i,
+ '138.96.255.%d'%(200+i),
+ '02:34:56:00:ee:%02d'%i) for i in range(1,21) ]
def config (plcs,options):
utils.header ("Turning configuration into a vserver-based one for onelab")
- test_pool = TestPool (onelab_plcs_pool,options)
+ ip_pool = TestPoolIP (onelab_plcs_ip_pool,options)
plc_counter=0
for plc in plcs:
try:
if options.ips_plc :
- ip=options.ips_plc.pop()
- (hostname,ip,mac)=test_pool.locate(ip,True)
+ ip_or_hostname=options.ips_plc.pop()
+ (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname)
if not options.quiet:
utils.header("Using user-provided %s %s for plc %s"%(
- hostname,ip,plc['name']))
+ hostname,ip_or_hostname,plc['name']))
else:
- (hostname,ip,mac)=test_pool.next_free()
+ (hostname,ip,mac)=ip_pool.next_free()
if not options.quiet:
utils.header("Using auto-allocated %s %s for plc %s"%(
hostname,ip,plc['name']))
# (**) TestMain options field
# (*) and that returns the new set of plc_specs
-# values like 'hostname', 'ip' and the like my be rewritten later with a TestPool object
+# values like 'hostname', 'ip' and the like are rewritten later with a TestPool object
def nodes(options,index):
return [{'name':'node%d'%index,