from TestSite import TestSite
from TestNode import TestNode
+# add $HOME to PYTHONPATH so we can import LocalTestResources.py
+sys.path.append(os.environ['HOME'])
+import LocalTestResources
+
class TestMain:
subversion_id = "$Id$"
self.path=os.path.dirname(sys.argv[0]) or "."
os.chdir(self.path)
- @staticmethod
- def show_env (options, message):
- utils.header (message)
- utils.show_options("main options",options)
+ def show_env (self,options, message):
+ if self.options.verbose:
+ utils.header (message)
+ utils.show_options("main options",options)
+
+    def init_steps(self):
+        "precompute the steps help message, shown by --list and appended to the --help usage"
+        # fix typo: 'Defaut' -> 'Default' in the user-visible message
+        self.steps_message=20*'x'+" Default steps are\n"+TestPlc.printable_steps(TestPlc.default_steps)
+        self.steps_message += "\n"+20*'x'+" Other useful steps are\n"+TestPlc.printable_steps(TestPlc.other_steps)
+
+    def list_steps(self):
+        "list steps; with --verbose, also show each step's doc string from TestPlc"
+        if not self.options.verbose:
+            print self.steps_message
+        else:
+            # introspect the TestPlc class to locate the step methods' doc strings
+            testplc_method_dict = __import__("TestPlc").__dict__['TestPlc'].__dict__
+            scopes = [("Default steps",TestPlc.default_steps)]
+            if self.options.all_steps:
+                scopes.append ( ("Other steps",TestPlc.other_steps) )
+            for (scope,steps) in scopes:
+                print '--------------------',scope
+                for step in [step for step in steps if TestPlc.valid_step(step)]:
+                    # 'force_xxx' steps share their doc with the plain 'xxx' method
+                    stepname=step
+                    if step.find("force_") == 0:
+                        stepname=step.replace("force_","")
+                    print '*',step,"\r",4*"\t",
+                    try:
+                        print testplc_method_dict[stepname].__doc__
+                    except KeyError:
+                        print "*** no doc found"
@staticmethod
def optparse_list (option, opt, value, parser):
setattr(parser.values,option.dest,value.split())
def run (self):
- steps_message=20*'x'+" Defaut steps are\n"+TestPlc.printable_steps(TestPlc.default_steps)
- steps_message += "\n"+20*'x'+" Other useful steps are\n"+TestPlc.printable_steps(TestPlc.other_steps)
+ self.init_steps()
usage = """usage: %%prog [options] steps
arch-rpms-url defaults to the last value used, as stored in arg-arch-rpms-url,
no default
steps refer to a method in TestPlc or to a step_* module
===
"""%(TestMain.default_build_url,TestMain.default_config)
- usage += steps_message
+ usage += self.steps_message
parser=OptionParser(usage=usage,version=self.subversion_id)
parser.add_option("-u","--url",action="store", dest="arch_rpms_url",
help="URL of the arch-dependent RPMS area - for locating what to test")
if self.options.quiet:
self.options.verbose=False
+ # no step specified
if len(self.args) == 0:
- if self.options.all_steps:
- self.options.steps=TestPlc.default_steps
- elif self.options.dry_run:
- self.options.steps=TestPlc.default_steps
- elif self.options.list_steps:
- print steps_message
- sys.exit(1)
- else:
- print 'No step found (do you mean -a ? )'
- print "Run %s --help for help"%sys.argv[0]
- sys.exit(1)
+ self.options.steps=TestPlc.default_steps
else:
self.options.steps = self.args
+ if self.options.list_steps:
+ self.list_steps()
+ sys.exit(1)
+
# handle defaults and option persistence
for (recname,filename,default) in (
('build_url','arg-build-url',TestMain.default_build_url) ,
if isinstance(getattr(self.options,recname),list):
getattr(self.options,recname).reverse()
- if not self.options.quiet:
+ if self.options.verbose:
utils.header('* Using %s = %s'%(recname,getattr(self.options,recname)))
traceback.print_exc()
print 'Cannot load config %s -- ignored'%modulename
raise
+
+ # run localize as defined by local_resources
+ all_plc_specs = LocalTestResources.local_resources.localize(all_plc_specs,self.options)
+
# remember plc IP address(es) if not specified
ips_plc_file=open('arg-ips-plc','w')
for plc_spec in all_plc_specs:
def apply_first_map (self, type, name, obj, maplist):
for (map_pattern,rename_dict) in maplist:
if utils.match (name,map_pattern):
- if not self.options.quiet:
+ if self.options.verbose:
utils.header("TestMapper/%s : applying rules '%s' on %s"%(type,map_pattern,name))
for (k,v) in rename_dict.iteritems():
# apply : separator
for step in path[:-1]:
if not o.has_key(step):
o[step]={}
- if not self.options.quiet:
+ if self.options.verbose:
utils.header ("WARNING : created step %s in path %s on %s %s"%(
step,path,type,name))
o=o[step]
# last step is the one for side-effect
step=path[-1]
- if not self.options.quiet:
+ if self.options.verbose:
if not o.has_key(step):
utils.header ("WARNING : inserting key %s for path %s on %s %s"%(
step,path,type,name))
# apply formatting if '%s' found in the value
if v.find('%s')>=0:
v=v%obj[k]
- if not self.options.quiet:
+ if self.options.verbose:
print("TestMapper, rewriting %s: %s into %s"%(name,k,v))
o[step]=v
# only apply first rule
from TestBox import TestBox
from TestSsh import TestSsh
from TestApiserver import TestApiserver
-from Trackers import TrackerPlc, TrackerQemu
# step methods must take (self) and return a boolean (options is a member of the class)
test_node = TestNode (self,test_site,node_spec)
if not node_method(test_node): overall=False
return overall
+ # restore the doc text
+ actual.__doc__=method.__doc__
return actual
def slice_mapper_options (method):
test_slice=TestSlice(self,test_site,slice_spec)
if not slice_method(test_slice,self.options): overall=False
return overall
+ # restore the doc text
+ actual.__doc__=method.__doc__
return actual
SEP='<sep>'
class TestPlc:
default_steps = [
- 'display','trqemu_record','trqemu_free','uninstall','install','install_rpm',
- 'configure', 'start', 'fetch_keys', SEP,
- 'store_keys', 'clear_known_hosts', 'initscripts', SEP,
- 'sites', 'nodes', 'slices', 'nodegroups', SEP,
- 'init_node','bootcd', 'configure_qemu', 'export_qemu',
- 'kill_all_qemus', 'reinstall_node','start_node', SEP,
+ 'display', 'local_pre', SEP,
+ 'delete','create','install', 'configure', 'start', SEP,
+ 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
+ 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', SEP,
+ 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
+ 'kill_all_qemus', 'start_node', SEP,
# better use of time: do this now that the nodes are taking off
'plcsh_stress_test', SEP,
'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
- 'check_tcp', SEP,
- 'check_sanity', SEP,
- 'force_gather_logs', 'force_trplc_record','force_trplc_free',
+ 'check_tcp', 'check_sanity', SEP,
+ 'force_gather_logs', 'force_local_post',
]
other_steps = [
- 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
+ 'fresh_install', 'stop', 'vs_start', SEP,
'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
- 'clean_sites', 'clean_nodes',
- 'clean_slices', 'clean_keys', SEP,
+ 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
'populate' , SEP,
'show_boxes', 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
- 'db_dump' , 'db_restore', 'trplc_cleanup','trqemu_cleanup','trackers_cleanup', SEP,
+ 'db_dump' , 'db_restore', SEP,
+ 'local_list','local_cleanup',SEP,
'standby_1 through 20',
]
# make this a valid step
def kill_all_qemus(self):
+ "all qemu boxes: kill all running qemus (even of former runs)"
# this is the brute force version, kill all qemus on that host box
for (box,nodes) in self.gather_hostBoxes().iteritems():
# pass the first nodename, as we don't push template-qemu on testboxes
#################### display config
def display (self):
+ "show test configuration after localization"
self.display_pass (1)
self.display_pass (2)
return True
print '*\tqemu box %s'%node_spec['host_box']
print '*\thostname=%s'%node_spec['node_fields']['hostname']
- ### tracking
- def trplc_record (self):
- tracker = TrackerPlc(self.options)
- tracker.record(self.test_ssh.hostname,self.vservername)
- tracker.store()
- return True
-
- def trplc_free (self):
- tracker = TrackerPlc(self.options)
- tracker.free()
- tracker.store()
- return True
-
- def trplc_cleanup (self):
- tracker = TrackerPlc(self.options)
- tracker.cleanup()
- tracker.store()
- return True
-
- def trqemu_record (self):
- tracker=TrackerQemu(self.options)
- for site_spec in self.plc_spec['sites']:
- for node_spec in site_spec['nodes']:
- tracker.record(node_spec['host_box'],self.options.buildname,node_spec['node_fields']['hostname'])
- tracker.store()
- return True
-
- def trqemu_free (self):
- tracker=TrackerQemu(self.options)
- for site_spec in self.plc_spec['sites']:
- for node_spec in site_spec['nodes']:
- tracker.free()
- tracker.store()
- return True
-
- def trqemu_cleanup (self):
- tracker=TrackerQemu(self.options)
- for site_spec in self.plc_spec['sites']:
- for node_spec in site_spec['nodes']:
- tracker.cleanup()
- tracker.store()
- return True
-
- def trackers_cleanup (self):
- self.trqemu_cleanup()
- self.trplc_cleanup()
- return True
-
- def uninstall(self):
+ def local_pre (self):
+ "run site-dependant pre-test script as defined in LocalTestResources"
+ from LocalTestResources import local_resources
+ return local_resources.step_pre(self)
+
+ def local_post (self):
+ "run site-dependant post-test script as defined in LocalTestResources"
+ from LocalTestResources import local_resources
+ return local_resources.step_post(self)
+
+ def local_list (self):
+ "run site-dependant list script as defined in LocalTestResources"
+ from LocalTestResources import local_resources
+ return local_resources.step_list(self)
+
+ def local_cleanup (self):
+ "run site-dependant cleanup script as defined in LocalTestResources"
+ from LocalTestResources import local_resources
+ return local_resources.step_cleanup(self)
+
+ def delete(self):
+ "vserver delete the test myplc"
self.run_in_host("vserver --silent %s delete"%self.vservername)
return True
### install
- def install(self):
+ def create (self):
+ "vserver creation (no install done)"
if self.is_local():
# a full path for the local calls
build_dir=os.path.dirname(sys.argv[0])
return self.run_in_host(create_vserver) == 0
### install_rpm
- def install_rpm(self):
+ def install(self):
+ "yum install myplc, noderepo, and the plain bootstrapfs"
if self.options.personality == "linux32":
arch = "i386"
elif self.options.personality == "linux64":
###
def configure(self):
+ "run plc-config-tty"
tmpname='%s.plc-config-tty'%(self.name())
fileconf=open(tmpname,'w')
for var in [ 'PLC_NAME',
return True
def start(self):
+ "service plc start"
self.run_in_guest('service plc start')
return True
def stop(self):
+ "service plc stop"
self.run_in_guest('service plc stop')
return True
# stores the keys from the config for further use
def store_keys(self):
+ "stores test users ssh keys in keys/"
for key_spec in self.plc_spec['keys']:
TestKey(self,key_spec).store_key()
return True
# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
def fetch_keys(self):
+ "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
dir="./keys"
if not os.path.isdir(dir):
os.mkdir(dir)
return overall
def sites (self):
+ "create sites with PLCAPI"
return self.do_sites()
def clean_sites (self):
+ "delete sites with PLCAPI"
return self.do_sites(action="delete")
def do_sites (self,action="add"):
self.apiserver.DeleteSite(self.auth_root(),site_id)
def nodes (self):
+ "create nodes with PLCAPI"
return self.do_nodes()
def clean_nodes (self):
+ "delete nodes with PLCAPI"
return self.do_nodes(action="delete")
def do_nodes (self,action="add"):
return True
def nodegroups (self):
+ "create nodegroups with PLCAPI"
return self.do_nodegroups("add")
def clean_nodegroups (self):
+ "delete nodegroups with PLCAPI"
return self.do_nodegroups("delete")
# create nodegroups if needed, and populate
return True
def nodes_ssh_debug(self):
+ "Tries to ssh into nodes in debug mode with the debug ssh key"
return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=10)
def nodes_ssh_boot(self):
+ "Tries to ssh into nodes in production mode with the root ssh key"
return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=10)
@node_mapper
- def init_node (self): pass
+ def init_node (self):
+ "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
+ pass
@node_mapper
- def bootcd (self): pass
+ def bootcd (self):
+ "all nodes: invoke GetBootMedium and store result locally"
+ pass
@node_mapper
- def configure_qemu (self): pass
+ def configure_qemu (self):
+ "all nodes: compute qemu config qemu.conf and store it locally"
+ pass
@node_mapper
- def reinstall_node (self): pass
+ def reinstall_node (self):
+ "all nodes: mark PLCAPI boot_state as reinstall"
+ pass
@node_mapper
- def export_qemu (self): pass
+ def export_qemu (self):
+ "all nodes: push local node-dep directory on the qemu box"
+ pass
### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
def check_sanity_node (self):
return self.locate_first_sliver().check_sanity()
def check_sanity (self):
+ "runs unit tests in the node and slice contexts - see tests/qaapi/qa/tests/{node,slice}"
return self.check_sanity_node() and self.check_sanity_sliver()
### initscripts
return overall
def check_initscripts(self):
- return self.do_check_initscripts()
-
+ "check that the initscripts have triggered"
+ return self.do_check_initscripts()
+
def initscripts (self):
+ "create initscripts with PLCAPI"
for initscript in self.plc_spec['initscripts']:
utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
return True
def clean_initscripts (self):
+ "delete initscripts with PLCAPI"
for initscript in self.plc_spec['initscripts']:
initscript_name = initscript['initscript_fields']['name']
print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
### manage slices
def slices (self):
+ "create slices with PLCAPI"
return self.do_slices()
def clean_slices (self):
+ "delete slices with PLCAPI"
return self.do_slices("delete")
def do_slices (self, action="add"):
return True
@slice_mapper_options
- def check_slice(self): pass
+ def check_slice(self):
+ "tries to ssh-enter the slice with the user key, to ensure slice creation"
+ pass
@node_mapper
- def clear_known_hosts (self): pass
+ def clear_known_hosts (self):
+ "remove test nodes entries from the local known_hosts file"
+ pass
@node_mapper
- def start_node (self) : pass
+ def start_node (self) :
+ "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
+ pass
def check_tcp (self):
+ "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
specs = self.plc_spec['tcp_test']
overall=True
for spec in specs:
return overall
def plcsh_stress_test (self):
+ "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
# install the stress-test in the plc image
location = "/usr/share/plc_api/plcsh_stress_test.py"
remote="/vservers/%s/%s"%(self.vservername,location)
# in particular runs with --preserve (dont cleanup) and without --check
# also it gets run twice, once with the --foreign option for creating fake foreign entries
def populate (self):
+ "creates random entries in the PLCAPI"
# install the stress-test in the plc image
location = "/usr/share/plc_api/plcsh_stress_test.py"
remote="/vservers/%s/%s"%(self.vservername,location)
return ( local and remote)
def gather_logs (self):
+ "gets all possible logs from plc's/qemu node's/slice's for future reference"
# (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
# (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
# (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
--- /dev/null
+#
+# $Id$
+#
+
+import sys
+
+import utils
+from TestMapper import TestMapper
+from TestPool import TestPoolQemu, TestPoolIP
+from Trackers import TrackerPlc, TrackerQemu
+
+class TestResources ():
+
+ # need more specialization, see an example in OnelabTestResources
+
+    ##########
+    def localize (self,plcs,options):
+        "localize qemus, nodes and plcs in turn; any failure is fatal"
+        # show the actual failure before bailing out -- a bare 'except:' used to
+        # swallow the diagnostic (and would even catch KeyboardInterrupt/SystemExit)
+        import traceback
+        try:
+            plcs = self.localize_qemus(plcs,options)
+        except Exception:
+            traceback.print_exc()
+            print 'Could not localize qemus - exiting'
+            sys.exit(1)
+        try:
+            plcs = self.localize_nodes(plcs,options)
+        except Exception:
+            traceback.print_exc()
+            print 'Could not localize nodes - exiting'
+            sys.exit(1)
+        try:
+            plcs = self.localize_plcs(plcs,options)
+        except Exception:
+            traceback.print_exc()
+            print 'Could not localize plcs - exiting'
+            sys.exit(1)
+        return plcs
+
+ def localize_qemus (self,plcs,options):
+
+ # all plcs on the same vserver box
+ plc_box = self.plc_boxes()[0]
+
+ # informative
+ label=options.personality.replace("linux","")
+
+ node_map = []
+ qemu_pool = TestPoolQemu (self.qemus_ip_pool(), options)
+
+ for index in range(options.size):
+ index += 1
+ if options.ips_qemu:
+ ip_or_hostname=options.ips_qemu.pop()
+ (hostname,ip,unused)=qemu_pool.locate_entry(ip_or_hostname)
+ else:
+ (hostname,ip,unused) = qemu_pool.next_free()
+
+ node_map += [ ('node%d'%index, {'host_box':hostname},) ]
+
+ mapper = {'plc': [ ('*' , {'hostname':plc_box,
+ 'PLC_DB_HOST':plc_box,
+ 'PLC_API_HOST':plc_box,
+ 'PLC_BOOT_HOST':plc_box,
+ 'PLC_WWW_HOST':plc_box,
+ 'name':'%s-'+label } )
+ ],
+ 'node': node_map,
+ }
+
+ return TestMapper(plcs,options).map(mapper)
+
+
+    def localize_nodes (self, plcs, options):
+        "attach each node to a hostname/IP/MAC from the local nodes IP pool"
+        ip_pool = TestPoolIP (self.nodes_ip_pool(),options)
+        network_dict = self.network_dict()
+
+        test_mapper = TestMapper (plcs, options)
+
+        all_nodenames = test_mapper.node_names()
+        maps = []
+        for nodename in all_nodenames:
+            if options.ips_node:
+                # user-provided addresses take precedence over the pool
+                ip_or_hostname=options.ips_node.pop()
+                (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname)
+            else:
+                (hostname,ip,mac) = ip_pool.next_free()
+            utils.header('Attaching node %s to %s (%s)'%(nodename,hostname,ip))
+            node_dict= {'node_fields:hostname':hostname,
+                        'interface_fields:ip':ip,
+                        'interface_fields:mac':mac,
+                        }
+
+            node_dict.update(network_dict)
+            maps.append ( ( nodename, node_dict) )
+
+        plc_map = [ ( '*' , { 'PLC_NET_DNS1' : network_dict [ 'interface_fields:dns1' ],
+                              'PLC_NET_DNS2' : network_dict [ 'interface_fields:dns2' ], } ) ]
+
+        return test_mapper.map ({'node': maps, 'plc' : plc_map } )
+
+
+ def localize_plcs (self,plcs,options):
+
+ utils.header ("Turning configuration into a vserver-based one for onelab")
+
+ ip_pool = TestPoolIP (self.plcs_ip_pool(),options)
+
+ plc_counter=0
+ for plc in plcs:
+ if options.ips_plc :
+ ip_or_hostname=options.ips_plc.pop()
+ (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname)
+ if options.verbose:
+ utils.header("Using user-provided %s %s for plc %s"%(
+ hostname,ip_or_hostname,plc['name']))
+ else:
+ (hostname,ip,mac)=ip_pool.next_free()
+ if options.verbose:
+ utils.header("Using auto-allocated %s %s for plc %s"%(
+ hostname,ip,plc['name']))
+
+ ### rewrite fields in plc
+ # compute a helpful vserver name - remove domain in hostname
+ simplehostname=hostname.split('.')[0]
+ vservername = options.buildname
+ if len(plcs) == 1 :
+ vservername = "%s-%s" % (vservername,simplehostname)
+ else:
+ plc_counter += 1
+ vservername = "%s-%d-%s" % (vservername,plc_counter,simplehostname)
+ # apply
+ plc['vservername']=vservername
+ plc['vserverip']=ip
+ plc['name'] = "%s_%s"%(plc['name'],simplehostname)
+ utils.header("Attaching plc %s to vserver %s (%s)"%(
+ plc['name'],plc['vservername'],plc['vserverip']))
+ for key in [ 'PLC_DB_HOST', 'PLC_API_HOST', 'PLC_WWW_HOST', 'PLC_BOOT_HOST',]:
+ plc[key] = hostname
+
+ return plcs
+
+    # as a plc step this should return a boolean
+    def step_pre (self,plc):
+        # backs TestPlc's 'local_pre' step: qemu-tracker bookkeeping before the test
+        return self.trqemu_record (plc) and self.trqemu_free(plc)
+
+    def step_post (self,plc):
+        # backs TestPlc's 'local_post' step: plc-tracker bookkeeping after the test
+        return self.trplc_record (plc) and self.trplc_free(plc)
+
+    def step_cleanup (self,plc):
+        # backs TestPlc's 'local_cleanup' step: wipe both trackers
+        return self.trqemu_cleanup(plc) and self.trplc_cleanup(plc)
+
+ ####################
+ def trqemu_record (self,plc):
+ tracker=TrackerQemu(plc.options,instances=self.max_qemus()-1)
+ for site_spec in plc.plc_spec['sites']:
+ for node_spec in site_spec['nodes']:
+ tracker.record(node_spec['host_box'],plc.options.buildname,node_spec['node_fields']['hostname'])
+ tracker.store()
+ return True
+
+ def trqemu_free (self,plc):
+ tracker=TrackerQemu(plc.options,instances=self.max_qemus()-1)
+ for site_spec in plc.plc_spec['sites']:
+ for node_spec in site_spec['nodes']:
+ tracker.free()
+ tracker.store()
+ return True
+
+    ###
+    # NOTE: these must accept 'plc' like the other tracker helpers -- the bodies
+    # reference plc.options and plc's ssh/vserver attributes, and step_post()
+    # calls them as self.trplc_record(plc) / self.trplc_free(plc)
+    def trplc_record (self,plc):
+        tracker = TrackerPlc(plc.options,instances=self.max_plcs())
+        tracker.record(plc.test_ssh.hostname,plc.vservername)
+        tracker.store()
+        return True
+
+    def trplc_free (self,plc):
+        tracker = TrackerPlc(plc.options,instances=self.max_plcs())
+        tracker.free()
+        tracker.store()
+        return True
+
+ ###
+ def trqemu_cleanup (self,plc):
+ tracker=TrackerQemu(plc.options,instances=self.max_qemus()-1)
+ for site_spec in plc.plc_spec['sites']:
+ for node_spec in site_spec['nodes']:
+ tracker.cleanup()
+ tracker.store()
+ return True
+
+ def trplc_cleanup (self,plc):
+ tracker = TrackerPlc(plc.options,instances=self.max_plcs())
+ tracker.cleanup()
+ tracker.store()
+ return True
+
--- /dev/null
+Historically, all the site-dependent configuration was part of the svn
+tree.
+
+However this turned out to be a wrong choice as the actual test h/w
+configuration changes over time, so when you try to test e.g. a 4.2
+build in 2009, you get a very old description of the test hardware.
+
+So, here's the way out
+
+==================== loading local resources
+
+- the test code expects to find a module named LocalTestResources in $HOME
+- and this module is expected to define the following stuff
+
+* one object named local_resources that implements the following methods
+
+* local_resources.localize (plcs,options) -> plcs
+--> this is called on the plcs obtained after the configs are loaded,
+    and should return the altered plcs
+
+* local_resources.local_pre (plc) -> boolean
+--> this is the implementation of the local_pre step on TestPlc
+does any required house cleaning, like turning down qemu instances or the like
+
+* local_resources.local_post (plc) -> boolean
+--> this is the implementation of the local_post step on TestPlc
+
+local_pre and local_post are parts of the default set of steps
+
+* local_list (plc) -> boolean
+* local_cleanup (plc) -> boolean
+
+
+===================== template
+
+The TestResources class can be used as a template for implementing local_resources
+See OnelabTestResources for how to use it
tracks=[]
self.tracks = [track for track in tracks if track]
+    def list (self):
+        "display the current contents of the tracker file"
+        try:
+            contents=file(self.filename).read()
+            print "==>",self.filename,"<=="
+            print contents
+        # only a missing/unreadable file is expected here; don't mask other errors
+        except IOError:
+            print "xxxxxxxxxxxx",self.filename,"not found"
+
def store (self):
out = file(self.filename,'w')
for track in self.tracks:
self.tracks.append( track )
print "Recorded %s in tracker %s"%(track,self.filename)
- # this actually stops the old instances to fit the number of instances
+ # this actually stops the old instances, so that the total fits in the number of instances
def free (self):
# number of instances to stop
how_many=len(self.tracks)-self.instances
DEFAULT_FILENAME=os.environ['HOME']+"/tracker-qemus"
# how many concurrent plcs are we keeping alive - adjust with the IP pool size
- DEFAULT_MAX_INSTANCES = 2
+ DEFAULT_MAX_INSTANCES = 3
def __init__ (self,options,filename=None,instances=0):
if not filename: filename=TrackerQemu.DEFAULT_FILENAME
+++ /dev/null
-# the defaults
-import utils
-import TestPlc
-
-# this default is for the OneLab test infrastructure
-
-def config (plc_specs, options):
-
- import config_main
- plcs = config_main.config([],options)
- if options.verbose:
- print '======================================== AFTER main'
- for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc)
- print '========================================'
-
- import config_1testqemus
- plcs = config_1testqemus.config (plcs,options)
- if options.verbose:
- print '======================================== AFTER testqemus'
- for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc)
- print '========================================'
-
- import config_1vnodes
- plcs = config_1vnodes.config(plcs,options)
- if options.verbose:
- print '======================================== AFTER vnodes'
- for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc)
- print '========================================'
-
- import config_1vplcs
- plcs = config_1vplcs.config (plcs,options)
- if options.verbose:
- print '======================================== AFTER vservers'
- for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc)
- print '========================================'
-
- return plcs
-
+++ /dev/null
-import sys
-
-from TestMapper import TestMapper
-from TestPool import TestPoolQemu
-
-onelab_qemus_pool = [ ( 'testqemu%d.onelab.eu'%i, None, None) for i in range(1,4) ]
-
-def config (plcs, options):
-
- # all plcs on the same vserver box
- plc_box ='testbox-plc.onelab.eu'
- # informative
- label=options.personality.replace("linux","")
-
- # all qemus on a unique pool of 64bits boxes
- node_map = []
- qemu_pool = TestPoolQemu (onelab_qemus_pool,options)
- for index in range(options.size):
- index += 1
- if options.ips_qemu:
- ip_or_hostname=options.ips_qemu.pop()
- (hostname,ip,unused)=qemu_pool.locate_entry(ip_or_hostname)
- else:
- (hostname,ip,unused) = qemu_pool.next_free()
- node_map += [ ('node%d'%index, {'host_box':hostname},) ]
-
- mapper = {'plc': [ ('*' , {'hostname':plc_box,
- 'PLC_DB_HOST':plc_box,
- 'PLC_API_HOST':plc_box,
- 'PLC_BOOT_HOST':plc_box,
- 'PLC_WWW_HOST':plc_box,
- 'name':'%s-'+label } )
- ],
- 'node': node_map,
- }
-
- return TestMapper(plcs,options).map(mapper)
+++ /dev/null
-# map all nodes onto the avail. pool
-
-import utils
-from TestMapper import TestMapper
-from TestPool import TestPoolIP
-
-onelab_nodes_ip_pool = [
- ( 'vnode%02d.inria.fr'%i,
- '138.96.255.%d'%(230+i),
- '02:34:56:00:00:%02d'%i) for i in range(1,10) ]
-
-site_dict = {
- 'interface_fields:gateway':'138.96.248.250',
- 'interface_fields:network':'138.96.0.0',
- 'interface_fields:broadcast':'138.96.255.255',
- 'interface_fields:netmask':'255.255.0.0',
- 'interface_fields:dns1': '138.96.0.10',
- 'interface_fields:dns2': '138.96.0.11',
-}
-
-def config (plcs, options):
-
- ip_pool = TestPoolIP (onelab_nodes_ip_pool,options)
- test_mapper = TestMapper (plcs, options)
-
- all_nodenames = test_mapper.node_names()
- maps = []
- for nodename in all_nodenames:
- if options.ips_node:
- ip_or_hostname=options.ips_node.pop()
- (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname)
- else:
- (hostname,ip,mac) = ip_pool.next_free()
- utils.header('Attaching node %s to %s (%s)'%(nodename,hostname,ip))
- node_dict= {'node_fields:hostname':hostname,
- 'interface_fields:ip':ip,
- 'interface_fields:mac':mac,
- }
-
- node_dict.update(site_dict)
- maps.append ( ( nodename, node_dict) )
-
- plc_map = [ ( '*' , { 'PLC_NET_DNS1' : site_dict [ 'interface_fields:dns1' ],
- 'PLC_NET_DNS2' : site_dict [ 'interface_fields:dns2' ], } ) ]
-
- return test_mapper.map ({'node': maps, 'plc' : plc_map } )
+++ /dev/null
-import utils
-import os.path
-from TestPool import TestPoolIP
-
-# using vplc01 .. vplc15 - keep [16,17,18] for 4.2 and 19 and 20 for long-haul tests
-onelab_plcs_ip_pool = [
- ( 'vplc%02d.inria.fr'%i,
- '138.96.255.%d'%(200+i),
- '02:34:56:00:ee:%02d'%i) for i in range(1,16) ]
-
-def config (plcs,options):
-
- utils.header ("Turning configuration into a vserver-based one for onelab")
-
- ip_pool = TestPoolIP (onelab_plcs_ip_pool,options)
-
- plc_counter=0
- for plc in plcs:
- try:
- if options.ips_plc :
- ip_or_hostname=options.ips_plc.pop()
- (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname)
- if not options.quiet:
- utils.header("Using user-provided %s %s for plc %s"%(
- hostname,ip_or_hostname,plc['name']))
- else:
- (hostname,ip,mac)=ip_pool.next_free()
- if not options.quiet:
- utils.header("Using auto-allocated %s %s for plc %s"%(
- hostname,ip,plc['name']))
-
- ### rewrite fields in plc
- # compute a helpful vserver name - remove domain in hostname
- simplehostname=hostname.split('.')[0]
- vservername = options.buildname
- if len(plcs) == 1 :
- vservername = "%s-%s" % (vservername,simplehostname)
- else:
- plc_counter += 1
- vservername = "%s-%d-%s" % (vservername,plc_counter,simplehostname)
- # apply
- plc['vservername']=vservername
- plc['vserverip']=ip
- plc['name'] = "%s_%s"%(plc['name'],simplehostname)
- utils.header("Attaching plc %s to vserver %s (%s)"%(
- plc['name'],plc['vservername'],plc['vserverip']))
- for key in [ 'PLC_DB_HOST', 'PLC_API_HOST', 'PLC_WWW_HOST', 'PLC_BOOT_HOST',]:
- plc[key] = hostname
-
- except:
- raise Exception('Cannot find an available IP for %s - exiting'%plc['name'])
-
- return plcs
+++ /dev/null
-# the defaults
-import utils
-import TestPlc
-
-# this default is for the Princeton test infrastructure
-
-def config (plc_specs, options):
-
- import config_main
- plcs = config_main.config([],options)
- if options.verbose:
- print '======================================== AFTER main'
- for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc)
- print '========================================'
-
- ### side-effects on global config (was for onelab.eu initially)
- from TestMapper import TestMapper
- main_mapper = TestMapper (plcs,options)
- plc_map = [ ( '*', {'PLC_ROOT_USER' : 'root@test.planet-lab.org',
- 'PLC_MAIL_ENABLED' : 'false',
- }) ]
- plcs = main_mapper.map ( { 'plc' : plc_map } )
-
- import config_ptestqemus
- plcs = config_ptestqemus.config (plcs,options)
- if options.verbose:
- print '======================================== AFTER testqemus'
- for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc)
- print '========================================'
-
- import config_pvnodes
- plcs = config_pvnodes.config(plcs,options)
- if options.verbose:
- print '======================================== AFTER vnodes'
- for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc)
- print '========================================'
-
- import config_pvplcs
- plcs = config_pvplcs.config (plcs,options)
- if options.verbose:
- print '======================================== AFTER vservers'
- for plc in plcs: TestPlc.TestPlc.display_mapping_plc(plc)
- print '========================================'
-
- return plcs
-
+++ /dev/null
-import sys
-
-from TestMapper import TestMapper
-from TestPool import TestPoolQemu
-
-princeton_qemus_pool = [ ( 'testqemu1.test.planet-lab.org', None, None ) ]
-
-def config (plcs, options):
-
- # all plcs on the same vserver box
- plc_box ='testbox.test.planet-lab.org'
- # informative
- label=options.personality.replace("linux","")
-
- # all qemus on a unique pool of 64bits boxes
- node_map = []
- qemu_pool = TestPoolQemu (princeton_qemus_pool,options)
- for index in range(options.size):
- index += 1
- if options.ips_qemu:
- ip_or_hostname=options.ips_qemu.pop()
- (hostname,ip,unused)=qemu_pool.locate_entry(ip_or_hostname)
- else:
- (hostname,ip,unused) = qemu_pool.next_free()
- node_map += [ ('node%d'%index, {'host_box':hostname},) ]
-
- mapper = {'plc': [ ('*' , {'hostname':plc_box,
- 'PLC_DB_HOST':plc_box,
- 'PLC_API_HOST':plc_box,
- 'PLC_BOOT_HOST':plc_box,
- 'PLC_WWW_HOST':plc_box,
- 'name':'%s-'+label } )
- ],
- 'node': node_map,
- }
-
- return TestMapper(plcs,options).map(mapper)
+++ /dev/null
-# map all nodes onto the avail. pool
-
-import utils
-from TestMapper import TestMapper
-from TestPool import TestPoolIP
-
-onelab_nodes_ip_pool = [
- ("node-01.test.planet-lab.org","128.112.139.44", "de:ad:be:ef:00:10"),
- ("node-02.test.planet-lab.org","128.112.139.66", "de:ad:be:ef:00:20"),
-]
-
-site_dict = {
- 'interface_fields:gateway':'128.112.139.1',
- 'interface_fields:network':'128.112.139.0',
- 'interface_fields:broadcast':'128.112.139.127',
- 'interface_fields:netmask':'255.255.255.128',
- 'interface_fields:dns1': '128.112.136.10',
- 'interface_fields:dns2': '128.112.136.12',
-}
-
-def config (plcs, options):
-
- ip_pool = TestPoolIP (onelab_nodes_ip_pool,options)
- test_mapper = TestMapper (plcs, options)
-
- all_nodenames = test_mapper.node_names()
- maps = []
- for nodename in all_nodenames:
- if options.ips_node:
- ip_or_hostname=options.ips_node.pop()
- (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname)
- else:
- (hostname,ip,mac) = ip_pool.next_free()
- utils.header('Attaching node %s to %s (%s)'%(nodename,hostname,ip))
- node_dict= {'node_fields:hostname':hostname,
- 'interface_fields:ip':ip,
- 'interface_fields:mac':mac,
- }
-
- node_dict.update(site_dict)
- maps.append ( ( nodename, node_dict) )
-
- plc_map = [ ( '*' , { 'PLC_NET_DNS1' : site_dict [ 'interface_fields:dns1' ],
- 'PLC_NET_DNS2' : site_dict [ 'interface_fields:dns2' ], } ) ]
-
- return test_mapper.map ({'node': maps, 'plc' : plc_map } )
+++ /dev/null
-import utils
-import os.path
-from TestPool import TestPoolIP
-
-# using vplc01 .. vplc15 - keep [16,17,18] for 4.2 and 19 and 20 for long-haul tests
-princeton_plcs_ip_pool = [
- ("plc-01.test.planet-lab.org","128.112.139.34", "de:ad:be:ef:ff:01"),
- ("plc-02.test.planet-lab.org","128.112.139.35", "de:ad:be:ef:ff:02"),
- ("plc-03.test.planet-lab.org","128.112.139.36", "de:ad:be:ef:ff:03"),
- ("plc-04.test.planet-lab.org","128.112.139.37", "de:ad:be:ef:ff:04"),
- ("plc-05.test.planet-lab.org","128.112.139.41", "de:ad:be:ef:ff:05"),
-]
-
-def config (plcs,options):
-
- utils.header ("Turning configuration into a vserver-based one for princeton")
-
- ip_pool = TestPoolIP (princeton_plcs_ip_pool,options)
-
- plc_counter=0
- for plc in plcs:
- try:
- if options.ips_plc :
- ip_or_hostname=options.ips_plc.pop()
- (hostname,ip,mac)=ip_pool.locate_entry(ip_or_hostname)
- if not options.quiet:
- utils.header("Using user-provided %s %s for plc %s"%(
- hostname,ip_or_hostname,plc['name']))
- else:
- (hostname,ip,mac)=ip_pool.next_free()
- if not options.quiet:
- utils.header("Using auto-allocated %s %s for plc %s"%(
- hostname,ip,plc['name']))
-
- ### rewrite fields in plc
- # compute a helpful vserver name - remove domain in hostname
- simplehostname=hostname.split('.')[0]
- vservername = options.buildname
- if len(plcs) == 1 :
- vservername = "%s-%s" % (vservername,simplehostname)
- else:
- plc_counter += 1
- vservername = "%s-%d-%s" % (vservername,plc_counter,simplehostname)
- # apply
- plc['vservername']=vservername
- plc['vserverip']=ip
- plc['name'] = "%s_%s"%(plc['name'],simplehostname)
- utils.header("Attaching plc %s to vserver %s (%s)"%(
- plc['name'],plc['vservername'],plc['vserverip']))
- for key in [ 'PLC_DB_HOST', 'PLC_API_HOST', 'PLC_WWW_HOST', 'PLC_BOOT_HOST',]:
- plc[key] = hostname
-
- except:
- raise Exception('Cannot find an available IP for %s - exiting'%plc['name'])
-
- return plcs