X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=system%2FTestPlc.py;h=ab2c501d7c449235dab289178d31108cdd247ac4;hb=c321262ffc57008b1d4e9fd33fbd60fa46a8238a;hp=335db1497fca21032ae2b2da5f89c22e5e2533ab;hpb=33445fec6cf8701a43784067e501f577a6b3a6e4;p=tests.git

diff --git a/system/TestPlc.py b/system/TestPlc.py
index 335db14..ab2c501 100644
--- a/system/TestPlc.py
+++ b/system/TestPlc.py
@@ -1,12 +1,13 @@
-# $Id$
+# Thierry Parmentelat
+# Copyright (C) 2010 INRIA
+#
 import os, os.path
 import datetime
 import time
 import sys
-import xmlrpclib
-import datetime
 import traceback
 from types import StringTypes
+import socket

 import utils
 from TestSite import TestSite
@@ -15,48 +16,149 @@ from TestUser import TestUser
 from TestKey import TestKey
 from TestSlice import TestSlice
 from TestSliver import TestSliver
-from TestBox import TestBox
+from TestBoxQemu import TestBoxQemu
 from TestSsh import TestSsh
+from TestApiserver import TestApiserver
+from TestSliceSfa import TestSliceSfa

-# step methods must take (self, options) and return a boolean
+# step methods must take (self) and return a boolean (options is a member of the class)

-def standby(minutes):
-    utils.header('Entering StandBy for %d mn'%minutes)
+def standby(minutes,dry_run):
+    utils.header('Entering StandBy for %d mn'%minutes)
+    if dry_run:
+        print 'dry_run'
+    else:
     time.sleep(60*minutes)
-    return True
+    return True

 def standby_generic (func):
-    def actual(self,options):
+    def actual(self):
         minutes=int(func.__name__.split("_")[1])
-        return standby(minutes)
+        return standby(minutes,self.options.dry_run)
+    return actual
+
+def node_mapper (method):
+    def actual(self):
+        overall=True
+        node_method = TestNode.__dict__[method.__name__]
+        for site_spec in self.plc_spec['sites']:
+            test_site = TestSite (self,site_spec)
+            for node_spec in site_spec['nodes']:
+                test_node = TestNode (self,test_site,node_spec)
+                if not node_method(test_node): overall=False
+        return overall
+    # restore the doc text
+    actual.__doc__=method.__doc__
+    return actual
+
+def slice_mapper (method):
+    def actual(self):
+        overall=True
+        slice_method = TestSlice.__dict__[method.__name__]
+        for slice_spec in self.plc_spec['slices']:
+            site_spec = self.locate_site (slice_spec['sitename'])
+            test_site = TestSite(self,site_spec)
+            test_slice=TestSlice(self,test_site,slice_spec)
+            if not slice_method(test_slice,self.options): overall=False
+        return overall
+    # restore the doc text
+    actual.__doc__=method.__doc__
     return actual

+def slice_sfa_mapper (method):
+    def actual(self):
+        overall=True
+        slice_method = TestSliceSfa.__dict__[method.__name__]
+        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
+            site_spec = self.locate_site (slice_spec['sitename'])
+            test_site = TestSite(self,site_spec)
+            test_slice=TestSliceSfa(self,test_site,slice_spec)
+            if not slice_method(test_slice,self.options): overall=False
+        return overall
+    # restore the doc text
+    actual.__doc__=method.__doc__
+    return actual
+
+SEP='<sep>'
+SEPSFA='<sepsfa>'

 class TestPlc:

-    def __init__ (self,plc_spec):
+    default_steps = [
+        'show', SEP,
+        'vs_delete','timestamp_vs','vs_create', SEP,
+        'plc_install', 'plc_configure', 'plc_start', SEP,
+        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
+        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
+        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
+        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
+        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
+        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
+        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
+        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
+        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
+        # but as the stress test might take a while, we sometimes missed the debug mode..
+        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
+        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
+        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
+        'check_tcp', SEP,
+        'force_gather_logs', SEP,
+        ]
+    other_steps = [
+        'export', 'show_boxes', SEP,
+        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
+        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
+        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
+        'delete_leases', 'list_leases', SEP,
+        'populate' , SEP,
+        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
+        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
+        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
+        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
+        'plc_db_dump' , 'plc_db_restore', SEP,
+        'standby_1_through_20',SEP,
+        ]
+
+    @staticmethod
+    def printable_steps (list):
+        single_line=" ".join(list)+" "
+        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
+    @staticmethod
+    def valid_step (step):
+        return step != SEP and step != SEPSFA
+
+    # turn off the sfa-related steps when build has skipped SFA
+    # this is originally for centos5 as recent SFAs won't build on this platform
+    @staticmethod
+    def check_whether_build_has_sfa (rpms_url):
+        # warning, we're now building 'sface' so let's be a bit more picky
+        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
+        # full builds are expected to return with 0 here
+        if retcod!=0:
+            # move all steps containing 'sfa' from default_steps to other_steps
+            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
+            TestPlc.other_steps += sfa_steps
+            for step in sfa_steps: TestPlc.default_steps.remove(step)
+
+    def __init__ (self,plc_spec,options):
         self.plc_spec=plc_spec
-        self.path=os.path.dirname(sys.argv[0])
-        self.test_ssh=TestSsh(self)
+        self.options=options
+        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
         try:
             self.vserverip=plc_spec['vserverip']
             self.vservername=plc_spec['vservername']
             self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
             self.vserver=True
         except:
-            self.vserver=False
-            self.url="https://%s:443/PLCAPI/"%plc_spec['hostname']
-        utils.header('Using API url %s'%self.url)
-        self.server=xmlrpclib.Server(self.url,allow_none=True)
+            raise Exception,'chroot-based myplc testing is deprecated'
+        self.apiserver=TestApiserver(self.url,options.dry_run)

     def name(self):
         name=self.plc_spec['name']
-        if self.vserver:
-            return name+"[%s]"%self.vservername
-        else:
-            return name+"[chroot]"
+        return "%s.%s"%(name,self.vservername)

     def hostname(self):
-        return self.plc_spec['hostname']
+        return self.plc_spec['host_box']

     def is_local (self):
         return self.test_ssh.is_local()
@@ -65,30 +167,44 @@ class TestPlc:
     # would help, but not strictly necessary
     def connect (self):
         pass
+
+    def actual_command_in_guest (self,command):
+        return self.test_ssh.actual_command(self.host_to_guest(command))
+
+    def start_guest (self):
+        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
+
+    def stop_guest (self):
+        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
+
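The node_mapper / slice_mapper / slice_sfa_mapper decorators defined near the top of this patch all share one mechanism: wrap a per-object method into a TestPlc step that fans out over every matching object in plc_spec and AND-combines the results. A minimal self-contained sketch of that mechanism (names below are illustrative, not part of the module):

# sketch of the *_mapper fan-out idea; items(self) is assumed to yield
# the per-node or per-slice helper objects built from plc_spec
def mapper(items):
    def decorate(method):
        def actual(self):
            overall=True
            for item in items(self):
                # any single failure flips the overall step result
                if not method(item): overall=False
            return overall
        # keep the step's doc text, as the real decorators do
        actual.__doc__=method.__doc__
        return actual
    return decorate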
+    def run_in_guest (self,command):
+        return utils.system(self.actual_command_in_guest(command))

-    #command gets run in the chroot/vserver
+    def run_in_host (self,command):
+        return self.test_ssh.run_in_buildname(command)
+
+    #command gets run in the vserver
     def host_to_guest(self,command):
-        if self.vserver:
-            return "vserver %s exec %s"%(self.vservername,command)
-        else:
-            return "chroot /plc/root %s"%TestSsh.backslash_shell_specials(command)
+        return "vserver %s exec %s"%(self.vservername,command)

-    # copy a file to the myplc root image - pass in_data=True if the file must go in /plc/data
-    def copy_in_guest (self, localfile, remotefile, in_data=False):
-        if in_data:
-            chroot_dest="/plc/data"
-        else:
-            chroot_dest="/plc/root"
-        if self.is_local():
-            if not self.vserver:
-                utils.system("cp %s %s/%s"%(localfile,chroot_dest,remotefile))
-            else:
-                utils.system("cp %s /vservers/%s/%s"%(localfile,self.vservername,remotefile))
-        else:
-            if not self.vserver:
-                utils.system("scp %s %s:%s/%s"%(localfile,self.hostname(),chroot_dest,remotefile))
-            else:
-                utils.system("scp %s %s@/vservers/%s/%s"%(localfile,self.hostname(),self.vservername,remotefile))
+    #start/stop the vserver
+    def start_guest_in_host(self):
+        return "vserver %s start"%(self.vservername)
+
+    def stop_guest_in_host(self):
+        return "vserver %s stop"%(self.vservername)
+
+    # xxx quick n dirty
+    def run_in_guest_piped (self,local,remote):
+        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
+
+    # does a yum install in the vs, ignore yum retcod, check with rpm
+    def yum_install (self, rpms):
+        if isinstance (rpms, list):
+            rpms=" ".join(rpms)
+        self.run_in_guest("yum -y install %s"%rpms)
+        self.run_in_guest("yum-complete-transaction")
+        return self.run_in_guest("rpm -q %s"%rpms)==0

     def auth_root (self):
         return {'Username':self.plc_spec['PLC_ROOT_USER'],
@@ -107,16 +223,60 @@ class TestPlc:
     def locate_node (self,nodename):
         for site in self.plc_spec['sites']:
             for node in site['nodes']:
-                if node['node_fields']['hostname'] == nodename:
+                if node['name'] == nodename:
                     return (site,node)
         raise Exception,"Cannot locate node %s"%nodename

+    def locate_hostname (self,hostname):
+        for site in self.plc_spec['sites']:
+            for node in site['nodes']:
+                if node['node_fields']['hostname'] == hostname:
+                    return (site,node)
+        raise Exception,"Cannot locate hostname %s"%hostname
+
     def locate_key (self,keyname):
         for key in self.plc_spec['keys']:
             if key['name'] == keyname:
                 return key
         raise Exception,"Cannot locate key %s"%keyname

+    def locate_slice (self, slicename):
+        for slice in self.plc_spec['slices']:
+            if slice['slice_fields']['name'] == slicename:
+                return slice
+        raise Exception,"Cannot locate slice %s"%slicename
+
+    def all_sliver_objs (self):
+        result=[]
+        for slice_spec in self.plc_spec['slices']:
+            slicename = slice_spec['slice_fields']['name']
+            for nodename in slice_spec['nodenames']:
+                result.append(self.locate_sliver_obj (nodename,slicename))
+        return result
+
+    def locate_sliver_obj (self,nodename,slicename):
+        (site,node) = self.locate_node(nodename)
+        slice = self.locate_slice (slicename)
+        # build objects
+        test_site = TestSite (self, site)
+        test_node = TestNode (self, test_site,node)
+        # xxx the slice site is assumed to be the node site - mhh - probably harmless
+        test_slice = TestSlice (self, test_site, slice)
+        return TestSliver (self, test_node, test_slice)
+
+    def locate_first_node(self):
+        nodename=self.plc_spec['slices'][0]['nodenames'][0]
+        (site,node) = self.locate_node(nodename)
+        test_site = TestSite (self, site)
+        test_node = TestNode (self, test_site,node)
+        return test_node
+
+    def locate_first_sliver (self):
+        slice_spec=self.plc_spec['slices'][0]
+        slicename=slice_spec['slice_fields']['name']
+        nodename=slice_spec['nodenames'][0]
+        return self.locate_sliver_obj(nodename,slicename)
+
     # all different hostboxes used in this plc
     def gather_hostBoxes(self):
         # maps on sites and nodes, return [ (host_box,test_node) ]
@@ -127,7 +287,7 @@ class TestPlc:
                 test_node = TestNode (self, test_site, node_spec)
                 if not test_node.is_real():
                     tuples.append( (test_node.host_box(),test_node) )
-        # transform into a dict { 'host_box' -> [ hostnames .. ] }
+        # transform into a dict { 'host_box' -> [ test_node .. ] }
         result = {}
         for (box,node) in tuples:
             if not result.has_key(box):
@@ -137,193 +297,373 @@ class TestPlc:
         return result

     # a step for checking this stuff
-    def showboxes (self,options):
-        print 'showboxes'
+    def show_boxes (self):
+        'print summary of nodes location'
         for (box,nodes) in self.gather_hostBoxes().iteritems():
             print box,":"," + ".join( [ node.name() for node in nodes ] )
         return True

     # make this a valid step
-    def kill_all_qemus(self,options):
+    def qemu_kill_all(self):
+        'kill all qemu instances on the qemu boxes involved by this setup'
+        # this is the brute force version, kill all qemus on that host box
         for (box,nodes) in self.gather_hostBoxes().iteritems():
-            # this is the brute force version, kill all qemus on that host box
-            TestBox(box,options.buildname).kill_all_qemus()
+            # pass the first nodename, as we don't push template-qemu on testboxes
+            nodedir=nodes[0].nodedir()
+            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
         return True

     # make this a valid step
-    def list_all_qemus(self,options):
+    def qemu_list_all(self):
+        'list all qemu instances on the qemu boxes involved by this setup'
         for (box,nodes) in self.gather_hostBoxes().iteritems():
-            # push the script
-            TestBox(box,options.buildname).copy("qemu_kill.sh")
             # this is the brute force version, kill all qemus on that host box
-            TestBox(box,options.buildname).run_in_buildname("qemu_kill.sh -l")
+            TestBoxQemu(box,self.options.buildname).qemu_list_all()
+        return True
+
+    # kill only the right qemus
+    def qemu_list_mine(self):
+        'list qemu instances for our nodes'
+        for (box,nodes) in self.gather_hostBoxes().iteritems():
+            # the fine-grain version
+            for node in nodes:
+                node.list_qemu()
         return True

     # kill only the right qemus
-    def force_kill_qemus(self,options):
+    def qemu_kill_mine(self):
+        'kill the qemu instances for our nodes'
         for (box,nodes) in self.gather_hostBoxes().iteritems():
-            # push the script
-            TestBox(box,options.buildname).copy("qemu_kill.sh")
             # the fine-grain version
             for node in nodes:
                 node.kill_qemu()
         return True

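gather_hostBoxes() above first collects (host_box, test_node) tuples and then folds them into a dict keyed by box, which is what the qemu_* steps iterate over. The grouping idiom in isolation, with made-up data:

# sketch: group test nodes by their hosting box, as gather_hostBoxes does
tuples = [ ('box1','node1'), ('box1','node2'), ('box2','node3') ]
result = {}
for (box,node) in tuples:
    result.setdefault(box,[]).append(node)
# result == {'box1': ['node1', 'node2'], 'box2': ['node3']}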
-    def clear_ssh_config (self,options):
-        # install local ssh_config file as root's .ssh/config - ssh should be quiet
-        # dir might need creation first
-        self.test_ssh.run_in_guest("mkdir /root/.ssh")
-        self.test_ssh.run_in_guest("chmod 700 /root/.ssh")
-        # this does not work - > redirection somehow makes it until an argument to cat
-        #self.run_in_guest_piped("cat ssh_config","cat > /root/.ssh/config")
-        self.copy_in_guest("ssh_config","/root/.ssh/config",True)
-        return True
-
-    #################### step methods
-
-    ### uninstall
-    def uninstall_chroot(self,options):
-        self.test_ssh.run_in_host('service plc safestop')
-        #####detecting the last myplc version installed and remove it
-        self.test_ssh.run_in_host('rpm -e myplc')
-        ##### Clean up the /plc directory
-        self.test_ssh.run_in_host('rm -rf /plc/data')
-        ##### stop any running vservers
-        self.test_ssh.run_in_host('for vserver in $(ls /vservers/* | sed -e s,/vservers/,,) ; do vserver $vserver stop ; done')
-        return True
-
-    def uninstall_vserver(self,options):
-        self.test_ssh.run_in_host("vserver --silent %s delete"%self.vservername)
-        return True
-
-    def uninstall(self,options):
-        # if there's a chroot-based myplc running, and then a native-based myplc is being deployed
-        # it sounds safer to have the former uninstalled too
-        # now the vserver method cannot be invoked for chroot instances as vservername is required
-        if self.vserver:
-            self.uninstall_vserver(options)
-            self.uninstall_chroot(options)
-        else:
-            self.uninstall_chroot(options)
+    #################### display config
+    def show (self):
+        "show test configuration after localization"
+        self.display_pass (1)
+        self.display_pass (2)
         return True

-    ### install
-    def install_chroot(self,options):
-        # nothing to do
+    def export (self):
+        "print cut'n paste-able stuff to export env variables to your shell"
+        # these work but the shell prompt does not get displayed..
+        command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
+        command2="ssh root@%s %s"%(socket.gethostname(),command1)
+        # guess local domain from hostname
+        domain=socket.gethostname().split('.',1)[1]
+        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
+        print "export BUILD=%s"%self.options.buildname
+        print "export PLCHOST=%s"%fqdn
+        print "export GUEST=%s"%self.plc_spec['vservername']
+        # find hostname of first node
+        (hostname,qemubox) = self.all_node_infos()[0]
+        print "export KVMHOST=%s.%s"%(qemubox,domain)
+        print "export NODE=%s"%(hostname)
+        return True
+
+    # entry point
+    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
+    def display_pass (self,passno):
+        for (key,val) in self.plc_spec.iteritems():
+            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
+            if passno == 2:
+                if key == 'sites':
+                    for site in val:
+                        self.display_site_spec(site)
+                        for node in site['nodes']:
+                            self.display_node_spec(node)
+                elif key=='initscripts':
+                    for initscript in val:
+                        self.display_initscript_spec (initscript)
+                elif key=='slices':
+                    for slice in val:
+                        self.display_slice_spec (slice)
+                elif key=='keys':
+                    for key in val:
+                        self.display_key_spec (key)
+            elif passno == 1:
+                if key not in ['sites','initscripts','slices','keys', 'sfa']:
+                    print '+   ',key,':',val
+
+    def display_site_spec (self,site):
+        print '+ ======== site',site['site_fields']['name']
+        for (k,v) in site.iteritems():
+            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
+            if k=='nodes':
+                if v:
+                    print '+       ','nodes : ',
+                    for node in v:
+                        print node['node_fields']['hostname'],'',
+                    print ''
+            elif k=='users':
+                if v:
+                    print '+       users : ',
+                    for user in v:
+                        print user['name'],'',
+                    print ''
+            elif k == 'site_fields':
+                print '+       login_base',':',v['login_base']
+            elif k == 'address_fields':
+                pass
+            else:
+                print '+       ',
+                utils.pprint(k,v)
+
+    def display_initscript_spec (self,initscript):
+        print '+ ======== initscript',initscript['initscript_fields']['name']
+
+    def display_key_spec (self,key):
+        print '+ ======== key',key['name']
+
+    def display_slice_spec (self,slice):
+        print '+ ======== slice',slice['slice_fields']['name']
+        for (k,v) in slice.iteritems():
+            if k=='nodenames':
+                if v:
+                    print '+       nodes : ',
+                    for nodename in v:
+                        print nodename,'',
+                    print ''
+            elif k=='usernames':
+                if v:
+                    print '+       users : ',
+                    for username in v:
+                        print username,'',
+                    print ''
+            elif k=='slice_fields':
+                print '+       fields',':',
+                print 'max_nodes=',v['max_nodes'],
+                print ''
+            else:
+                print '+       ',k,v
+
+    def display_node_spec (self,node):
+        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
+        print "hostname=",node['node_fields']['hostname'],
+        print "ip=",node['interface_fields']['ip']
+        if self.options.verbose:
+            utils.pprint("node details",node,depth=3)
+
+    # another entry point for just showing the boxes involved
+    def display_mapping (self):
+        TestPlc.display_mapping_plc(self.plc_spec)
         return True

-    # xxx this would not work with hostname != localhost as mylc-init-vserver was extracted locally
-    def install_vserver(self,options):
-        # we need build dir for vtest-init-vserver
+    @staticmethod
+    def display_mapping_plc (plc_spec):
+        print '+ MyPLC',plc_spec['name']
+        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
+        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
+        for site_spec in plc_spec['sites']:
+            for node_spec in site_spec['nodes']:
+                TestPlc.display_mapping_node(node_spec)
+
+    @staticmethod
+    def display_mapping_node (node_spec):
+        print '+   NODE %s'%(node_spec['name'])
+        print '+\tqemu box %s'%node_spec['host_box']
+        print '+\thostname=%s'%node_spec['node_fields']['hostname']
+
+    # write a timestamp in /vservers/<>.timestamp
+    # cannot be inside the vserver, that causes vserver .. build to cough
+    def timestamp_vs (self):
+        now=int(time.time())
+        return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
+
+#    def local_pre (self):
+#        "run site-dependant pre-test script as defined in LocalTestResources"
+#        from LocalTestResources import local_resources
+#        return local_resources.step_pre(self)
+#
+#    def local_post (self):
+#        "run site-dependant post-test script as defined in LocalTestResources"
+#        from LocalTestResources import local_resources
+#        return local_resources.step_post(self)
+#
+#    def local_list (self):
+#        "run site-dependant list script as defined in LocalTestResources"
+#        from LocalTestResources import local_resources
+#        return local_resources.step_list(self)
+#
+#    def local_rel (self):
+#        "run site-dependant release script as defined in LocalTestResources"
+#        from LocalTestResources import local_resources
+#        return local_resources.step_release(self)
+#
+#    def local_rel_plc (self):
+#        "run site-dependant release script as defined in LocalTestResources"
+#        from LocalTestResources import local_resources
+#        return local_resources.step_release_plc(self)
+#
+#    def local_rel_qemu (self):
+#        "run site-dependant release script as defined in LocalTestResources"
+#        from LocalTestResources import local_resources
+#        return local_resources.step_release_qemu(self)
+#
+    def vs_delete(self):
+        "vserver delete the test myplc"
+        self.run_in_host("vserver --silent %s delete"%self.vservername)
+        self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
+        return True
+
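Most steps in this class funnel through run_in_guest(), which layers three wrappers: the command itself, the vserver exec wrapper from host_to_guest(), and the ssh wrapper built by TestSsh. Roughly, and glossing over the quoting that TestSsh.actual_command really handles (host and guest names below are hypothetical):

# sketch of the command nesting performed by run_in_guest
vservername='vplc01'                    # hypothetical guest name
host_box='testbox.example.org'          # hypothetical host box
command='service plc start'
in_guest="vserver %s exec %s"%(vservername,command)     # host_to_guest
in_host="ssh root@%s '%s'"%(host_box,in_guest)          # TestSsh wrapper, roughly
# -> ssh root@testbox.example.org 'vserver vplc01 exec service plc start'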
+    ### install
+    # historically the build was being fetched by the tests
+    # now the build pushes itself as a subdir of the tests workdir
+    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
+    def vs_create (self):
+        "vserver creation (no install done)"
+        # push the local build/ dir to the testplc box
         if self.is_local():
             # a full path for the local calls
-            build_dir=self.path+"/build"
-        else:
-            # use a standard name - will be relative to HOME
-            build_dir="tests-system-build"
-        build_checkout = "svn checkout %s %s"%(options.build_url,build_dir)
-        if self.test_ssh.run_in_host(build_checkout) != 0:
-            raise Exception,"Cannot checkout build dir"
-        # the repo url is taken from myplc-url
-        # with the last two steps (i386/myplc...) removed
-        repo_url = options.myplc_url
-        repo_url = os.path.dirname(repo_url)
-        create_vserver="%s/vtest-init-vserver.sh %s %s -- --interface eth0:%s"%\
-            (build_dir,self.vservername,repo_url,self.vserverip)
-        if self.test_ssh.run_in_host(create_vserver) != 0:
-            raise Exception,"Could not create vserver for %s"%self.vservername
-        return True
-
-    def install(self,options):
-        if self.vserver:
-            return self.install_vserver(options)
+            build_dir=os.path.dirname(sys.argv[0])
+            # sometimes this is empty - set to "." in such a case
+            if not build_dir: build_dir="."
+            build_dir += "/build"
         else:
-            return self.install_chroot(options)
-
-    ### install_rpm
-    def cache_rpm(self,url):
-        self.test_ssh.run_in_host('rm -rf *.rpm')
-        utils.header('Curling rpm from %s'%url)
-        id= self.test_ssh.run_in_host('curl -O '+url)
-        if (id != 0):
-            raise Exception,"Could not get rpm from %s"%url
-            return False
-        return True
-
-    def install_rpm_chroot(self,options):
-        rpm = os.path.basename(options.myplc_url)
-        if (not os.path.isfile(rpm)):
-            self.cache_rpm(options.myplc_url)
-        utils.header('Installing the :  %s'%rpm)
-        self.test_ssh.run_in_host('rpm -Uvh '+rpm)
-        self.test_ssh.run_in_host('service plc mount')
-        return True
-
-    def install_rpm_vserver(self,options):
-        self.test_ssh.run_in_guest("yum -y install myplc-native")
-        return True
-
-    def install_rpm(self,options):
-        if self.vserver:
-            return self.install_rpm_vserver(options)
+            # use a standard name - will be relative to remote buildname
+            build_dir="build"
+            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
+            self.test_ssh.rmdir(build_dir)
+            self.test_ssh.copy(build_dir,recursive=True)
+        # the repo url is taken from arch-rpms-url
+        # with the last step (i386) removed
+        repo_url = self.options.arch_rpms_url
+        for level in [ 'arch' ]:
+            repo_url = os.path.dirname(repo_url)
+        # pass the vbuild-nightly options to vtest-init-vserver
+        test_env_options=""
+        test_env_options += " -p %s"%self.options.personality
+        test_env_options += " -d %s"%self.options.pldistro
+        test_env_options += " -f %s"%self.options.fcdistro
+        script="vtest-init-vserver.sh"
+        vserver_name = self.vservername
+        vserver_options="--netdev eth0 --interface %s"%self.vserverip
+        try:
+            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
+            vserver_options += " --hostname %s"%vserver_hostname
+        except:
+            print "Cannot reverse lookup %s"%self.vserverip
+            print "This is considered fatal, as this might pollute the test results"
+            return False
+        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
+        return self.run_in_host(create_vserver) == 0
+
+    ### install_rpm
+    def plc_install(self):
+        "yum install myplc, noderepo, and the plain bootstrapfs"
+
+        # workaround for getting pgsql8.2 on centos5
+        if self.options.fcdistro == "centos5":
+            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
+
+        # compute nodefamily
+        if self.options.personality == "linux32":
+            arch = "i386"
+        elif self.options.personality == "linux64":
+            arch = "x86_64"
         else:
-            return self.install_rpm_chroot(options)
+            raise Exception, "Unsupported personality %r"%self.options.personality
+        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
+
+        pkgs_list=[]
+        pkgs_list.append ("slicerepo-%s"%nodefamily)
+        pkgs_list.append ("myplc")
+        pkgs_list.append ("noderepo-%s"%nodefamily)
+        pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
+        pkgs_string=" ".join(pkgs_list)
+        return self.yum_install (pkgs_list)

     ###
-    def configure(self,options):
+    def plc_configure(self):
+        "run plc-config-tty"
         tmpname='%s.plc-config-tty'%(self.name())
         fileconf=open(tmpname,'w')
         for var in [ 'PLC_NAME',
-                     'PLC_ROOT_PASSWORD',
                      'PLC_ROOT_USER',
+                     'PLC_ROOT_PASSWORD',
+                     'PLC_SLICE_PREFIX',
                      'PLC_MAIL_ENABLED',
                      'PLC_MAIL_SUPPORT_ADDRESS',
                      'PLC_DB_HOST',
+#                     'PLC_DB_PASSWORD',
+                     # Above line was added for integrating SFA Testing
                      'PLC_API_HOST',
                      'PLC_WWW_HOST',
                      'PLC_BOOT_HOST',
                      'PLC_NET_DNS1',
-                     'PLC_NET_DNS2']:
+                     'PLC_NET_DNS2',
+                     'PLC_RESERVATION_GRANULARITY',
+                     'PLC_OMF_ENABLED',
+                     'PLC_OMF_XMPP_SERVER',
+                     ]:
             fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
         fileconf.write('w\n')
         fileconf.write('q\n')
         fileconf.close()
         utils.system('cat %s'%tmpname)
-        self.test_ssh.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
+        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
         utils.system('rm %s'%tmpname)
         return True

-    # the chroot install is slightly different to this respect
-    def start(self, options):
-        if self.vserver:
-            self.test_ssh.run_in_guest('service plc start')
-        else:
-            self.test_ssh.run_in_host('service plc start')
+    def plc_start(self):
+        "service plc start"
+        self.run_in_guest('service plc start')
         return True

-    def stop(self, options):
-        if self.vserver:
-            self.test_ssh.run_in_guest('service plc stop')
-        else:
-            self.test_ssh.run_in_host('service plc stop')
+    def plc_stop(self):
+        "service plc stop"
+        self.run_in_guest('service plc stop')
         return True

-    # could use a TestKey class
-    def store_keys(self, options):
+    def vs_start (self):
+        "start the PLC vserver"
+        self.start_guest()
+        return True
+
+    def vs_stop (self):
+        "stop the PLC vserver"
+        self.stop_guest()
+        return True
+
+    # stores the keys from the config for further use
+    def keys_store(self):
+        "stores test users ssh keys in keys/"
         for key_spec in self.plc_spec['keys']:
-            TestKey(self,key_spec).store_key()
+                TestKey(self,key_spec).store_key()
+        return True
+
+    def keys_clean(self):
+        "removes keys cached in keys/"
+        utils.system("rm -rf ./keys")
         return True

-    def clean_keys(self, options):
-        utils.system("rm -rf %s/keys/"%self.path)
+    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
+    # for later direct access to the nodes
+    def keys_fetch(self):
+        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
+        dir="./keys"
+        if not os.path.isdir(dir):
+            os.mkdir(dir)
+        vservername=self.vservername
+        overall=True
+        prefix = 'debug_ssh_key'
+        for ext in [ 'pub', 'rsa' ] :
+            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
+            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
+            if self.test_ssh.fetch(src,dst) != 0: overall=False
+        return overall

-    def sites (self,options):
-        return self.do_sites(options)
+    def sites (self):
+        "create sites with PLCAPI"
+        return self.do_sites()

-    def clean_sites (self,options):
-        return self.do_sites(options,action="delete")
+    def delete_sites (self):
+        "delete sites with PLCAPI"
+        return self.do_sites(action="delete")

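plc_configure() above drives plc-config-tty non-interactively: for each variable it emits an 'e VAR' line followed by the value on the next line, then 'w' to write the config and 'q' to quit. A sketch of the generated script, with made-up values:

# sketch: building the script that gets piped into plc-config-tty
script=""
for (var,val) in [ ('PLC_NAME','TestLab'), ('PLC_ROOT_USER','root@plc.example.org') ]:
    script += 'e %s\n%s\n'%(var,val)    # 'e VAR' then the value, as at the tty prompt
script += 'w\nq\n'                      # write config, quit
# the file is then run remotely as: cat <tmpname> | plc-config-tty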
-    def do_sites (self,options,action="add"):
+    def do_sites (self,action="add"):
         for site_spec in self.plc_spec['sites']:
             test_site = TestSite (self,site_spec)
             if (action != "add"):
@@ -338,12 +678,23 @@ class TestPlc:
                 test_site.create_users()
         return True

-    def nodes (self, options):
-        return self.do_nodes(options)
-    def clean_nodes (self, options):
-        return self.do_nodes(options,action="delete")
+    def delete_all_sites (self):
+        "Delete all sites in PLC, and related objects"
+        print 'auth_root',self.auth_root()
+        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
+        for site_id in site_ids:
+            print 'Deleting site_id',site_id
+            self.apiserver.DeleteSite(self.auth_root(),site_id)
+        return True

-    def do_nodes (self, options,action="add"):
+    def nodes (self):
+        "create nodes with PLCAPI"
+        return self.do_nodes()
+    def delete_nodes (self):
+        "delete nodes with PLCAPI"
+        return self.do_nodes(action="delete")
+
+    def do_nodes (self,action="add"):
         for site_spec in self.plc_spec['sites']:
             test_site = TestSite (self,site_spec)
             if action != "add":
@@ -360,9 +711,77 @@ class TestPlc:
                 test_node.create_node ()
         return True

+    def nodegroups (self):
+        "create nodegroups with PLCAPI"
+        return self.do_nodegroups("add")
+    def delete_nodegroups (self):
+        "delete nodegroups with PLCAPI"
+        return self.do_nodegroups("delete")
+
+    YEAR = 365*24*3600
+    @staticmethod
+    def translate_timestamp (start,grain,timestamp):
+        if timestamp < TestPlc.YEAR:    return start+timestamp*grain
+        else:                           return timestamp
+
+    @staticmethod
+    def timestamp_printable (timestamp):
+        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
+
+    def leases(self):
+        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
+        now=int(time.time())
+        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
+        print 'API answered grain=',grain
+        start=(now/grain)*grain
+        start += grain
+        # find out all nodes that are reservable
+        nodes=self.all_reservable_nodenames()
+        if not nodes:
+            utils.header ("No reservable node found - proceeding without leases")
+            return True
+        ok=True
+        # attach them to the leases as specified in plc_specs
+        # this is where the 'leases' field gets interpreted as relative or absolute
+        for lease_spec in self.plc_spec['leases']:
+            # skip the ones that come with a null slice id
+            if not lease_spec['slice']: continue
+            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
+            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
+            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
+                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
+            if lease_addition['errors']:
+                utils.header("Cannot create leases, %s"%lease_addition['errors'])
+                ok=False
+            else:
+                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
+                              (nodes,lease_spec['slice'],
+                               lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
+                               lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
+
+        return ok
+
+    def delete_leases (self):
+        "remove all leases in the myplc side"
+        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
+        utils.header("Cleaning leases %r"%lease_ids)
+        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
+        return True
+
+    def list_leases (self):
+        "list all leases known to the myplc"
+        leases = self.apiserver.GetLeases(self.auth_root())
+        now=int(time.time())
+        for l in leases:
+            current=l['t_until']>=now
+            if self.options.verbose or current:
+                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
+                                                       TestPlc.timestamp_printable(l['t_from']),
+                                                       TestPlc.timestamp_printable(l['t_until'])))
+        return True
+
     # create nodegroups if needed, and populate
-    # no need for a clean_nodegroups if we are careful enough
-    def nodegroups (self, options):
+    def do_nodegroups (self, action="add"):
         # 1st pass to scan contents
         groups_dict = {}
         for site_spec in self.plc_spec['sites']:
@@ -378,27 +797,80 @@ class TestPlc:
                 groups_dict[nodegroupname]=[]
                 groups_dict[nodegroupname].append(test_node.name())
         auth=self.auth_root()
+        overall = True
         for (nodegroupname,group_nodes) in groups_dict.iteritems():
-            try:
-                self.server.GetNodeGroups(auth,{'name':nodegroupname})[0]
-            except:
-                self.server.AddNodeGroup(auth,{'name':nodegroupname})
-            for node in group_nodes:
-                self.server.AddNodeToNodeGroup(auth,node,nodegroupname)
-        return True
+            if action == "add":
+                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
+                # first, check if the nodetagtype is here
+                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
+                if tag_types:
+                    tag_type_id = tag_types[0]['tag_type_id']
+                else:
+                    tag_type_id = self.apiserver.AddTagType(auth,
+                                                            {'tagname':nodegroupname,
+                                                             'description': 'for nodegroup %s'%nodegroupname,
+                                                             'category':'test'})
+                print 'located tag (type)',nodegroupname,'as',tag_type_id
+                # create nodegroup
+                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
+                if not nodegroups:
+                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
+                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
+                # set node tag on all nodes, value='yes'
+                for nodename in group_nodes:
+                    try:
+                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
+                    except:
+                        traceback.print_exc()
+                        print 'node',nodename,'seems to already have tag',nodegroupname
+                    # check anyway
+                    try:
+                        expect_yes = self.apiserver.GetNodeTags(auth,
+                                                                {'hostname':nodename,
+                                                                 'tagname':nodegroupname},
+                                                                ['value'])[0]['value']
+                        if expect_yes != "yes":
+                            print 'Mismatch node tag on node',nodename,'got',expect_yes
+                            overall=False
+                    except:
+                        if not self.options.dry_run:
+                            print 'Cannot find tag',nodegroupname,'on node',nodename
+                            overall = False
+            else:
+                try:
+                    print 'cleaning nodegroup',nodegroupname
+                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
+                except:
+                    traceback.print_exc()
+                    overall=False
+        return overall

-    def all_hostnames (self) :
-        hostnames = []
+    # return a list of tuples (nodename,qemuname)
+    def all_node_infos (self) :
+        node_infos = []
         for site_spec in self.plc_spec['sites']:
-            hostnames += [ node_spec['node_fields']['hostname'] \
-                           for node_spec in site_spec['nodes'] ]
-        return hostnames
+            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
+                                for node_spec in site_spec['nodes'] ]
+        return node_infos
+
+    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
+
+    def all_reservable_nodenames (self):
+        res=[]
+        for site_spec in self.plc_spec['sites']:
+            for node_spec in site_spec['nodes']:
+                node_fields=node_spec['node_fields']
+                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
+                    res.append(node_fields['hostname'])
+        return res

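translate_timestamp() above implements the convention used by leases(): any spec value smaller than one year is a relative offset counted in grain units from the rounded start, while anything bigger is taken as an absolute UNIX timestamp. A worked example with made-up numbers:

# sketch: the grain arithmetic performed by leases()
now=1290000000                      # hypothetical UNIX time
grain=1800                          # e.g. 30-minute granularity from GetLeaseGranularity
start=(now/grain)*grain + grain     # next grain boundary (python-2 integer division)
t_from=start + 2*grain              # spec value 2 < YEAR, so: 2 grains after start
t_until=1295000000                  # >= YEAR, kept as an absolute timestamp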
-    # gracetime : during the first minutes nothing gets printed
-    def do_nodes_booted (self, minutes, gracetime=2):
+    # silent_minutes : during the first minutes nothing gets printed
+    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
+        if self.options.dry_run:
+            print 'dry_run'
+            return True
         # compute timeout
-        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
-        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
+        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
+        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
         # the nodes that haven't checked yet - start with a full list and shrink over time
         tocheck = self.all_hostnames()
         utils.header("checking nodes %r"%tocheck)
@@ -406,26 +878,26 @@ class TestPlc:
         status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
         while tocheck:
             # get their status
-            tocheck_status=self.server.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
+            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
             # update status
             for array in tocheck_status:
                 hostname=array['hostname']
                 boot_state=array['boot_state']
-                if boot_state == 'boot':
-                    utils.header ("%s has reached the 'boot' state"%hostname)
+                if boot_state == target_boot_state:
+                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                 else:
                     # if it's a real node, never mind
-                    (site_spec,node_spec)=self.locate_node(hostname)
+                    (site_spec,node_spec)=self.locate_hostname(hostname)
                     if TestNode.is_real_model(node_spec['node_fields']['model']):
                         utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                         # let's cheat
-                        boot_state = 'boot'
-                    if datetime.datetime.now() > graceout:
+                        boot_state = target_boot_state
+                    elif datetime.datetime.now() > graceout:
                         utils.header ("%s still in '%s' state"%(hostname,boot_state))
                         graceout=datetime.datetime.now()+datetime.timedelta(1)
                 status[hostname] = boot_state
             # refresh tocheck
-            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
+            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
             if not tocheck:
                 return True
             if datetime.datetime.now() > timeout:
@@ -433,97 +905,157 @@ class TestPlc:
                 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                 return False
             # otherwise, sleep for a while
-            time.sleep(15)
+            time.sleep(period)
         # only useful in empty plcs
         return True

-    def nodes_booted(self,options):
-        return self.do_nodes_booted(minutes=5)
-
-    #to scan and store the nodes's public keys and avoid to ask for confirmation when ssh
-    def scan_publicKeys(self,hostnames):
-        try:
-            temp_knownhosts="/root/known_hosts"
-            remote_knownhosts="/root/.ssh/known_hosts"
-            self.test_ssh.run_in_host("touch %s"%temp_knownhosts )
-            for hostname in hostnames:
-                utils.header("Scan Public %s key and store it in the known_host file(under the root image) "%hostname)
-                scan=self.test_ssh.run_in_host('ssh-keyscan -t rsa %s >> %s '%(hostname,temp_knownhosts))
-            #Store the public keys in the right root image
-            self.copy_in_guest(temp_knownhosts,remote_knownhosts,True)
-            #clean the temp keys file used
-            self.test_ssh.run_in_host('rm -f %s '%temp_knownhosts )
-        except Exception, err:
-            print err
-
-    def do_check_nodesSsh(self,minutes):
+    def nodes_booted(self):
+        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
+
+    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
         # compute timeout
-        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
-        tocheck = self.all_hostnames()
-        self.scan_publicKeys(tocheck)
-        utils.header("checking Connectivity on nodes %r"%tocheck)
-        while tocheck:
-            for hostname in tocheck:
-                # try to ssh in nodes
-                access=self.test_ssh.run_in_guest('ssh -i /etc/planetlab/root_ssh_key.rsa root@%s date'%hostname )
-                if (not access):
-                    utils.header('The node %s is sshable -->'%hostname)
-                    # refresh tocheck
-                    tocheck.remove(hostname)
+        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
+        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
+        vservername=self.vservername
+        if debug:
+            message="debug"
+            local_key = "keys/%(vservername)s-debug.rsa"%locals()
+        else:
+            message="boot"
+            local_key = "keys/key1.rsa"
+        node_infos = self.all_node_infos()
+        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
+        for (nodename,qemuname) in node_infos:
+            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
+        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
+                         (timeout_minutes,silent_minutes,period))
+        while node_infos:
+            for node_info in node_infos:
+                (hostname,qemuname) = node_info
+                # try to run 'hostname' in the node
+                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
+                # don't spam logs - show the command only after the grace period
+                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
+                if success==0:
+                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
+                    # refresh node_infos
+                    node_infos.remove(node_info)
                 else:
-                    (site_spec,node_spec)=self.locate_node(hostname)
+                    # we will have tried real nodes once, in case they're up - but if not, just skip
+                    (site_spec,node_spec)=self.locate_hostname(hostname)
                     if TestNode.is_real_model(node_spec['node_fields']['model']):
                         utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
-                        tocheck.remove(hostname)
-            if not tocheck:
+                        node_infos.remove(node_info)
+            if not node_infos:
                 return True
             if datetime.datetime.now() > timeout:
-                for hostname in tocheck:
-                    utils.header("FAILURE to ssh into %s"%hostname)
+                for (hostname,qemuname) in node_infos:
+                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                 return False
             # otherwise, sleep for a while
-            time.sleep(15)
+            time.sleep(period)
         # only useful in empty plcs
         return True

-    def nodes_ssh(self, options):
-        return self.do_check_nodesSsh(minutes=2)
+    def ssh_node_debug(self):
+        "Tries to ssh into nodes in debug mode with the debug ssh key"
+        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)

-    def bootcd (self, options):
-        for site_spec in self.plc_spec['sites']:
-            test_site = TestSite (self,site_spec)
-            for node_spec in site_spec['nodes']:
-                test_node=TestNode (self,test_site,node_spec)
-                test_node.prepare_area()
-                test_node.create_boot_cd()
-                test_node.configure_qemu()
-        return True
-
-    def do_check_intiscripts(self):
-        for site_spec in self.plc_spec['sites']:
-            test_site = TestSite (self,site_spec)
-            test_node = TestNode (self,test_site,site_spec['nodes'])
-            for slice_spec in self.plc_spec['slices']:
-                test_slice=TestSlice (self,test_site,slice_spec)
-                test_sliver=TestSliver(self,test_node,test_slice)
-                init_status=test_sliver.get_initscript(slice_spec)
-                if (not init_status):
-                    return False
-        return init_status
+    def ssh_node_boot(self):
+        "Tries to ssh into nodes in production mode with the root ssh key"
+        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
+
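nodes_check_boot_state() and check_nodes_ssh() above share a single polling scheme: retry every `period` seconds, stay quiet until the silent window expires, and fail once the overall timeout is reached. The skeleton, extracted with a hypothetical check() predicate:

import datetime, time

def poll(check, timeout_minutes, silent_minutes, period=15):
    # sketch of the waiting loop used by both checkers
    timeout  = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    while True:
        if check(verbose = datetime.datetime.now() > graceout):
            return True
        if datetime.datetime.now() > timeout:
            return False
        time.sleep(period)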
+    @node_mapper
+    def qemu_local_init (self):
+        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
+        pass
+    @node_mapper
+    def bootcd (self):
+        "all nodes: invoke GetBootMedium and store result locally"
+        pass
+    @node_mapper
+    def qemu_local_config (self):
+        "all nodes: compute qemu config qemu.conf and store it locally"
+        pass
+    @node_mapper
+    def nodestate_reinstall (self):
+        "all nodes: mark PLCAPI boot_state as reinstall"
+        pass
+    @node_mapper
+    def nodestate_safeboot (self):
+        "all nodes: mark PLCAPI boot_state as safeboot"
+        pass
+    @node_mapper
+    def nodestate_boot (self):
+        "all nodes: mark PLCAPI boot_state as boot"
+        pass
+    @node_mapper
+    def nodestate_show (self):
+        "all nodes: show PLCAPI boot_state"
+        pass
+    @node_mapper
+    def qemu_export (self):
+        "all nodes: push local node-dep directory on the qemu box"
+        pass

+    ### check hooks : invoke scripts from hooks/{node,slice}
+    def check_hooks_node (self):
+        return self.locate_first_node().check_hooks()
+    def check_hooks_sliver (self) :
+        return self.locate_first_sliver().check_hooks()
+
+    def check_hooks (self):
+        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
+        return self.check_hooks_node() and self.check_hooks_sliver()
+
+    ### initscripts
+    def do_check_initscripts(self):
+        overall = True
+        for slice_spec in self.plc_spec['slices']:
+            if not slice_spec.has_key('initscriptstamp'):
+                continue
+            stamp=slice_spec['initscriptstamp']
+            for nodename in slice_spec['nodenames']:
+                (site,node) = self.locate_node (nodename)
+                # xxx - passing the wrong site - probably harmless
+                test_site = TestSite (self,site)
+                test_slice = TestSlice (self,test_site,slice_spec)
+                test_node = TestNode (self,test_site,node)
+                test_sliver = TestSliver (self, test_node, test_slice)
+                if not test_sliver.check_initscript_stamp(stamp):
+                    overall = False
+        return overall

-    def check_initscripts(self, options):
-        return self.do_check_intiscripts()
-
-    def initscripts (self, options):
+    def check_initscripts(self):
+        "check that the initscripts have triggered"
+        return self.do_check_initscripts()
+
+    def initscripts (self):
+        "create initscripts with PLCAPI"
         for initscript in self.plc_spec['initscripts']:
             utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
-            self.server.AddInitScript(self.auth_root(),initscript['initscript_fields'])
+            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
         return True

-    def slices (self, options):
+    def delete_initscripts (self):
+        "delete initscripts with PLCAPI"
+        for initscript in self.plc_spec['initscripts']:
+            initscript_name = initscript['initscript_fields']['name']
+            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
+            try:
+                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
+                print initscript_name,'deleted'
+            except:
+                print 'deletion went wrong - probably did not exist'
+        return True
+
+    ### manage slices
+    def slices (self):
+        "create slices with PLCAPI"
         return self.do_slices()

-    def clean_slices (self, options):
+    def delete_slices (self):
+        "delete slices with PLCAPI"
         return self.do_slices("delete")

     def do_slices (self, action="add"):
@@ -540,40 +1072,396 @@ class TestPlc:
                 utils.header('Created Slice %s'%slice['slice_fields']['name'])
         return True

-    def check_slices(self, options):
-        for slice_spec in self.plc_spec['slices']:
+    @slice_mapper
+    def ssh_slice(self):
+        "tries to ssh-enter the slice with the user key, to ensure slice creation"
+        pass
+
+    @node_mapper
+    def keys_clear_known_hosts (self):
+        "remove test nodes entries from the local known_hosts file"
+        pass
+
+    @node_mapper
+    def qemu_start (self) :
+        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
+        pass
+
+    @node_mapper
+    def timestamp_qemu (self) :
+        "all nodes: write a timestamp on the qemu box"
+        pass
+
+    def check_tcp (self):
+        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
+        specs = self.plc_spec['tcp_test']
+        overall=True
+        for spec in specs:
+            port = spec['port']
+            # server side
+            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
+            if not s_test_sliver.run_tcp_server(port,timeout=10):
+                overall=False
+                break
+
+            # idem for the client side
+            c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
+            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
+                overall=False
+        return overall
+
+    def plcsh_stress_test (self):
+        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
+        # install the stress-test in the plc image
+        location = "/usr/share/plc_api/plcsh_stress_test.py"
+        remote="/vservers/%s/%s"%(self.vservername,location)
+        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
+        command = location
+        command += " -- --check"
+        if self.options.size == 1:
+            command +=  " --tiny"
+        return ( self.run_in_guest(command) == 0)
+
+    # populate runs the same utility with slightly different options
+    # in particular runs with --preserve (dont cleanup) and without --check
+    # also it gets run twice, once with the --foreign option for creating fake foreign entries
+
+    def sfa_install_all (self):
+        "yum install sfa sfa-plc sfa-sfatables sfa-client"
+        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
+
+    def sfa_install_core(self):
+        "yum install sfa"
+        return self.yum_install ("sfa")
+
+    def sfa_install_plc(self):
+        "yum install sfa-plc"
+        return self.yum_install("sfa-plc")
+
+    def sfa_install_client(self):
+        "yum install sfa-client"
+        return self.yum_install("sfa-client")
+
+    def sfa_install_sfatables(self):
+        "yum install sfa-sfatables"
+        return self.yum_install ("sfa-sfatables")
+
+    def sfa_dbclean(self):
+        "thoroughly wipes off the SFA database"
+        self.run_in_guest("sfa-nuke-plc.py")==0
+        return True
+
+    def sfa_plcclean(self):
+        "cleans the PLC entries that were created as a side effect of running the script"
+        # ignore result
+        sfa_spec=self.plc_spec['sfa']
+
+        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
+            slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
+            try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
+            except: print "Slice %s already absent from PLC db"%slicename
+
+            username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
+            try: self.apiserver.DeletePerson(self.auth_root(),username)
+            except: print "User %s already absent from PLC db"%username
+
+        print "REMEMBER TO RUN sfa_import AGAIN"
+        return True
+
+    def sfa_uninstall(self):
+        "uses rpm to uninstall sfa - ignore result"
+        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
+        self.run_in_guest("rm -rf /var/lib/sfa")
+        self.run_in_guest("rm -rf /etc/sfa")
+        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
+        # xxx tmp
+        self.run_in_guest("rpm -e --noscripts sfa-plc")
+        return True
+
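All the sfa_install_* steps above delegate to yum_install(), defined near the top of the patch, which deliberately ignores yum's exit code and trusts only rpm -q, since yum has historically returned 0 on some failures. The same belt-and-braces idea, reduced to a standalone sketch:

import os

def yum_install_checked(rpms):
    # sketch: install, complete any pending transaction, verify with rpm
    os.system("yum -y install %s"%rpms)
    os.system("yum-complete-transaction")
    return os.system("rpm -q %s"%rpms)==0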
+    ### run unit tests for SFA
+    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
+    # Running Transaction
+    # Transaction couldn't start:
+    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
+    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
+    # no matter how many Gbs are available on the testplc
+    # could not figure out what's wrong, so...
+    # if the yum install phase fails, consider the test is successful
+    # other combinations will eventually run it hopefully
+    def sfa_utest(self):
+        "yum install sfa-tests and run SFA unittests"
+        self.run_in_guest("yum -y install sfa-tests")
+        # failed to install - forget it
+        if self.run_in_guest("rpm -q sfa-tests")!=0:
+            utils.header("WARNING: SFA unit tests failed to install, ignoring")
+            return True
+        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
+
+    ###
+    def confdir(self):
+        dirname="conf.%s"%self.plc_spec['name']
+        if not os.path.isdir(dirname):
+            utils.system("mkdir -p %s"%dirname)
+        if not os.path.isdir(dirname):
+            raise "Cannot create config dir for plc %s"%self.name()
+        return dirname
+
+    def conffile(self,filename):
+        return "%s/%s"%(self.confdir(),filename)
+    def confsubdir(self,dirname,clean,dry_run=False):
+        subdirname="%s/%s"%(self.confdir(),dirname)
+        if clean:
+            utils.system("rm -rf %s"%subdirname)
+        if not os.path.isdir(subdirname):
+            utils.system("mkdir -p %s"%subdirname)
+        if not dry_run and not os.path.isdir(subdirname):
+            raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
+        return subdirname
+
+    def conffile_clean (self,filename):
+        filename=self.conffile(filename)
+        return utils.system("rm -rf %s"%filename)==0
+
+    ###
+    def sfa_configure(self):
+        "run sfa-config-tty"
+        tmpname=self.conffile("sfa-config-tty")
+        fileconf=open(tmpname,'w')
+        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
+                     'SFA_INTERFACE_HRN',
+                     'SFA_REGISTRY_LEVEL1_AUTH',
+                     'SFA_REGISTRY_HOST',
+                     'SFA_AGGREGATE_HOST',
+                     'SFA_SM_HOST',
+                     'SFA_PLC_URL',
+                     'SFA_PLC_USER',
+                     'SFA_PLC_PASSWORD',
+                     'SFA_DB_HOST',
+                     'SFA_DB_USER',
+                     'SFA_DB_PASSWORD',
+                     'SFA_DB_NAME',
+                     'SFA_API_LOGLEVEL',
+                     ]:
+            if self.plc_spec['sfa'].has_key(var):
+                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
+        # the way plc_config handles booleans just sucks..
+        for var in []:
+            val='false'
+            if self.plc_spec['sfa'][var]: val='true'
+            fileconf.write ('e %s\n%s\n'%(var,val))
+        fileconf.write('w\n')
+        fileconf.write('R\n')
+        fileconf.write('q\n')
+        fileconf.close()
+        utils.system('cat %s'%tmpname)
+        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
+        return True
+
+    def aggregate_xml_line(self):
+        port=self.plc_spec['sfa']['neighbours-port']
+        return '<aggregate addr="%s" hrn="%s" port="%s"/>' % \
+            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
+
+    def registry_xml_line(self):
+        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
+            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
+
+
+    # a cross step that takes all other plcs in argument
+    def cross_sfa_configure(self, other_plcs):
+        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
+        # of course with a single plc, other_plcs is an empty list
+        if not other_plcs:
+            return True
+        agg_fname=self.conffile("agg.xml")
+        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
+                                      " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
+        utils.header ("(Over)wrote %s"%agg_fname)
+        reg_fname=self.conffile("reg.xml")
+        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
+                                      " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
+        utils.header ("(Over)wrote %s"%reg_fname)
+        return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
+           and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
+
+    def sfa_import(self):
+        "sfa-import-plc"
+        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
+        return self.run_in_guest('sfa-import-plc.py')==0
+# not needed anymore
+#        self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
+
+    def sfa_start(self):
+        "service sfa start"
+        return self.run_in_guest('service sfa start')==0
+
+    def sfi_configure(self):
+        "Create /root/sfi on the plc side for sfi client configuration"
+        if self.options.dry_run:
+            utils.header("DRY RUN - skipping step")
+            return True
+        sfa_spec=self.plc_spec['sfa']
+        # cannot use sfa_slice_mapper to pass dir_name
+        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
             site_spec = self.locate_site (slice_spec['sitename'])
             test_site = TestSite(self,site_spec)
-            test_slice=TestSlice(self,test_site,slice_spec)
-            status=test_slice.do_check_slice(options)
-            if (not status):
-                return False
-        return status
-
-    def start_nodes (self, options):
-        utils.header("Starting nodes")
+            test_slice=TestSliceSfa(self,test_site,slice_spec)
+            dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
+            test_slice.sfi_config(dir_name)
+            # push into the remote /root/sfi area
+            location = test_slice.sfi_path()
+            remote="/vservers/%s/%s"%(self.vservername,location)
+            self.test_ssh.mkdir(remote,abs=True)
+            # need to strip last level or remote otherwise we get an extra dir level
+            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)

+        return True
+
+    def sfi_clean (self):
+        "clean up /root/sfi on the plc side"
+        self.run_in_guest("rm -rf /root/sfi")
+        return True
+
+    @slice_sfa_mapper
+    def sfa_add_user(self):
+        "run sfi.py add"
+        pass
+
+    @slice_sfa_mapper
+    def sfa_update_user(self):
+        "run sfi.py update"
+
+    @slice_sfa_mapper
+    def sfa_add_slice(self):
+        "run sfi.py add (on Registry) from slice.xml"
+        pass
+
+    @slice_sfa_mapper
+    def sfa_discover(self):
+        "discover resources into resources_in.rspec"
+        pass
+
+    @slice_sfa_mapper
+    def sfa_create_slice(self):
+        "run sfi.py create (on SM) - 1st time"
+        pass
+
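cross_sfa_configure() above writes one XML element per peer PLC into aggregates.xml and registries.xml. Note that the element bodies were lost in this page's rendering and have been reconstructed from the format arguments (addr, hrn, port), so treat the exact attribute names as an assumption. With two hypothetical peers, the aggregate file would look roughly like this:

# sketch: assembling aggregates.xml as cross_sfa_configure does
lines=[ '<aggregate addr="10.0.0.2" hrn="plc1" port="12346"/>',
        '<aggregate addr="10.0.0.3" hrn="plc2" port="12346"/>' ]
content="<aggregates>%s</aggregates>\n"%" ".join(lines)
file("agg.xml","w").write(content)      # python-2 idiom, as in the surrounding code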
+    @slice_sfa_mapper
+    def sfa_check_slice_plc(self):
+        "check sfa_create_slice at the plcs - all local nodes should be in slice"
+        pass
+
+    @slice_sfa_mapper
+    def sfa_update_slice(self):
+        "run sfi.py create (on SM) on existing object"
+        pass
+
+    @slice_sfa_mapper
+    def sfa_view(self):
+        "various registry-related calls"
+        pass
+
+    @slice_sfa_mapper
+    def ssh_slice_sfa(self):
+        "tries to ssh-enter the SFA slice"
+        pass
+
+    @slice_sfa_mapper
+    def sfa_delete_user(self):
+        "run sfi.py delete"
+        pass
+
+    @slice_sfa_mapper
+    def sfa_delete_slice(self):
+        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
+        pass
+
+    def sfa_stop(self):
+        "service sfa stop"
+        self.run_in_guest('service sfa stop')==0
+        return True
+
+    def populate (self):
+        "creates random entries in the PLCAPI"
+        # install the stress-test in the plc image
+        location = "/usr/share/plc_api/plcsh_stress_test.py"
+        remote="/vservers/%s/%s"%(self.vservername,location)
+        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
+        command = location
+        command += " -- --preserve --short-names"
+        local = (self.run_in_guest(command) == 0);
+        # second run with --foreign
+        command += ' --foreign'
+        remote = (self.run_in_guest(command) == 0);
+        return ( local and remote)
+
+    def gather_logs (self):
+        "gets all possible logs from plc's/qemu node's/slice's for future reference"
+        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
+        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
+        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
+        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
+        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
+        # (1.a)
+        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
+        self.gather_var_logs ()
+        # (1.b)
+        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
+        self.gather_pgsql_logs ()
+        # (2)
+        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
         for site_spec in self.plc_spec['sites']:
-            TestSite(self,site_spec).start_nodes (options)
+            test_site = TestSite (self,site_spec)
+            for node_spec in site_spec['nodes']:
+                test_node=TestNode(self,test_site,node_spec)
+                test_node.gather_qemu_logs()
+        # (3)
+        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
+        self.gather_nodes_var_logs()
+        # (4)
+        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
+        self.gather_slivers_var_logs()
         return True

-    def stop_nodes (self, options):
-        self.kill_all_qemus(options)
+    def gather_slivers_var_logs(self):
+        for test_sliver in self.all_sliver_objs():
+            remote = test_sliver.tar_var_logs()
+            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
+            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
+            utils.system(command)
         return True

-    def check_tcp (self, options):
-        #we just need to create a sliver object nothing else
-        test_sliver=TestSliver(self,
-                               TestNode(self, TestSite(self,self.plc_spec['sites'][0]),
-                                        self.plc_spec['sites'][0]['nodes'][0]),
-                               TestSlice(self,TestSite(self,self.plc_spec['sites'][0]),
-                                         self.plc_spec['slices']))
-        return test_sliver.do_check_tcp(self.plc_spec['tcp_param'],options)
+    def gather_var_logs (self):
+        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
+        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
+        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
+        utils.system(command)
+        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
+        utils.system(command)
+
+    def gather_pgsql_logs (self):
+        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
+        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
+        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
+        utils.system(command)
+
+    def gather_nodes_var_logs (self):
+        for site_spec in self.plc_spec['sites']:
+            test_site = TestSite (self,site_spec)
+            for node_spec in site_spec['nodes']:
+                test_node=TestNode(self,test_site,node_spec)
+                test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
+                command = test_ssh.actual_command("tar -C /var/log -cf - .")
+                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
+                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
+                utils.system(command)
+
     # returns the filename to use for sql dump/restore, using options.dbname if set
-    def dbfile (self, database, options):
+    def dbfile (self, database):
         # uses options.dbname if it is found
         try:
-            name=options.dbname
+            name=self.options.dbname
             if not isinstance(name,StringTypes):
                 raise Exception
         except:
@@ -582,26 +1470,30 @@ class TestPlc:
             name=str(d)
         return "/root/%s-%s.sql"%(database,name)

-    def db_dump(self, options):
-
-        dump=self.dbfile("planetab4",options)
-        self.test_ssh.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
-        utils.header('Dumped planetlab4 database in %s'%dump)
+    def plc_db_dump(self):
+        'dump the planetlab5 DB in /root in the PLC - filename has time'
+        dump=self.dbfile("planetab5")
+        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
+        utils.header('Dumped planetlab5 database in %s'%dump)
         return True

-    def db_restore(self, options):
-        dump=self.dbfile("planetab4",options)
+    def plc_db_restore(self):
+        'restore the planetlab5 DB - looks broken, but run -n might help'
+        dump=self.dbfile("planetab5")
         ##stop httpd service
-        self.test_ssh.run_in_guest('service httpd stop')
+        self.run_in_guest('service httpd stop')
         # xxx - need another wrapper
-        self.test_ssh.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
-        self.test_ssh.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
-        self.test_ssh.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
+        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
+        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
+        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
         ##starting httpd service
-        self.test_ssh.run_in_guest('service httpd start')
+        self.run_in_guest('service httpd start')

         utils.header('Database restored from ' + dump)

+    def standby_1_through_20(self):
+        """convenience function to wait for a specified number of minutes"""
+        pass
     @standby_generic
     def standby_1(): pass
     @standby_generic
@@ -642,4 +1534,3 @@
     def standby_19(): pass
     @standby_generic
     def standby_20(): pass
-
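The standby_1 .. standby_20 battery that closes the file exists only so that each wait length is a named, listable step; standby_generic() recovers the minute count by parsing the method name. The same steps could be generated in a loop (a sketch, not how the file actually does it, since the explicit list keeps every step greppable and statically visible):

def make_standby(minutes):
    def step(self):
        return standby(minutes,self.options.dry_run)
    step.__name__='standby_%d'%minutes
    step.__doc__='wait for %d minutes'%minutes
    return step

# for n in range(1,21): setattr(TestPlc,'standby_%d'%n,make_standby(n))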