from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
-from TestSliceSfa import TestSliceSfa
+from TestAuthSfa import TestAuthSfa
# step methods must take (self) and return a boolean (options is a member of the class)
if not node_method(test_node, *args, **kwds): overall=False
return overall
# restore the doc text
- actual.__doc__=method.__doc__
+ actual.__doc__=TestNode.__dict__[method.__name__].__doc__
return actual
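For reference, the node_mapper decorator whose body is only partially visible above has roughly this overall shape; this is a reconstruction for illustration only (the node_method lookup and the all_nodes() helper are assumed from other fragments of this file):

    # reconstruction for illustration only - not part of the patch
    def node_mapper (method):
        def actual(self, *args, **kwds):
            overall=True
            # the TestNode method of the same name does the per-node work
            node_method = TestNode.__dict__[method.__name__]
            for test_node in self.all_nodes():
                if not node_method(test_node, *args, **kwds): overall=False
            return overall
        # restore the doc text
        actual.__doc__=TestNode.__dict__[method.__name__].__doc__
        return actual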
def slice_mapper (method):
if not slice_method(test_slice,self.options): overall=False
return overall
# restore the doc text
- actual.__doc__=method.__doc__
+ actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
return actual
-def slice_sfa_mapper (method):
+def auth_sfa_mapper (method):
def actual(self):
overall=True
- slice_method = TestSliceSfa.__dict__[method.__name__]
- for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
- site_spec = self.locate_site (slice_spec['sitename'])
- test_site = TestSite(self,site_spec)
- test_slice=TestSliceSfa(self,test_site,slice_spec)
- if not slice_method(test_slice,self.options): overall=False
+ auth_method = TestAuthSfa.__dict__[method.__name__]
+ for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
+ test_auth=TestAuthSfa(self,auth_spec)
+ if not auth_method(test_auth,self.options): overall=False
return overall
# restore the doc text
- actual.__doc__=method.__doc__
+ actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
return actual
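To illustrate how these mappers are consumed further down, here is a minimal sketch (not part of the patch; the TestAuthSfa docstring and body shown are assumed, the real ones live in TestAuthSfa.py):

    # sketch only - assumed docstring and trivial spec, for illustration
    class TestAuthSfa:
        def sfa_add_user (self, options):
            "run sfi.py add"
            return True

    class TestPlc:
        plc_spec = {'sfa': {'auth_sfa_specs': []}}

        @auth_sfa_mapper
        def sfa_add_user (self): pass

    # the decorated step inherits the TestAuthSfa docstring for the step listing,
    # and calling it loops over every entry in plc_spec['sfa']['auth_sfa_specs']
    print TestPlc.sfa_add_user.__doc__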
SEP='<sep>'
'show', SEP,
'vs_delete','timestamp_vs','vs_create', SEP,
'plc_install', 'plc_configure', 'plc_start', SEP,
- 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
+ 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
- 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
- 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
- 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
+ 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
+ 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
+ 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
+ 'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
# we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
# but as the stress test might take a while, we sometimes missed the debug mode..
'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
- 'check_tcp', 'check_sys_slice', SEP,
+ 'cross_check_tcp@1', 'check_system_slice', SEP,
+ 'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
'force_gather_logs', SEP,
]
other_steps = [
'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
'delete_leases', 'list_leases', SEP,
- 'populate' , SEP,
+ 'populate', SEP,
'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
'plc_db_dump' , 'plc_db_restore', SEP,
+ 'check_netflow','check_drl', SEP,
+ 'debug_nodemanager', SEP,
'standby_1_through_20',SEP,
]
return step != SEP and step != SEPSFA
# turn off the sfa-related steps when build has skipped SFA
- # this is originally for centos5 as recent SFAs won't build on this platform
+ # this was originally for centos5 but is still valid
+ # for up to f12 as recent SFAs with sqlalchemy won't build before f14
@staticmethod
def check_whether_build_has_sfa (rpms_url):
+ utils.header ("Checking if build provides SFA package...")
# warning, we're now building 'sface' so let's be a bit more picky
retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
# full builds are expected to return with 0 here
- if retcod!=0:
+ if retcod==0:
+ utils.header("build does provide SFA")
+ else:
# move all steps containing 'sfa' or 'sfi' from default_steps to other_steps
- sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
+ utils.header("SFA package not found - removing steps with sfa or sfi")
+ sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
TestPlc.other_steps += sfa_steps
for step in sfa_steps: TestPlc.default_steps.remove(step)
self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
self.apiserver=TestApiserver(self.url,options.dry_run)
+ def has_addresses_api (self):
+ return self.apiserver.has_method('AddIpAddress')
+
def name(self):
name=self.plc_spec['name']
return "%s.%s"%(name,self.vservername)
def run_in_guest_piped (self,local,remote):
return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
+ def yum_check_installed (self, rpms):
+ if isinstance (rpms, list):
+ rpms=" ".join(rpms)
+ return self.run_in_guest("rpm -q %s"%rpms)==0
+
# does a yum install in the vs, ignore yum retcod, check with rpm
def yum_install (self, rpms):
if isinstance (rpms, list):
self.run_in_guest("yum -y install %s"%rpms)
# yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
self.run_in_guest("yum-complete-transaction -y")
- return self.run_in_guest("rpm -q %s"%rpms)==0
+ return self.yum_check_installed (rpms)
def auth_root (self):
return {'Username':self.plc_spec['PLC_ROOT_USER'],
return (site,node)
raise Exception,"Cannot locate hostname %s"%hostname
- def locate_key (self,keyname):
+ def locate_key (self,key_name):
for key in self.plc_spec['keys']:
- if key['name'] == keyname:
+ if key['key_name'] == key_name:
return key
- raise Exception,"Cannot locate key %s"%keyname
+ raise Exception,"Cannot locate key %s"%key_name
+
+ def locate_private_key_from_key_names (self, key_names):
+ # locate the first available key
+ found=False
+ for key_name in key_names:
+ key_spec=self.locate_key(key_name)
+ test_key=TestKey(self,key_spec)
+ publickey=test_key.publicpath()
+ privatekey=test_key.privatepath()
+ if os.path.isfile(publickey) and os.path.isfile(privatekey):
+ found=True
+ break
+ if found: return privatekey
+ else: return None
def locate_slice (self, slicename):
for slice in self.plc_spec['slices']:
#################### display config
def show (self):
"show test configuration after localization"
- self.display_pass (1)
- self.display_pass (2)
+ self.show_pass (1)
+ self.show_pass (2)
return True
def export (self):
# entry point
always_display_keys=['PLC_WWW_HOST','nodes','sites',]
- def display_pass (self,passno):
+ def show_pass (self,passno):
for (key,val) in self.plc_spec.iteritems():
if not self.options.verbose and key not in TestPlc.always_display_keys: continue
if passno == 2:
print '+ ======== initscript',initscript['initscript_fields']['name']
def display_key_spec (self,key):
- print '+ ======== key',key['name']
+ print '+ ======== key',key['key_name']
def display_slice_spec (self,slice):
print '+ ======== slice',slice['slice_fields']['name']
# write a timestamp in /vservers/<>.timestamp
# cannot be inside the vserver, that causes vserver .. build to cough
def timestamp_vs (self):
+ "Create a timestamp to remember creation date for this plc"
now=int(time.time())
# TODO-lxc check this one
# a first approx. is to store the timestamp close to the VM root like vs does
def delete_all_sites (self):
"Delete all sites in PLC, and related objects"
print 'auth_root',self.auth_root()
- site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
- for site_id in site_ids:
+ sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])
+ for site in sites:
+ # keep the automatic site - otherwise we'd shoot ourselves in the foot, as root_auth would no longer be valid
+ if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
+ site_id=site['site_id']
print 'Deleting site_id',site_id
self.apiserver.DeleteSite(self.auth_root(),site_id)
return True
local_key = "keys/%(vservername)s-debug.rsa"%locals()
else:
message="boot"
- local_key = "keys/key1.rsa"
+ local_key = "keys/key_admin.rsa"
node_infos = self.all_node_infos()
utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
for (nodename,qemuname) in node_infos:
return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
@node_mapper
- def qemu_local_init (self):
- "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
- pass
+ def qemu_local_init (self): pass
@node_mapper
- def bootcd (self):
- "all nodes: invoke GetBootMedium and store result locally"
- pass
+ def bootcd (self): pass
@node_mapper
- def qemu_local_config (self):
- "all nodes: compute qemu config qemu.conf and store it locally"
- pass
+ def qemu_local_config (self): pass
@node_mapper
- def nodestate_reinstall (self):
- "all nodes: mark PLCAPI boot_state as reinstall"
- pass
+ def nodestate_reinstall (self): pass
@node_mapper
- def nodestate_safeboot (self):
- "all nodes: mark PLCAPI boot_state as safeboot"
- pass
+ def nodestate_safeboot (self): pass
@node_mapper
- def nodestate_boot (self):
- "all nodes: mark PLCAPI boot_state as boot"
- pass
+ def nodestate_boot (self): pass
@node_mapper
- def nodestate_show (self):
- "all nodes: show PLCAPI boot_state"
- pass
+ def nodestate_show (self): pass
@node_mapper
- def qemu_export (self):
- "all nodes: push local node-dep directory on the qemu box"
- pass
+ def qemu_export (self): pass
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
### manage slices
def slices (self):
"create slices with PLCAPI"
- return self.do_slices()
+ return self.do_slices(action="add")
def delete_slices (self):
"delete slices with PLCAPI"
- return self.do_slices("delete")
+ return self.do_slices(action="delete")
+
+ def fill_slices (self):
+ "add nodes in slices with PLCAPI"
+ return self.do_slices(action="fill")
+
+ def empty_slices (self):
+ "remove nodes from slices with PLCAPI"
+ return self.do_slices(action="empty")
def do_slices (self, action="add"):
for slice in self.plc_spec['slices']:
site_spec = self.locate_site (slice['sitename'])
test_site = TestSite(self,site_spec)
test_slice=TestSlice(self,test_site,slice)
- if action != "add":
- utils.header("Deleting slices in site %s"%test_site.name())
+ if action == "delete":
test_slice.delete_slice()
- else:
- utils.pprint("Creating slice",slice)
+ elif action=="fill":
+ test_slice.add_nodes()
+ elif action=="empty":
+ test_slice.delete_nodes()
+ else:
test_slice.create_slice()
- utils.header('Created Slice %s'%slice['slice_fields']['name'])
return True
@slice_mapper
- def ssh_slice(self):
- "tries to ssh-enter the slice with the user key, to ensure slice creation"
- pass
+ def ssh_slice(self): pass
+ @slice_mapper
+ def ssh_slice_off (self): pass
@node_mapper
- def keys_clear_known_hosts (self):
- "remove test nodes entries from the local known_hosts file"
- pass
+ def keys_clear_known_hosts (self): pass
+ def speed_up_slices (self):
+ "tweak nodemanager settings on all nodes using a conf file"
+ # create the template on the server-side
+ template="%s.nodemanager"%self.name()
+ template_file = open (template,"w")
+ template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
+ template_file.close()
+ in_vm="/var/www/html/PlanetLabConf/nodemanager"
+ remote="%s/%s"%(self.vm_root_in_host(),in_vm)
+ self.test_ssh.copy_abs(template,remote)
+ # Add a conf file
+ self.apiserver.AddConfFile (self.auth_root(),
+ {'dest':'/etc/sysconfig/nodemanager',
+ 'source':'PlanetLabConf/nodemanager',
+ 'postinstall_cmd':'service nm restart',})
+ return True
+
+ def debug_nodemanager (self):
+ "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
+ template="%s.nodemanager"%self.name()
+ template_file = open (template,"w")
+ template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
+ template_file.close()
+ in_vm="/var/www/html/PlanetLabConf/nodemanager"
+ remote="%s/%s"%(self.vm_root_in_host(),in_vm)
+ self.test_ssh.copy_abs(template,remote)
+ return True
+
@node_mapper
- def qemu_start (self) :
- "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
- pass
+ def qemu_start (self) : pass
@node_mapper
- def timestamp_qemu (self) :
- "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
- pass
+ def timestamp_qemu (self) : pass
+
+ # when a spec refers to a node possibly on another plc
+ def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
+ for plc in [ self ] + other_plcs:
+ try:
+ return plc.locate_sliver_obj (nodename, slicename)
+ except:
+ pass
+ raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
- def check_tcp (self):
+ # implement this one as a cross step so that we can take advantage of different nodes
+ # in multi-plcs mode
+ def cross_check_tcp (self, other_plcs):
"check TCP connectivity between 2 slices (or in loopback if only one is defined)"
- specs = self.plc_spec['tcp_test']
+ if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
+ utils.header ("check_tcp: no/empty config found")
+ return True
+ specs = self.plc_spec['tcp_specs']
overall=True
for spec in specs:
port = spec['port']
# server side
- s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
+ s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
if not s_test_sliver.run_tcp_server(port,timeout=10):
overall=False
break
# idem for the client side
- c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
+ c_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['client_slice'],other_plcs)
if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
overall=False
return overall
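For reference, the keys read above imply a tcp_specs entry of roughly this shape (all values are hypothetical):

    # hypothetical plc_spec['tcp_specs'] entry - only the keys read by cross_check_tcp
    { 'port'         : 9999,
      'server_node'  : 'node1.example.org',
      'server_slice' : 'auth_slice1',
      'client_slice' : 'auth_slice2',
    }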
# painfully enough, we need to allow for some time as netflow might show up last
- def check_sys_slice (self):
+ def check_system_slice (self):
"all nodes: check that a system slice is alive"
-# would probably make more sense to check for netflow,
-# but that one is currently not working in the lxc distro
-# return self.check_systemslice ('netflow')
- return self.check_systemslice ('drl')
+ # netflow currently not working in the lxc distro
+ # drl not built at all in the wtx distro
+ # if we find either of them we're happy
+ return self.check_netflow() or self.check_drl()
+ # expose these
+ def check_netflow (self): return self._check_system_slice ('netflow')
+ def check_drl (self): return self._check_system_slice ('drl')
+
# we have the slices up already here, so it should not take too long
- def check_systemslice (self, slicename, timeout_minutes=5, period=15):
+ def _check_system_slice (self, slicename, timeout_minutes=5, period=15):
timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
test_nodes=self.all_nodes()
while test_nodes:
for test_node in test_nodes:
- if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
+ if test_node._check_system_slice (slicename,dry_run=self.options.dry_run):
utils.header ("ok")
test_nodes.remove(test_node)
else:
"yum install sfa-plc"
return self.yum_install("sfa-plc")
- def sfa_install_client(self):
- "yum install sfa-client"
- return self.yum_install("sfa-client")
-
def sfa_install_sfatables(self):
"yum install sfa-sfatables"
return self.yum_install ("sfa-sfatables")
+ # for some very odd reason, this sometimes fails with the following symptom
+ # # yum install sfa-client
+ # Setting up Install Process
+ # ...
+ # Downloading Packages:
+ # Running rpm_check_debug
+ # Running Transaction Test
+ # Transaction Test Succeeded
+ # Running Transaction
+ # Transaction couldn't start:
+ # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
+ # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
+ # even though in the same context I have
+ # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
+ # Filesystem Size Used Avail Use% Mounted on
+ # /dev/hdv1 806G 264G 501G 35% /
+ # none 16M 36K 16M 1% /tmp
+ #
+ # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
+ def sfa_install_client(self):
+ "yum install sfa-client"
+ first_try=self.yum_install("sfa-client")
+ if first_try: return True
+ utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
+ (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
+ utils.header("rpm_path=<<%s>>"%rpm_path)
+ # just for checking
+ self.run_in_guest("rpm -i %s"%cached_rpm_path)
+ return self.yum_check_installed ("sfa-client")
+
def sfa_dbclean(self):
"thoroughly wipes off the SFA database"
- self.run_in_guest("sfa-nuke.py")==0 or \
- self.run_in_guest("sfa-nuke-plc.py") or \
- self.run_in_guest("sfaadmin.py registry nuke")
- return True
+ return self.run_in_guest("sfaadmin.py registry nuke")==0 or \
+ self.run_in_guest("sfa-nuke.py")==0 or \
+ self.run_in_guest("sfa-nuke-plc.py")==0
def sfa_plcclean(self):
"cleans the PLC entries that were created as a side effect of running the script"
# ignore result
sfa_spec=self.plc_spec['sfa']
- for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
- slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
- try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
- except: print "Slice %s already absent from PLC db"%slicename
+ for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
+ login_base=auth_sfa_spec['login_base']
+ try: self.apiserver.DeleteSite (self.auth_root(),login_base)
+ except: print "Site %s already absent from PLC db"%login_base
- username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
- try: self.apiserver.DeletePerson(self.auth_root(),username)
- except: print "User %s already absent from PLC db"%username
+ for spec_name in ['pi_spec','user_spec']:
+ user_spec=auth_sfa_spec[spec_name]
+ username=user_spec['email']
+ try: self.apiserver.DeletePerson(self.auth_root(),username)
+ except:
+ # this is in fact expected, as deleting a site also deletes its members
+ #print "User %s already absent from PLC db"%username
+ pass
print "REMEMBER TO RUN sfa_import AGAIN"
return True
'SFA_DB_PASSWORD',
'SFA_DB_NAME',
'SFA_API_LOGLEVEL',
+ 'SFA_GENERIC_FLAVOUR',
+ 'SFA_AGGREGATE_ENABLED',
]:
if self.plc_spec['sfa'].has_key(var):
fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
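So each selected variable ends up as an 'e' (edit) line in the generated configuration input; roughly, with made-up values:

    # hypothetical output of the loop above - values are made up
    fileconf.write ('e SFA_GENERIC_FLAVOUR\npl\n')
    fileconf.write ('e SFA_AGGREGATE_ENABLED\ntrue\n')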
def sfa_import(self):
"sfa-import-plc"
auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
- return self.run_in_guest('sfa-import.py')==0 or \
- self.run_in_guest('sfa-import-plc.py')==0 or \
- self.run_in_guest('sfaadmin.py registry import_registry')==0
+ return \
+ self.run_in_guest('sfaadmin.py reg import_registry')==0
# not needed anymore
# self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
utils.header("DRY RUN - skipping step")
return True
sfa_spec=self.plc_spec['sfa']
- # cannot use sfa_slice_mapper to pass dir_name
- for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
- site_spec = self.locate_site (slice_spec['sitename'])
- test_site = TestSite(self,site_spec)
- test_slice=TestSliceSfa(self,test_site,slice_spec)
- dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
- test_slice.sfi_config(dir_name)
+ # cannot use auth_sfa_mapper to pass dir_name
+ for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
+ test_slice=TestAuthSfa(self,slice_spec)
+ dir_basename=os.path.basename(test_slice.sfi_path())
+ dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
+ test_slice.sfi_configure(dir_name)
# push into the remote /root/sfi area
location = test_slice.sfi_path()
remote="%s/%s"%(self.vm_root_in_host(),location)
self.run_in_guest("rm -rf /root/sfi")
return True
- @slice_sfa_mapper
- def sfa_add_user(self):
- "run sfi.py add"
- pass
-
- @slice_sfa_mapper
- def sfa_update_user(self):
- "run sfi.py update"
-
- @slice_sfa_mapper
- def sfa_add_slice(self):
- "run sfi.py add (on Registry) from slice.xml"
- pass
-
- @slice_sfa_mapper
- def sfa_discover(self):
- "discover resources into resouces_in.rspec"
- pass
-
- @slice_sfa_mapper
- def sfa_create_slice(self):
- "run sfi.py create (on SM) - 1st time"
- pass
-
- @slice_sfa_mapper
- def sfa_check_slice_plc(self):
- "check sfa_create_slice at the plcs - all local nodes should be in slice"
- pass
-
- @slice_sfa_mapper
- def sfa_update_slice(self):
- "run sfi.py create (on SM) on existing object"
- pass
-
- @slice_sfa_mapper
- def sfa_view(self):
- "various registry-related calls"
- pass
-
- @slice_sfa_mapper
- def ssh_slice_sfa(self):
- "tries to ssh-enter the SFA slice"
- pass
-
- @slice_sfa_mapper
- def sfa_delete_user(self):
- "run sfi.py delete"
- pass
-
- @slice_sfa_mapper
- def sfa_delete_slice(self):
- "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
- pass
+ @auth_sfa_mapper
+ def sfa_add_site (self): pass
+ @auth_sfa_mapper
+ def sfa_add_pi (self): pass
+ @auth_sfa_mapper
+ def sfa_add_user(self): pass
+ @auth_sfa_mapper
+ def sfa_update_user(self): pass
+ @auth_sfa_mapper
+ def sfa_add_slice(self): pass
+ @auth_sfa_mapper
+ def sfa_renew_slice(self): pass
+ @auth_sfa_mapper
+ def sfa_discover(self): pass
+ @auth_sfa_mapper
+ def sfa_create_slice(self): pass
+ @auth_sfa_mapper
+ def sfa_check_slice_plc(self): pass
+ @auth_sfa_mapper
+ def sfa_update_slice(self): pass
+ @auth_sfa_mapper
+ def sfi_list(self): pass
+ @auth_sfa_mapper
+ def sfi_show(self): pass
+ @auth_sfa_mapper
+ def sfi_slices(self): pass
+ @auth_sfa_mapper
+ def ssh_slice_sfa(self): pass
+ @auth_sfa_mapper
+ def sfa_delete_user(self): pass
+ @auth_sfa_mapper
+ def sfa_delete_slice(self): pass
def sfa_stop(self):
"service sfa stop"
test_site = TestSite (self,site_spec)
for node_spec in site_spec['nodes']:
test_node=TestNode(self,test_site,node_spec)
- test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
+ test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
command = test_ssh.actual_command("tar -C /var/log -cf - .")
command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())