From 2196bdedb941f1cbd8f30ad55e26b55573a58b20 Mon Sep 17 00:00:00 2001 From: Thierry Parmentelat Date: Wed, 21 Nov 2007 19:39:17 +0000 Subject: [PATCH] organizing the tests area --- .../My_Virtual_Machine.vmx | 121 ---------- .../My_Virtual_Machine_model-s001.vmdk | Bin 327680 -> 0 bytes .../My_Virtual_Machine_model-s002.vmdk | Bin 327680 -> 0 bytes .../My_Virtual_Machine_model-s003.vmdk | Bin 327680 -> 0 bytes .../My_Virtual_Machine_model-s004.vmdk | Bin 327680 -> 0 bytes .../My_Virtual_Machine_model-s005.vmdk | Bin 327680 -> 0 bytes .../My_Virtual_Machine_model-s006.vmdk | Bin 196608 -> 0 bytes .../My_Virtual_Machine_model.vmdk | 22 -- plctest/TestConfig.py | 131 ---------- plctest/TestMain.py | 100 -------- plctest/TestNode.py | 94 -------- plctest/TestPlc.py | 101 -------- plctest/TestRestore.py | 109 --------- plctest/TestSite.py | 223 ------------------ plctest/tty_conf | 24 -- 15 files changed, 925 deletions(-) delete mode 100644 plctest/My-Virtual-Machine-model/My_Virtual_Machine.vmx delete mode 100644 plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s001.vmdk delete mode 100644 plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s002.vmdk delete mode 100644 plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s003.vmdk delete mode 100644 plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s004.vmdk delete mode 100644 plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s005.vmdk delete mode 100644 plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s006.vmdk delete mode 100644 plctest/My-Virtual-Machine-model/My_Virtual_Machine_model.vmdk delete mode 100644 plctest/TestConfig.py delete mode 100755 plctest/TestMain.py delete mode 100644 plctest/TestNode.py delete mode 100644 plctest/TestPlc.py delete mode 100755 plctest/TestRestore.py delete mode 100644 plctest/TestSite.py delete mode 100644 plctest/tty_conf diff --git a/plctest/My-Virtual-Machine-model/My_Virtual_Machine.vmx b/plctest/My-Virtual-Machine-model/My_Virtual_Machine.vmx deleted file mode 100644 index ee9e680f..00000000 --- a/plctest/My-Virtual-Machine-model/My_Virtual_Machine.vmx +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/vmplayer - -# Filename: My_Virtual_Machine.vmx -# Generated 2007-05-28;11:47:54 by EasyVMX! -# http://www.easyvmx.com - -# This is a Workstation 5 or 5.5 config file -# It can be used with Player -config.version = "8" -virtualHW.version = "4" - -# Selected operating system for your virtual machine -guestOS = "other" - -# displayName is your own name for the virtual machine -#displayName - -# These fields are free text description fields -guestinfo.vmware.product.url = "amine.parmentelat.net" -guestinfo.vmware.product.class = "virtual machine" - -# Number of virtual CPUs. 
Your virtual machine will not -# work if this number is higher than the number of your physical CPUs -numvcpus = "1" - -# Memory size and other memory settings -memsize = "320" -MemAllowAutoScaleDown = "FALSE" -MemTrimRate = "-1" - -# Unique ID for the virtual machine will be created -uuid.action = "create" - -# Remind to install VMware Tools -# This setting has no effect in VMware Player -tools.remindInstall = "TRUE" - -# Startup hints interfers with automatic startup of a virtual machine -# This setting has no effect in VMware Player -hints.hideAll = "TRUE" - -# Enable time synchronization between computer -# and virtual machine -tools.syncTime = "TRUE" - -# USB settings -# This config activates USB -usb.present = "TRUE" -usb.generic.autoconnect = "FALSE" - -# First serial port, physical COM1 is available -serial0.present = "FALSE" -serial0.fileName = "Auto Detect" -serial0.autodetect = "TRUE" -serial0.hardwareFlowControl = "TRUE" - -# Optional second serial port, physical COM2 is not available -serial1.present = "FALSE" - -# First parallell port, physical LPT1 is available -parallel0.present = "FALSE" -parallel0.fileName = "Auto Detect" -parallel0.autodetect = "TRUE" -parallel0.bidirectional = "TRUE" - -# Logging -# This config activates logging, and keeps last log -logging = "TRUE" -log.fileName = "My_Virtual_Machine.log" -log.append = "TRUE" -log.keepOld = "1" - -# These settings decides interaction between your -# computer and the virtual machine -isolation.tools.hgfs.disable = "FALSE" -isolation.tools.dnd.disable = "FALSE" -isolation.tools.copy.enable = "TRUE" -isolation.tools.paste.enabled = "TRUE" - -# First network interface card -ethernet0.present = "TRUE" -#ethernet0.virtualDev = "vmxnet" -ethernet0.connectionType = "nat" -ethernet0.addressType = "generated" -ethernet0.generatedAddressOffset = "0" - -# Settings for physical floppy drive -floppy0.present = "FALSE" - -# Settings for physical CDROM drive -ide1:0.present = "TRUE" -ide1:0.deviceType = "cdrom-raw" -ide1:0.startConnected = "FALSE" -ide1:0.fileName = "/dev/cdrom" - -# Settings for the optional virtual CDROM, ISO-image -ide1:1.present = "TRUE" -*ide1:1.fileName = "amine.parmentelat.net-3.3.iso" -ide1:1.deviceType = "cdrom-image" -ide1:1.mode = "persistent" -ide1:1.startConnected = "TRUE" - -# LsiLogic SCSI card -scsi0.present = "TRUE" -scsi0.virtualDev = "lsilogic" - -# First SCSI disk image, size 11Gb -scsi0:0.present = "TRUE" -scsi0:0.fileName = "My_Virtual_Machine_model.vmdk" -scsi0:0.mode = "persistent" -scsi0:0.startConnected = "TRUE" -scsi0:0.writeThrough = "TRUE" - -# END OF EasyVMX! 
CONFIG - -scsi0:0.redo = "" -ethernet0.generatedAddress = "00:0c:29:24:ff:8f" -uuid.location = "56 4d 4f 31 ef 1d 6d 78-35 2f 38 b2 a2 24 ff 8f" -uuid.bios = "56 4d 4f 31 ef 1d 6d 78-35 2f 38 b2 a2 24 ff 8f" - -usb.autoConnect.device0 = "path:1/0 autoclean:1" diff --git a/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s001.vmdk b/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s001.vmdk deleted file mode 100644 index d6d0e8140e2b99a9ce09daa1ca611c38efd938de..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 327680 zcmeIvQN$kv6vpA-`Nm>dqbQ0tjYUxuMbW4!HWiCSQ4~e7Q4~deijAUC6dT1xu_!i* zq9}@@D2k#{6vd)wDJuW@F1y{u$Mamgj^kXNcgMDycTKgX>d&oH$dVK21si&v*O+7ocf9i#)m!=L*y)t!p>b0rY zr{0`8Hud(@yHoE^eK_^;)TdLQPklM{_0+dh$ESXn`f2Kysb8mlpE^19_td|0+TRNR z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 m0000000000000000000000000000000000000026YW)XhPDD2V diff --git a/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s002.vmdk b/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s002.vmdk deleted file mode 100644 index d6d0e8140e2b99a9ce09daa1ca611c38efd938de..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 327680 zcmeIvQN$kv6vpA-`Nm>dqbQ0tjYUxuMbW4!HWiCSQ4~e7Q4~deijAUC6dT1xu_!i* zq9}@@D2k#{6vd)wDJuW@F1y{u$Mamgj^kXNcgMDycTKgX>d&oH$dVK21si&v*O+7ocf9i#)m!=L*y)t!p>b0rY zr{0`8Hud(@yHoE^eK_^;)TdLQPklM{_0+dh$ESXn`f2Kysb8mlpE^19_td|0+TRNR z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 m0000000000000000000000000000000000000026YW)XhPDD2V diff --git a/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s003.vmdk b/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s003.vmdk deleted file mode 100644 index d6d0e8140e2b99a9ce09daa1ca611c38efd938de..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 327680 zcmeIvQN$kv6vpA-`Nm>dqbQ0tjYUxuMbW4!HWiCSQ4~e7Q4~deijAUC6dT1xu_!i* zq9}@@D2k#{6vd)wDJuW@F1y{u$Mamgj^kXNcgMDycTKgX>d&oH$dVK21si&v*O+7ocf9i#)m!=L*y)t!p>b0rY zr{0`8Hud(@yHoE^eK_^;)TdLQPklM{_0+dh$ESXn`f2Kysb8mlpE^19_td|0+TRNR z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 m0000000000000000000000000000000000000026YW)XhPDD2V diff --git a/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s004.vmdk b/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s004.vmdk deleted file mode 100644 index d6d0e8140e2b99a9ce09daa1ca611c38efd938de..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 327680 zcmeIvQN$kv6vpA-`Nm>dqbQ0tjYUxuMbW4!HWiCSQ4~e7Q4~deijAUC6dT1xu_!i* zq9}@@D2k#{6vd)wDJuW@F1y{u$Mamgj^kXNcgMDycTKgX>d&oH$dVK21si&v*O+7ocf9i#)m!=L*y)t!p>b0rY zr{0`8Hud(@yHoE^eK_^;)TdLQPklM{_0+dh$ESXn`f2Kysb8mlpE^19_td|0+TRNR z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 m0000000000000000000000000000000000000026YW)XhPDD2V diff --git a/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s005.vmdk 
b/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s005.vmdk deleted file mode 100644 index d6d0e8140e2b99a9ce09daa1ca611c38efd938de..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 327680 zcmeIvQN$kv6vpA-`Nm>dqbQ0tjYUxuMbW4!HWiCSQ4~e7Q4~deijAUC6dT1xu_!i* zq9}@@D2k#{6vd)wDJuW@F1y{u$Mamgj^kXNcgMDycTKgX>d&oH$dVK21si&v*O+7ocf9i#)m!=L*y)t!p>b0rY zr{0`8Hud(@yHoE^eK_^;)TdLQPklM{_0+dh$ESXn`f2Kysb8mlpE^19_td|0+TRNR z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 z00000000000000000000000000000000000000000000000000000000000000000 m0000000000000000000000000000000000000026YW)XhPDD2V diff --git a/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s006.vmdk b/plctest/My-Virtual-Machine-model/My_Virtual_Machine_model-s006.vmdk deleted file mode 100644 index d9de1f8f8a098aebe4095d96dfee8ef0ea1117d7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 196608 zcmeIvAx{+m6o%pV!loM=1cSlgn1MNifnXpQ7$e9e7zhS}fnXpQ2n5Fr7z_r3!9Xw& z3PlW44Z#|yXOht*}k20&eFoel|eOLb$fc4ZJp= zX!~Mr{VOJ>CnqjCE|0P5N;Ul5RCTSoUfrl}R=28|>P~gHnyv0tbJhK7zFMpvRLj+) zYNdKytyWK}wd!fLUOlTes?F+o_2TlJ3IG5A000000000000000000000000000000 z000000000000000000000000000000000000000000000002hD%j#A2x_VQ+t=?5T z)%$9<`cQqW_Nx8rp!!rDR-dac)z|7Creating plc at '+timset+':',plc_spec - test_plc = TestPlc(plc_spec) - test_plc.connect() - test_plcs.append(test_plc) - test_plc.cleanup_plc() - print '========>Installing myplc at: ', timset - if (len(sys.argv) > 1): - test_plc.install_plc(url) - test_plc.config_plc(plc_spec) - else : - print "========>PLease insert a valid url for the myplc install" - ##create all the sites under the new plc,and then populate them with - ##nodes,persons and slices - for site_spec in plc_spec['sites']: - print '========>Creating site at '+timset+ ':',site_spec - test_site = test_plc.init_site(site_spec) - for node_spec in site_spec['nodes']: - print '========>Creating node at '+ timset+' :',node_spec - test_nodes.append(node_spec) - test_node = test_plc.init_node(test_site,node_spec,self.path) - test_node.create_slice ("pi") - print 'Runing Checkers and Vmwares for Site nodes at :',timset - test_site.run_vmware(test_nodes,display) - if(test_site.node_check_status(test_nodes,True)): - test_plc.db_dump() - test_site.slice_access(test_nodes) - print "all is alright" - return 0 - else : - print "There is something wrong" - sys.exit(1) - except Exception, e: - print str(e) - sys.exit(1) - -if __name__ == "__main__": - TestMain().main() diff --git a/plctest/TestNode.py b/plctest/TestNode.py deleted file mode 100644 index 0ca91ba3..00000000 --- a/plctest/TestNode.py +++ /dev/null @@ -1,94 +0,0 @@ -import os -import sys -import time -import base64 -import TestConfig -import xmlrpclib - -class TestNode: - - def __init__ (self,test_plc,test_site,node_spec): - self.test_plc=test_plc - self.test_site=test_site - self.node_spec=node_spec - self.timset=time.strftime("%H:%M:%S", time.localtime()) - def create_node (self,role): - auth = self.test_site.anyuser_auth (role) - filter={'boot_state':'rins'} - try: - if (role=='pi' and self.node_spec['owned']=='pi'): - self.node_id = self.test_plc.server.AddNode(auth, - self.test_site.site_spec['site_fields']['login_base'], - self.node_spec) - self.test_plc.server.AddNodeNetwork(auth,self.node_id, - self.node_spec['network']) - self.test_plc.server.UpdateNode(auth, self.node_id, filter) - return self.node_id - - elif (role=='tech' 
and self.node_spec['owned']=='tech'): - self.node_id = self.test_plc.server.AddNode(auth, - self.test_site.site_spec['site_fields']['login_base'], - self.node_spec) - self.test_plc.server.AddNodeNetwork(auth,self.node_id, - self.node_spec['network']) - self.test_plc.server.UpdateNode(auth, self.node_id, filter) - return self.node_id - except Exception, e: - print str(e) - - def create_slice(self, role): - auth = self.test_site.anyuser_auth (role) - liste_hosts=[] - #for l in liste_nodes_spec : - # liste_hosts.append(l['hostname']) - try: - for slicespec in TestConfig.slices_specs : - print '========>Creating slice at :'+self.timset+' : ',slicespec - slice_id=self.test_plc.server.AddSlice(auth,slicespec['slice_spec']) - for sliceuser in slicespec['slice_users']: - self.test_plc.server.AddPersonToSlice(auth, sliceuser['email'], slice_id)##affecting person to the slice - for slicenode in slicespec['slice_nodes']: - liste_hosts.append(slicenode['hostname']) - self.test_plc.server.AddSliceToNodes(auth, slice_id, liste_hosts)##add slice to the spec nodes - print 'fin creation slices' - except Exception, e: - print str(e) - sys.exit(1) - - def conffile(self,image,hostname,path): - try: - file=path+'/VirtualFile-'+hostname+'/My_Virtual_Machine.vmx' - f2=open(file,'w') - - f1=open(path+'/My-Virtual-Machine-model/My_Virtual_Machine.vmx','r') - while 1: - txt = f1.readline() - if txt=='': - f1.close() - f2.close() - break - if txt[0]!='*' : - f2.write(txt) - else : - f2.write('ide1:1.fileName = '+'"'+image+'"' '\n') - - - except Exception, e: - print str(e) - - def create_boot_cd(self,node_spec,path): - try: - os.system('mkdir -p %s/VirtualFile-%s && cp %s/My-Virtual-Machine-model/* %s/VirtualFile-%s' - %(path, node_spec['hostname'], path, path, node_spec['hostname'])) - link1=self.test_plc.server.GetBootMedium(self.test_plc.auth_root(), - node_spec['hostname'], 'node-iso', '') - if (link1 == ''): - raise Exception, 'boot.iso not found' - file1=open(path+'/VirtualFile-'+node_spec['hostname']+'/boot_file.iso','w') - file1.write(base64.b64decode(link1)) - file1.close() - print '========> boot cd created for :',self.node_spec['hostname'] - self.conffile('boot_file.iso',self.node_spec['hostname'], path) #create 2 conf file for the vmware based - except Exception, e: - print str(e) - sys.exit(1) diff --git a/plctest/TestPlc.py b/plctest/TestPlc.py deleted file mode 100644 index d183e830..00000000 --- a/plctest/TestPlc.py +++ /dev/null @@ -1,101 +0,0 @@ -import os -import sys -import xmlrpclib -import datetime -from TestSite import TestSite -from TestNode import TestNode - -class TestPlc: - - def __init__ (self,plc_spec): - self.plc_spec=plc_spec - self.url="https://%s:443/PLCAPI/"%plc_spec['hostname'] - self.server=xmlrpclib.Server(self.url,allow_none=True) - self.path=os.path.dirname(sys.argv[0]) - - def connect (self): - # tricky : define les methodes de l'API sur cet object - pass - - def auth_root (self): - return {'Username':self.plc_spec['PLC_ROOT_USER'], - 'AuthMethod':'password', - 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'], - 'Role' : self.plc_spec['role'] - } - def affiche_results(self, test_case_name, status, timers): - timers=datetime.datetime.now() - fileHandle = open (self.path+'/results.txt', 'a' ) - fileHandle.write ( str(test_case_name)+' ' +str(status)+' '+str(timers)) - fileHandle.close() - - - - def config_plc(self,plc_spec): -# Thierry 2007-07-05 -# now plc-config-tty silently creates needed directories -# os.system('mkdir -p /etc/planetlab/configs') - - 
fileconf=open('tty_conf','w') - for var in [ 'PLC_NAME', - 'PLC_ROOT_PASSWORD', - 'PLC_ROOT_USER', - 'PLC_MAIL_ENABLED', - 'PLC_MAIL_SUPPORT_ADDRESS', - 'PLC_DB_HOST', - 'PLC_API_HOST', - 'PLC_WWW_HOST', - 'PLC_BOOT_HOST', - 'PLC_NET_DNS1', - 'PLC_NET_DNS2']: - fileconf.write ('e %s\n%s\n'%(var,plc_spec[var])) - fileconf.write('w\n') - fileconf.write('q\n') - fileconf.close() - os.system('set -x ; cat tty_conf') - os.system('set -x ; chroot /plc/root plc-config-tty < tty_conf') - os.system('set -x ; service plc start') - os.system('set -x; service sendmail stop') - os.system('set -x; chroot /plc/root service sendmail restart') - - def cleanup_plc(self): - os.system('service plc safestop') - #####detecting the last myplc version installed and remove it - os.system('set -x; rpm -e myplc') - print "=======================>Remove Myplc DONE!" - ##### Clean up the /plc directory - os.system('set -x; rm -rf /plc/data') - print "=======================>Clean up DONE!" - - def install_plc(self,url): - print url - os.system('set -x; rpm -ivh '+url) - os.system('set -x; service plc mount') - - def init_site (self,site_spec): - test_site = TestSite (self,site_spec) - test_site.create_site() - for key in site_spec['users']: - test_site.create_user(key) - test_site.enable_user(key) - test_site.add_key_user(key) - return test_site - - def init_node (self,test_site,node_spec,path): - - test_node = TestNode(self, test_site, node_spec) - test_node.create_node ("pi") - test_node.create_node ("tech") - test_node.create_boot_cd(node_spec,path) - return test_node - - def db_dump(self): - - t=datetime.datetime.now() - d=t.date() - dump='/var/lib/pgsql/backups/planetlab4-'+str(d)+'-2nodes' - os.system('chroot /plc/root pg_dump -U pgsqluser planetlab4 -f '+ dump) - print 'dump is done',dump - - - diff --git a/plctest/TestRestore.py b/plctest/TestRestore.py deleted file mode 100755 index fa015c88..00000000 --- a/plctest/TestRestore.py +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python - -import os, sys, time -from optparse import OptionParser -import xmlrpclib - -class TestRestore: - - subversion_id = "$Id$" - - def __init__ (self): - self.url="https://localhost:443/PLCAPI/" - self.server=xmlrpclib.Server(self.url,allow_none=True) - self.path=os.path.dirname(sys.argv[0]) - -###################3 - def auth_root (self): - return {'Username':'root@onelab-test.inria.fr', - 'AuthMethod':'password', - 'AuthString':'test++', - 'Role' : 'root' - } - -##############check if the db version exsit - def check_dir(self,dbname): - - config_file = "/plc/data/var/lib/pgsql/backups/"+dbname - if (os.path.isfile (config_file)): - print "==>dbversion found " - return 1 - else: - print "\n %s non-existing Bdd version\n" % config_file - return 0 - -##############restoring one db return list of host nodes - def restore_db(self,db,display): - try: - list_host=[] - ##stop httpd service - os.system('chroot /plc/root service httpd stop') - ##droping - os.system(' echo drop database planetlab4 |chroot /plc/root psql --user=pgsqluser template1') - ##creating - os.system('chroot /plc/root createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4') - ##populating - os.system('chroot /plc/root psql -U pgsqluser planetlab4 -f /var/lib/pgsql/backups/'+db) - ##starting httpd service - os.system('chroot /plc/root service httpd start') - - print 'db.restored' - hosts=self.server.GetNodes(self.auth_root()) - for host in hosts: - print host['hostname'] - list_host.append(host['hostname']) - - for l in list_host : - print display - 
os.system('DISPLAY=%s vmplayer %s/VirtualFile-%s/My_Virtual_Machine.vmx &'%(display,self.path,l)) - - except Exception, e: - print str(e) -########################### - - - - - def main (self): - try: - usage = """usage: %prog [options] BDDversion""" - parser=OptionParser(usage=usage,version=self.subversion_id) - # verbosity - parser.add_option("-v","--verbose", action="store_true", dest="verbose", default=False, - help="Run in verbose mode") - #exporting Display - parser.add_option("-d","--display", action="store", dest="Xdisplay", default='bellami:0.0', - help="export the display on the mentionneted one") - - - (self.options, self.args) = parser.parse_args() - - hosts=[] - i=0 - dirname ='' - display='' - - - if (self.options.Xdisplay): - display=self.options.Xdisplay - print 'the display is', display - - - if (len(self.args) == 0 ): - parser.print_help() - sys.exit(1) - else: - dirname=self.args[0] - - if (not (self.check_dir(dirname))): - parser.print_help() - sys.exit(1) - - self.restore_db(dirname,display) - - except Exception, e: - print str(e) - -if __name__ == "__main__": - TestRestore().main() - diff --git a/plctest/TestSite.py b/plctest/TestSite.py deleted file mode 100644 index f9d4caa6..00000000 --- a/plctest/TestSite.py +++ /dev/null @@ -1,223 +0,0 @@ -import os -import sys -import datetime -import time -from TestConfig import * -import xmlrpclib - -class TestSite: - - def __init__ (self,test_plc,site_spec): - self.test_plc=test_plc - self.site_spec=site_spec - self.timset=time.strftime("%H:%M:%S", time.localtime()) - - def create_site (self): - try: - print self.test_plc.auth_root() - self.site_id = self.test_plc.server.AddSite(self.test_plc.auth_root(), - self.site_spec['site_fields']) - self.test_plc.server.AddSiteAddress(self.test_plc.auth_root(),self.site_id, - self.site_spec['site_address']) - - return self.site_id - except Exception, e: - print str(e) - - def site_id(self): - return self.site_id() - - def create_user (self, user_spec): - try: - i=0 - print '========>Adding user at '+self.timset+ ': ',user_spec - self.person_id=self.test_plc.server.AddPerson(self.test_plc.auth_root(), - user_spec) - self.test_plc.server.UpdatePerson(self.test_plc.auth_root(), - self.person_id,{'enabled': True}) - for role in user_spec['roles']: - self.test_plc.server.AddRoleToPerson(self.test_plc.auth_root(), - role,user_spec['email']) - self.test_plc.server.AddPersonToSite(self.test_plc.auth_root(), - user_spec['email'], - self.site_spec['site_fields']['login_base']) - except Exception,e: - print str(e) - - def enable_user (self, user_spec): - try: - persones=self.test_plc.server.GetPersons(self.test_plc.auth_root()) - for person in persones: - if (person['enabled']!="True"): - self.test_plc.server.UpdatePerson(self.test_plc.auth_root(), - person['person_id'], - {'enabled': True}) - except Exception,e: - print str(e) - - def add_key_user(self,user_spec): - try: - auth="" - for userspec in self.site_spec['users']: - if(user_spec == userspec): - for role in userspec['roles']: - auth=auth+role - print auth - self.test_plc.server.AddPersonKey(self.anyuser_auth(auth), - user_spec['email'], key) - except Exception, e: - print str(e) - - def anyuser_auth (self,key): - for person in self.site_spec['users']: - if person['auth_meth']== key : - return {'Username':person['email'], - 'AuthMethod':'password', - 'AuthString':person['password'], - 'Role':person['roles'][0], - } - - def node_check_status(self,liste_nodes,bool): - try: - ret_value=True - filter=['boot_state'] - bt={'boot_state':'boot'} 
- dbg={'boot_state':'dbg'} - secondes=15 - start_time = datetime.datetime.now() ##geting the current time - dead_time=datetime.datetime.now()+ datetime.timedelta(minutes=10)##adding 10minutes - start=time.strftime("%H:%M:%S", time.localtime()) - print "time in the begining is :",start - - for l in liste_nodes : - while (bool): - node_status=self.test_plc.server.GetNodes(self.test_plc.auth_root(), - l['hostname'], filter) - timset=time.strftime("%H:%M:%S", time.localtime()) - print 'the actual status for the node '+l['hostname']+' at '+str(timset)+' is :',node_status - try: - if (node_status[0] == bt): - test_name='\nTest Installation Node hosted: '+l['hostname'] - self.test_plc.affiche_results(test_name, 'Successful', '')##printing out the result - break ##for exsiting and renaming virtual file to just installed - elif (node_status[0] ==dbg): - test_name='\nTest Installation Node hosted: '+l['hostname'] - self.test_plc.affiche_results(test_name, 'En Debug', '')##printing out the result - bool=False - break ##for exsiting and renaming virtual file to just installed - elif ( start_time <= dead_time ) : - start_time=datetime.datetime.now()+ datetime.timedelta(minutes=2) - time.sleep(secondes) - else: bool=False - except OSError ,e : - bool=False - str(e) - if (bool): - print "Node correctly instaled and booted " - else : - print "Node not fully booted "##cheek if configuration file already exist - ret_value=False - test_name='\nTest Installation Node Hosted: ',l['hostname'] - self.test_plc.affiche_results(test_name, 'Failure', '')##printing out the result - - end=time.strftime("%H:%M:%S", time.localtime()) - print "time at the end is :",end ##converting time to secondes - return ret_value - except Exception, e: - print str(e) - print "vmware killed if problems occur " - time.sleep(10) - self.kill_all_vmwares() - sys.exit(1) - - def kill_all_vmwares(self): - os.system('pgrep vmware | xargs -r kill') - os.system('pgrep vmplayer | xargs -r kill ') - os.system('pgrep vmware | xargs -r kill -9') - os.system('pgrep vmplayer | xargs -r kill -9') - - def run_vmware(self,liste_nodes,display): - path=os.path.dirname(sys.argv[0]) - print path - print " kill last vmware before any new installation " - self.kill_all_vmwares() - print 'i will be displayed here========>', display - arg='< /dev/null &>/dev/null &' - for l in liste_nodes : - #os.system('set -x; vmplayer VirtualFile-%s/My_Virtual_Machine.vmx %s '%(l['hostname'],arg)) - os.system('set -x; DISPLAY=%s vmplayer %s/VirtualFile-%s/My_Virtual_Machine.vmx %s '%(display,path,l['hostname'],arg)) - - def delete_known_hosts(self): - try: - file1=open('/root/.ssh/known_hosts','r') - file2=open('/root/.ssh/known_hosts_temp','w') - while 1: - txt = file1.readline() - if txt=='': - file1.close() - file2.close() - break - if txt[0:4]!='test' : - file2.write(txt) - - - os.system('mv -f /root/.ssh/known_hosts_temp /root/.ssh/known_hosts') - except Exception, e: - print str(e) - - def slice_access(self,liste_nodes): - try: - bool=True - bool1=True - secondes=15 - self.delete_known_hosts() - start_time = datetime.datetime.now() - dead_time=start_time + datetime.timedelta(minutes=3)##adding 3minutes - for slice in slices_specs: - for slicenode in slice['slice_nodes']: - timset=time.strftime("%H:%M:%S", time.localtime()) - while(bool): - print '=========>Try to Restart the Node Manager on %s at %s:'%(slicenode['hostname'],str(timset)) - access=os.system('set -x; ssh -i /etc/planetlab/root_ssh_key.rsa root@%s service nm restart'%slicenode['hostname'] ) - if 
(access==0): - print '=========>Node Manager Restarted on %s at %s:'%(slicenode['hostname'] ,str(timset)) - while(bool1): - print '=========>Try to connect to the %s@%s at %s '%(slice['slice_spec']['name'],slicenode['hostname'],str(time.strftime("%H:%M:%S", time.localtime()))) - Date=os.system('set -x; ssh -i ~/.ssh/slices.rsa %s@%s echo "The Actual Time here is;" date'%(slice['slice_spec']['name'],slicenode['hostname'])) - if (Date==0): - break - elif ( start_time <= dead_time ) : - start_time=datetime.datetime.now()+ datetime.timedelta(seconds=30) - time.sleep(secondes) - else: - bool1=False - if(bool1): - print '=========>connected to the '+slice['slice_spec']['name']+'@'+slicenode['hostname'] +'--->' - else: - print '=========>access to one slice is denied but last chance' - print '=========>Retry to Restart the Node Manager on %s at %s:'%(slicenode['hostname'],str(timset)) - access=os.system('set -x; ssh -i /etc/planetlab/root_ssh_key.rsa root@%s service nm restart'%slicenode['hostname'] ) - if (access==0): - print '=========>Retry to connect to the %s@%s at %s '%(slice['slice_spec']['name'],slicenode['hostname'],str(time.strftime("%H:%M:%S", time.localtime()))) - Date=os.system('set -x; ssh -i ~/.ssh/slices.rsa %s@%s echo "The Actual Time here is;" date'%(slice['slice_spec']['name'],slicenode['hostname'] )) - if (Date==0): - print '=========>connected to the '+slice['slice_spec']['name']+'@'+slicenode['hostname']+'--->' - else: - print '=========>the Access is finaly denied' - sys.exit(1) - else :"=========>Last try failed" - break - elif ( start_time <= dead_time ) : - start_time=datetime.datetime.now()+ datetime.timedelta(minutes=1) - time.sleep(secondes) - else: - bool=False - - if (not bool): - print 'Node manager problems' - sys.exit(1) - - except Exception, e: - print str(e) - sys.exit(1) - diff --git a/plctest/tty_conf b/plctest/tty_conf deleted file mode 100644 index 1cea1527..00000000 --- a/plctest/tty_conf +++ /dev/null @@ -1,24 +0,0 @@ -e PLC_NAME -TestLab -e PLC_ROOT_PASSWORD -test++ -e PLC_ROOT_USER -root@onelab-test.inria.fr -e PLC_MAIL_ENABLED -true -e PLC_MAIL_SUPPORT_ADDRESS -mohamed-amine.chaoui@sophia.inria.fr -e PLC_DB_HOST -onelab-test.inria.fr -e PLC_API_HOST -onelab-test.inria.fr -e PLC_WWW_HOST -onelab-test.inria.fr -e PLC_BOOT_HOST -onelab-test.inria.fr -e PLC_NET_DNS1 -138.96.0.10 -e PLC_NET_DNS2 -138.96.0.11 -w -q -- 2.45.2
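
The removed TestSite.node_check_status() loop above polls the PLCAPI over XML-RPC until each freshly booted node reports boot_state 'boot', bailing out early when a node falls into 'dbg' or the timeout expires. Below is a minimal, self-contained sketch of that polling pattern, not the deleted implementation itself: the API URL, account, and hostname are placeholders, and xmlrpc.client is assumed as the Python 3 counterpart of the xmlrpclib module the removed scripts imported.

    # Minimal sketch of the boot-state polling done by the removed
    # TestSite.node_check_status(); the URL, account and hostnames below are
    # placeholders, not values taken from the deleted configuration.
    import time
    import xmlrpc.client   # Python 3 name for the xmlrpclib module used above

    API_URL = "https://plc.example.org:443/PLCAPI/"   # assumed PLCAPI endpoint
    AUTH = {'AuthMethod': 'password',
            'Username': 'root@example.org',           # assumed root account
            'AuthString': 'secret'}

    def wait_for_boot(hostnames, timeout=600, period=15):
        """Poll GetNodes until every host reports boot_state 'boot'.

        Returns True if all nodes come up before the timeout; a node stuck in
        'dbg' (the debug state the removed code also checked) fails early.
        """
        server = xmlrpc.client.ServerProxy(API_URL, allow_none=True)
        deadline = time.time() + timeout
        pending = set(hostnames)
        while pending and time.time() < deadline:
            for host in list(pending):
                # Same call shape as the removed code: hostname plus return fields.
                nodes = server.GetNodes(AUTH, host, ['boot_state'])
                if nodes and nodes[0]['boot_state'] == 'boot':
                    pending.discard(host)
                elif nodes and nodes[0]['boot_state'] == 'dbg':
                    return False
            if pending:
                time.sleep(period)
        return not pending

    if __name__ == '__main__':
        print(wait_for_boot(['test1.example.org']))

The removed driver then went on to dump the database and test slice access over ssh, as visible in the TestMain.py and TestSite.slice_access hunks above; the sketch only covers the boot-state check.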