organizing the tests area
author    Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr>
          Wed, 21 Nov 2007 19:39:17 +0000 (19:39 +0000)
committer Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr>
          Wed, 21 Nov 2007 19:39:17 +0000 (19:39 +0000)
15 files changed:
system/My-Virtual-Machine-model/My_Virtual_Machine.vmx [new file with mode: 0644]
system/My-Virtual-Machine-model/My_Virtual_Machine_model-s001.vmdk [new file with mode: 0644]
system/My-Virtual-Machine-model/My_Virtual_Machine_model-s002.vmdk [new file with mode: 0644]
system/My-Virtual-Machine-model/My_Virtual_Machine_model-s003.vmdk [new file with mode: 0644]
system/My-Virtual-Machine-model/My_Virtual_Machine_model-s004.vmdk [new file with mode: 0644]
system/My-Virtual-Machine-model/My_Virtual_Machine_model-s005.vmdk [new file with mode: 0644]
system/My-Virtual-Machine-model/My_Virtual_Machine_model-s006.vmdk [new file with mode: 0644]
system/My-Virtual-Machine-model/My_Virtual_Machine_model.vmdk [new file with mode: 0644]
system/TestConfig.py [new file with mode: 0644]
system/TestMain.py [new file with mode: 0755]
system/TestNode.py [new file with mode: 0644]
system/TestPlc.py [new file with mode: 0644]
system/TestRestore.py [new file with mode: 0755]
system/TestSite.py [new file with mode: 0644]
system/tty_conf [new file with mode: 0644]

diff --git a/system/My-Virtual-Machine-model/My_Virtual_Machine.vmx b/system/My-Virtual-Machine-model/My_Virtual_Machine.vmx
new file mode 100644 (file)
index 0000000..ee9e680
--- /dev/null
@@ -0,0 +1,121 @@
+#!/usr/bin/vmplayer
+
+# Filename: My_Virtual_Machine.vmx
+# Generated 2007-05-28;11:47:54 by EasyVMX!
+# http://www.easyvmx.com
+
+# This is a Workstation 5 or 5.5 config file
+# It can be used with Player
+config.version = "8"
+virtualHW.version = "4"
+
+# Selected operating system for your virtual machine
+guestOS = "other"
+
+# displayName is your own name for the virtual machine
+#displayName  
+
+# These fields are free text description fields
+guestinfo.vmware.product.url = "amine.parmentelat.net"
+guestinfo.vmware.product.class = "virtual machine"
+
+# Number of virtual CPUs. Your virtual machine will not
+# work if this number is higher than the number of your physical CPUs
+numvcpus = "1"
+
+# Memory size and other memory settings
+memsize = "320"
+MemAllowAutoScaleDown = "FALSE"
+MemTrimRate = "-1"
+
+# A unique ID for the virtual machine will be created
+uuid.action = "create"
+
+# Remind to install VMware Tools
+# This setting has no effect in VMware Player
+tools.remindInstall = "TRUE"
+
+# Startup hints interfere with the automatic startup of a virtual machine
+# This setting has no effect in VMware Player
+hints.hideAll = "TRUE"
+
+# Enable time synchronization between the host computer
+# and the virtual machine
+tools.syncTime = "TRUE"
+
+# USB settings
+# This config activates USB
+usb.present = "TRUE"
+usb.generic.autoconnect = "FALSE"
+
+# First serial port, physical COM1 is available
+serial0.present = "FALSE"
+serial0.fileName = "Auto Detect"
+serial0.autodetect = "TRUE"
+serial0.hardwareFlowControl = "TRUE"
+
+# Optional second serial port, physical COM2 is not available
+serial1.present = "FALSE"
+
+# First parallel port, physical LPT1 is available
+parallel0.present = "FALSE"
+parallel0.fileName = "Auto Detect"
+parallel0.autodetect = "TRUE"
+parallel0.bidirectional = "TRUE"
+
+# Logging
+# This config activates logging, and keeps last log
+logging = "TRUE"
+log.fileName = "My_Virtual_Machine.log"
+log.append = "TRUE"
+log.keepOld = "1"
+
+# These settings decide the interaction between your
+# host computer and the virtual machine
+isolation.tools.hgfs.disable = "FALSE"
+isolation.tools.dnd.disable = "FALSE"
+isolation.tools.copy.enable = "TRUE"
+isolation.tools.paste.enabled = "TRUE"
+
+# First network interface card
+ethernet0.present = "TRUE"
+#ethernet0.virtualDev = "vmxnet"
+ethernet0.connectionType = "nat"
+ethernet0.addressType = "generated"
+ethernet0.generatedAddressOffset = "0"
+
+# Settings for physical floppy drive
+floppy0.present = "FALSE"
+
+# Settings for physical CDROM drive
+ide1:0.present = "TRUE"
+ide1:0.deviceType = "cdrom-raw"
+ide1:0.startConnected = "FALSE"
+ide1:0.fileName = "/dev/cdrom"
+
+# Settings for the optional virtual CDROM, ISO-image
+ide1:1.present = "TRUE"
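+# NOTE: the next line deliberately starts with '*': TestNode.conffile()
+# copies this template and rewrites '*'-prefixed lines with the node's
+# own boot ISO filename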
+*ide1:1.fileName = "amine.parmentelat.net-3.3.iso"
+ide1:1.deviceType = "cdrom-image"
+ide1:1.mode = "persistent"
+ide1:1.startConnected = "TRUE"
+
+# LsiLogic SCSI card
+scsi0.present = "TRUE"
+scsi0.virtualDev = "lsilogic"
+
+# First SCSI disk image, size 11 GB
+scsi0:0.present = "TRUE"
+scsi0:0.fileName = "My_Virtual_Machine_model.vmdk"
+scsi0:0.mode = "persistent"
+scsi0:0.startConnected = "TRUE"
+scsi0:0.writeThrough = "TRUE"
+
+# END OF EasyVMX! CONFIG
+
+scsi0:0.redo = ""
+ethernet0.generatedAddress = "00:0c:29:24:ff:8f"
+uuid.location = "56 4d 4f 31 ef 1d 6d 78-35 2f 38 b2 a2 24 ff 8f"
+uuid.bios = "56 4d 4f 31 ef 1d 6d 78-35 2f 38 b2 a2 24 ff 8f"
+
+usb.autoConnect.device0 = "path:1/0 autoclean:1"
diff --git a/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s001.vmdk b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s001.vmdk
new file mode 100644 (file)
index 0000000..d6d0e81
Binary files /dev/null and b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s001.vmdk differ
diff --git a/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s002.vmdk b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s002.vmdk
new file mode 100644 (file)
index 0000000..d6d0e81
Binary files /dev/null and b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s002.vmdk differ
diff --git a/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s003.vmdk b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s003.vmdk
new file mode 100644 (file)
index 0000000..d6d0e81
Binary files /dev/null and b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s003.vmdk differ
diff --git a/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s004.vmdk b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s004.vmdk
new file mode 100644 (file)
index 0000000..d6d0e81
Binary files /dev/null and b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s004.vmdk differ
diff --git a/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s005.vmdk b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s005.vmdk
new file mode 100644 (file)
index 0000000..d6d0e81
Binary files /dev/null and b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s005.vmdk differ
diff --git a/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s006.vmdk b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s006.vmdk
new file mode 100644 (file)
index 0000000..d9de1f8
Binary files /dev/null and b/system/My-Virtual-Machine-model/My_Virtual_Machine_model-s006.vmdk differ
diff --git a/system/My-Virtual-Machine-model/My_Virtual_Machine_model.vmdk b/system/My-Virtual-Machine-model/My_Virtual_Machine_model.vmdk
new file mode 100644 (file)
index 0000000..813842e
--- /dev/null
@@ -0,0 +1,22 @@
+# Disk DescriptorFile
+version=1
+CID=a596a14e
+parentCID=ffffffff
+createType="twoGbMaxExtentSparse"
+
+# Extent description
+RW 4192256 SPARSE "My_Virtual_Machine_model-s001.vmdk"
+RW 4192256 SPARSE "My_Virtual_Machine_model-s002.vmdk"
+RW 4192256 SPARSE "My_Virtual_Machine_model-s003.vmdk"
+RW 4192256 SPARSE "My_Virtual_Machine_model-s004.vmdk"
+RW 4192256 SPARSE "My_Virtual_Machine_model-s005.vmdk"
+RW 2107392 SPARSE "My_Virtual_Machine_model-s006.vmdk"
+
+# The Disk Data Base 
+#DDB
+
+ddb.virtualHWVersion = "4"
+ddb.geometry.cylinders = "1435"
+ddb.geometry.heads = "255"
+ddb.geometry.sectors = "63"
+ddb.adapterType = "lsilogic"
diff --git a/system/TestConfig.py b/system/TestConfig.py
new file mode 100644 (file)
index 0000000..67357a2
--- /dev/null
@@ -0,0 +1,131 @@
+# Definitions of all the structures used by the test scripts
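+# Rough layout: plc_specs is a list of plc descriptions; each plc_spec
+# carries the plc-config-tty variables plus a 'sites' list, each site
+# gathers its 'users' and 'nodes', and slices_specs reuses those same
+# user and node entries.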
+site1_nodes = {
+'node1' :  {'hostname': 'test1.one-lab.org',
+            'boot_state':'inst',
+            'model':'vmware/minhw',
+            'owned' : 'pi',
+            'network': { 'method':'static',
+                         'type':'ipv4',
+                         'ip':'192.168.132.128',
+                         'gateway':'192.168.132.1',
+                         'network':'192.168.132.0',
+                         'broadcast':'192.168.132.255',
+                         'netmask':'255.255.255.0',
+                         'dns1': '192.168.132.2',
+                         },
+            
+            },
+'node2' :   { 'hostname':'test2.one-lab.org',
+              'boot_state':'inst',
+              'model':'vmware/minhw',
+              'owned' : 'tech',
+              'network': {'method':'static',
+                          'type':'ipv4',
+                          'ip':'192.168.132.130',
+                          'gateway':'192.168.132.1',
+                          'network':'192.168.132.0',
+                          'broadcast':'192.168.132.255',
+                          'netmask':'255.255.255.0',
+                          'dns1': '192.168.132.2',
+                          },
+              
+              },
+}
+site_users= {
+'pi_spec' : {'first_name':'PI',
+              'last_name':'PI',
+              'enabled':'True',
+              'email':'fake-pi1@one-lab.org',
+              'password':'testpi',
+              'roles':['pi'],
+               'auth_meth':'pi',
+              },
+'tech_spec' : {'first_name':'Tech',
+               'last_name':'Tech',
+                'enabled':'true',
+                'email':'fake-tech1@one-lab.org',
+                'password':'testtech',
+                'roles':['tech'],
+                 'auth_meth':'tech',
+                },
+'user_spec' : {'first_name':'User',
+                'last_name':'User',
+                'enabled':'true',
+                'email':'fake-user1@one-lab.org',
+                'password':'testuser',
+                'roles':['user'],
+                 'auth_meth':'user',
+                },
+'tech_user_spec' : {'first_name':'UserTech',
+                'last_name':'UserTech',
+                'enabled':'true',
+                'email':'fake-tech2@one-lab.org',
+                'password':'testusertech',
+                'roles':['tech','user'],
+                 'auth_meth':'techuser',
+                 },
+'pi_tech_spec' : {'first_name':'PiTech',
+                'last_name':'PiTech',
+                'enabled':'true',
+                'email':'fake-pi2@one-lab.org',
+                'password':'testusertech',
+                'roles':['pi','tech'],
+                 'auth_meth':'pitech',
+                  },
+}
+site_spec1 = {
+'site_fields' : {'name':'testsite',
+                'login_base':'ts',
+                'abbreviated_name':'PLanettest',
+                'max_slices':100,
+                'url':'http://onelab-test.inria.fr',
+                },
+'site_address' : {'line1':'route des lucioles',
+                 'city':'sophia',
+                 'state':'fr',
+                 'postalcode':'06600',
+                 'country':'france',
+                 },
+'users': [ site_users['pi_spec'], site_users['tech_spec'], site_users['user_spec'],site_users['tech_user_spec'],site_users['pi_tech_spec']],
+'nodes' :  [ site1_nodes['node1'], site1_nodes['node2']],
+}
+
+    
+site_specs = [ site_spec1 ]
+
+plc_spec1 =  { 
+    'hostname' : 'localhost',
+    'role' : 'root',
+    'PLC_ROOT_USER' : 'root@onelab-test.inria.fr',
+    'PLC_ROOT_PASSWORD' : 'test++',
+    'PLC_NAME' : 'TestLab',
+    'PLC_MAIL_ENABLED':'true',
+    'PLC_MAIL_SUPPORT_ADDRESS' : 'mohamed-amine.chaoui@sophia.inria.fr',
+    'PLC_DB_HOST' : 'onelab-test.inria.fr',
+    'PLC_API_HOST' : 'onelab-test.inria.fr',
+    'PLC_WWW_HOST' : 'onelab-test.inria.fr',
+    'PLC_BOOT_HOST' : 'onelab-test.inria.fr',
+    'PLC_NET_DNS1' : '138.96.0.10',
+    'PLC_NET_DNS2' : '138.96.0.11',
+    'sites' : site_specs,
+    }
+plc_specs = [ plc_spec1 ]
+
+key={'key_type':'ssh',
+     'key':'ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA4jNj8yT9ieEc6nSJz/ESu4fui9WrJ2y/MCfqIZ5WcdVKhBFUYyIenmUaeTduMcSqvoYRQ4QnFR1BFdLG8XR9D6FWZ5zTKUgpkew22EVNeqai4IXeWYKyt1Qf3ehaz9E3o1PG/bmQNIM6aQay6TD1Y4lqXI+eTVXVQev4K2fixySjFQpp9RB4UHbeA8c28yoa/cgAYHqCqlvm9uvpGMjgm/Qa4M+ZeO7NdjowfaF/wF4BQIzVFN9YRhvQ/d8WDz84B5Pr0J7pWpaX7EyC4bvdskxl6kmdNIwIRcIe4OcuIiX5Z9oO+7h/chsEVJWF4vqNIYlL9Zvyhnr0hLLhhuk2bw== root@onelab-test.inria.fr'}
+
+
+slice1_spec={
+'slice_spec':{'name':'ts_slicetest1',
+              'instantiation':'plc-instantiated',
+              'url':'http://foo@ffo.com',
+              'description':'testslice the first slice for the site testsite',
+              'max_nodes':1000
+              },
+'slice_users' : [ site_users['pi_spec'], site_users['tech_spec'],site_users['tech_user_spec']],
+'slice_nodes' : [ site1_nodes['node1'], site1_nodes['node2'] ],
+}
+slices_specs= [slice1_spec]
+
+
+
diff --git a/system/TestMain.py b/system/TestMain.py
new file mode 100755 (executable)
index 0000000..498cfbb
--- /dev/null
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+
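+# Usage sketch -- the single positional argument is the URL of a myplc rpm:
+#   ./TestMain.py [-v] [-g VirtualFile-<hostname>] [-d <X display>] <myplc-rpm-url>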
+import os, sys, time
+from optparse import OptionParser
+from TestPlc import TestPlc
+from TestSite import TestSite
+from TestNode import TestNode
+import TestConfig
+import threading
+
+class TestMain:
+
+    subversion_id = "$Id$"
+
+    def __init__ (self):
+       self.path=os.path.dirname(sys.argv[0])
+
+    def main (self):
+        try:
+            usage = """usage: %prog [options] MyplcURL"""
+            parser=OptionParser(usage=usage,version=self.subversion_id)
+            # verbosity
+            parser.add_option("-v","--verbose", action="store_true", dest="verbose", default=False, 
+                              help="Run in verbose mode")
+            # debug mode
+            parser.add_option("-g","--debug", action="store", dest="debug",
+                              help="Debug mode: directly launch the virtual machine found under the given directory")
+            # X display on which the virtual machines will show up
+            parser.add_option("-d","--display", action="store", dest="Xterm", default='bellami:0.0',
+                              help="export the display to the specified X server")
+        
+            (self.options, self.args) = parser.parse_args()
+
+            display=''
+            url=''
+            test_plcs=[]
+            test_nodes=[]
+            pids=[]
+            timset=time.strftime("%H:%M:%S", time.localtime())
+            # check that a myplc URL was provided
+            if (len (self.args)):
+                url=self.args[0]
+                print 'the myplc url is',url
+            else:
+                print "Please provide a valid URL for the myplc install"
+                sys.exit(1)
+            #check where to display Virtual machines
+            if (self.options.Xterm):
+                display=self.options.Xterm
+                print 'the display is', display
+            #the debug option 
+            if (self.options.debug):
+                file=self.path+'/'+self.options.debug+'/My_Virtual_Machine.vmx'
+                if os.path.exists(file):
+                    print 'vmx file is',file
+                    arg='< /dev/null &>/dev/null &'
+                    os.system('DISPLAY=%s vmplayer %s %s '%(display,file,arg))
+                    sys.exit(0)
+                else:
+                    print "no way to find the virtual file"
+                    sys.exit(1)
+            
+            for plc_spec in TestConfig.plc_specs:
+                print '========>Creating plc at '+timset+':',plc_spec
+                test_plc = TestPlc(plc_spec)
+                test_plc.connect()
+                test_plcs.append(test_plc)
+                test_plc.cleanup_plc()
+                print '========>Installing myplc at: ', timset
+                if (len(sys.argv) > 1):
+                    test_plc.install_plc(url)
+                    test_plc.config_plc(plc_spec)
+                else :
+                    print "========>PLease insert a valid url for the myplc install"
+                ##create all the sites under the new plc,and then populate them with
+                ##nodes,persons and slices
+                for site_spec in plc_spec['sites']:
+                    print '========>Creating site at '+timset+ ':',site_spec
+                    test_site = test_plc.init_site(site_spec)
+                    for node_spec in site_spec['nodes']:
+                        print '========>Creating node at  '+ timset+' :',node_spec
+                        test_nodes.append(node_spec)
+                        test_node = test_plc.init_node(test_site,node_spec,self.path)
+                test_node.create_slice ("pi")
+                print 'Running checkers and vmware players for the site nodes at :',timset
+                test_site.run_vmware(test_nodes,display)
+                if(test_site.node_check_status(test_nodes,True)):
+                    test_plc.db_dump()
+                    test_site.slice_access(test_nodes)
+                    print "all is alright"
+                    return 0
+                else :
+                    print "There is something wrong"
+                    sys.exit(1)
+        except Exception, e:
+            print str(e)
+            sys.exit(1)
+           
+if __name__ == "__main__":
+    TestMain().main()
diff --git a/system/TestNode.py b/system/TestNode.py
new file mode 100644 (file)
index 0000000..0ca91ba
--- /dev/null
@@ -0,0 +1,94 @@
+import os
+import sys
+import time
+import base64
+import TestConfig
+import xmlrpclib
+
+class TestNode:
+
+    def __init__ (self,test_plc,test_site,node_spec):
+       self.test_plc=test_plc
+       self.test_site=test_site
+       self.node_spec=node_spec
+        self.timset=time.strftime("%H:%M:%S", time.localtime())
+    def create_node (self,role):
+        auth = self.test_site.anyuser_auth (role)
+        filter={'boot_state':'rins'}
+        try:
+            if (role=='pi' and self.node_spec['owned']=='pi'):
+                self.node_id = self.test_plc.server.AddNode(auth,
+                                                            self.test_site.site_spec['site_fields']['login_base'],
+                                                            self.node_spec)
+                self.test_plc.server.AddNodeNetwork(auth,self.node_id,
+                                                    self.node_spec['network'])
+                self.test_plc.server.UpdateNode(auth, self.node_id, filter)
+                return self.node_id
+            
+            elif (role=='tech' and self.node_spec['owned']=='tech'):
+                self.node_id = self.test_plc.server.AddNode(auth,
+                                                            self.test_site.site_spec['site_fields']['login_base'],
+                                                            self.node_spec)
+                self.test_plc.server.AddNodeNetwork(auth,self.node_id,
+                                                    self.node_spec['network'])
+                self.test_plc.server.UpdateNode(auth, self.node_id, filter)
+                return self.node_id
+        except Exception, e:
+                print str(e)
+
+    def create_slice(self, role):
+        auth = self.test_site.anyuser_auth (role)
+        liste_hosts=[]
+        #for l in liste_nodes_spec :
+        #    liste_hosts.append(l['hostname'])
+        try:
+            for slicespec in TestConfig.slices_specs :
+                print '========>Creating slice at :'+self.timset+' : ',slicespec
+                slice_id=self.test_plc.server.AddSlice(auth,slicespec['slice_spec'])
+                for sliceuser in slicespec['slice_users']:
+                    self.test_plc.server.AddPersonToSlice(auth, sliceuser['email'], slice_id) ## assign the person to the slice
+                for slicenode in slicespec['slice_nodes']:
+                    liste_hosts.append(slicenode['hostname'])
+                self.test_plc.server.AddSliceToNodes(auth, slice_id, liste_hosts) ## add the slice to the specified nodes
+            print 'done creating slices'
+        except Exception, e:
+            print str(e)
+            sys.exit(1)
+        
+    def conffile(self,image,hostname,path):
+        try:
+            file=path+'/VirtualFile-'+hostname+'/My_Virtual_Machine.vmx'
+            f2=open(file,'w')
+            
+            f1=open(path+'/My-Virtual-Machine-model/My_Virtual_Machine.vmx','r')
+            while 1:
+                txt = f1.readline()
+                if txt=='':
+                    f1.close()
+                    f2.close()
+                    break
+                if txt[0]!='*' :
+                    f2.write(txt)
+                else :
+                    f2.write('ide1:1.fileName = '+'"'+image+'"' '\n')
+          
+            
+        except Exception, e:
+            print str(e)
+
+    def create_boot_cd(self,node_spec,path):
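+        # copy the VM model into VirtualFile-<hostname>, fetch the node's
+        # boot ISO through GetBootMedium (returned base64-encoded), save it
+        # as boot_file.iso, then patch the vmx through conffile()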
+        try:
+            os.system('mkdir  -p  %s/VirtualFile-%s  &&  cp  %s/My-Virtual-Machine-model/*  %s/VirtualFile-%s'
+                      %(path, node_spec['hostname'], path, path, node_spec['hostname']))
+            link1=self.test_plc.server.GetBootMedium(self.test_plc.auth_root(),
+                                                     node_spec['hostname'], 'node-iso', '')
+            if (link1 == ''):
+                raise Exception, 'boot.iso not found'
+            file1=open(path+'/VirtualFile-'+node_spec['hostname']+'/boot_file.iso','w')
+            file1.write(base64.b64decode(link1))
+            file1.close()
+            print '========> boot cd created for :',self.node_spec['hostname']
+            self.conffile('boot_file.iso',self.node_spec['hostname'], path) # patch the vmx conf file for this vmware-based node
+        except Exception, e:
+            print str(e)
+            sys.exit(1)
diff --git a/system/TestPlc.py b/system/TestPlc.py
new file mode 100644 (file)
index 0000000..d183e83
--- /dev/null
@@ -0,0 +1,101 @@
+import os
+import sys
+import xmlrpclib
+import datetime
+from TestSite import TestSite
+from TestNode import TestNode
+
+class TestPlc:
+
+    def __init__ (self,plc_spec):
+       self.plc_spec=plc_spec
+       self.url="https://%s:443/PLCAPI/"%plc_spec['hostname']
+       self.server=xmlrpclib.Server(self.url,allow_none=True)
+       self.path=os.path.dirname(sys.argv[0])
+        
+    def connect (self):
+	# tricky : define the API methods on this object
+       pass
+    
+    def auth_root (self):
+       return {'Username':self.plc_spec['PLC_ROOT_USER'],
+               'AuthMethod':'password',
+               'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
+                'Role' : self.plc_spec['role']
+                }
+    def affiche_results(self, test_case_name, status, timers):
+        timers=datetime.datetime.now()
+        fileHandle = open (self.path+'/results.txt', 'a' )
+        fileHandle.write ( str(test_case_name)+'                    ' +str(status)+'                    '+str(timers))
+        fileHandle.close()
+
+        
+
+    def config_plc(self,plc_spec):
+# Thierry 2007-07-05 
+# now plc-config-tty silently creates needed directories
+#        os.system('mkdir -p /etc/planetlab/configs')
+
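+        # build an input script for plc-config-tty: 'e VAR' followed by the
+        # value for each variable, then 'w' (write) and 'q' (quit); see the
+        # sample tty_conf file for the resulting format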
+        fileconf=open('tty_conf','w')
+        for var in [ 'PLC_NAME',
+                     'PLC_ROOT_PASSWORD',
+                     'PLC_ROOT_USER',
+                     'PLC_MAIL_ENABLED',
+                     'PLC_MAIL_SUPPORT_ADDRESS',
+                     'PLC_DB_HOST',
+                     'PLC_API_HOST',
+                     'PLC_WWW_HOST',
+                     'PLC_BOOT_HOST',
+                     'PLC_NET_DNS1',
+                     'PLC_NET_DNS2']:
+            fileconf.write ('e %s\n%s\n'%(var,plc_spec[var]))
+        fileconf.write('w\n')
+        fileconf.write('q\n')
+        fileconf.close()
+        os.system('set -x ; cat tty_conf')
+        os.system('set -x ; chroot /plc/root  plc-config-tty < tty_conf')
+        os.system('set -x ; service plc start')
+        os.system('set -x; service sendmail stop')
+        os.system('set -x; chroot /plc/root service sendmail restart')
+        
+    def cleanup_plc(self):
+        os.system('service plc safestop')
+        ##### detect the last installed myplc version and remove it
+        os.system('set -x; rpm -e myplc')
+        print "=======================>Remove Myplc DONE!"
+        ##### Clean up the /plc directory
+        os.system('set -x; rm -rf  /plc/data')
+        print "=======================>Clean up  DONE!"
+        
+    def install_plc(self,url):
+        print url
+        os.system('set -x; rpm -ivh '+url)
+        os.system('set -x; service plc mount')
+      
+    def init_site (self,site_spec):
+        test_site = TestSite (self,site_spec)
+        test_site.create_site()
+        for key in site_spec['users']:
+            test_site.create_user(key)
+            test_site.enable_user(key)
+            test_site.add_key_user(key)            
+        return test_site
+
+    def init_node (self,test_site,node_spec,path):
+
+        test_node = TestNode(self, test_site, node_spec)
+        test_node.create_node ("pi")
+        test_node.create_node ("tech")
+        test_node.create_boot_cd(node_spec,path)
+        return test_node
+    
+    def db_dump(self):
+        
+        t=datetime.datetime.now()
+        d=t.date()
+        dump='/var/lib/pgsql/backups/planetlab4-'+str(d)+'-2nodes'
+        os.system('chroot /plc/root pg_dump -U pgsqluser planetlab4 -f '+ dump)
+        print 'dump is done',dump
+        
+
+        
diff --git a/system/TestRestore.py b/system/TestRestore.py
new file mode 100755 (executable)
index 0000000..fa015c8
--- /dev/null
@@ -0,0 +1,109 @@
+#!/usr/bin/env python
+
+import os, sys, time
+from optparse import OptionParser
+import xmlrpclib
+
+class TestRestore:
+
+    subversion_id = "$Id$"
+
+    def __init__ (self):
+        self.url="https://localhost:443/PLCAPI/"
+       self.server=xmlrpclib.Server(self.url,allow_none=True)
+        self.path=os.path.dirname(sys.argv[0])
+
+###################
+    def auth_root (self):
+       return {'Username':'root@onelab-test.inria.fr',
+               'AuthMethod':'password',
+               'AuthString':'test++',
+                'Role' : 'root'
+                }
+    
+############## check if the db version exists
+    def check_dir(self,dbname):
+
+        config_file = "/plc/data/var/lib/pgsql/backups/"+dbname
+        if (os.path.isfile (config_file)):
+            print "==>dbversion found "
+            return 1
+        else:
+            print "\n %s  non-existing Bdd version\n" % config_file
+            return 0
+            
+############## restore one db dump and bring its nodes back up
+    def restore_db(self,db,display):
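+        # stop httpd, drop and recreate planetlab4, reload the given dump,
+        # restart httpd, then relaunch a vmplayer for every node found in
+        # the restored database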
+        try:
+            list_host=[]
+            ##stop httpd service
+            os.system('chroot /plc/root  service httpd stop')
+            ## dropping
+            os.system(' echo drop database planetlab4 |chroot /plc/root psql --user=pgsqluser template1')
+            ##creating
+            os.system('chroot /plc/root  createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
+            ##populating
+            os.system('chroot /plc/root psql -U pgsqluser planetlab4 -f /var/lib/pgsql/backups/'+db)
+            ##starting httpd service
+            os.system('chroot /plc/root  service httpd start')
+
+            print 'db.restored'
+            hosts=self.server.GetNodes(self.auth_root())
+            for host in hosts:
+                print host['hostname']
+                list_host.append(host['hostname'])
+                
+            for l in list_host :
+                print display
+                os.system('DISPLAY=%s vmplayer %s/VirtualFile-%s/My_Virtual_Machine.vmx &'%(display,self.path,l))
+
+        except Exception, e:
+            print str(e)
+###########################    
+
+
+
+
+    def main (self):
+        try:
+            usage = """usage: %prog [options] BDDversion"""
+            parser=OptionParser(usage=usage,version=self.subversion_id)
+            # verbosity
+            parser.add_option("-v","--verbose", action="store_true", dest="verbose", default=False, 
+                              help="Run in verbose mode")
+            # X display on which to launch the vmplayers
+            parser.add_option("-d","--display", action="store", dest="Xdisplay", default='bellami:0.0',
+                              help="export the display to the specified X server")
+       
+        
+            (self.options, self.args) = parser.parse_args()
+            
+            hosts=[]
+            i=0
+            dirname =''
+            display=''
+           
+            
+            if (self.options.Xdisplay):
+                display=self.options.Xdisplay
+                print 'the display is', display
+       
+                
+            if (len(self.args) == 0 ):
+                parser.print_help()
+                sys.exit(1)
+            else:
+                dirname=self.args[0]    
+               
+            if (not (self.check_dir(dirname))):
+                 parser.print_help()
+                 sys.exit(1)
+                 
+            self.restore_db(dirname,display)
+            
+        except Exception, e:
+            print str(e)
+            
+if __name__ == "__main__":
+    TestRestore().main()       
+     
diff --git a/system/TestSite.py b/system/TestSite.py
new file mode 100644 (file)
index 0000000..f9d4caa
--- /dev/null
@@ -0,0 +1,223 @@
+import os
+import sys
+import datetime
+import time
+from TestConfig import *
+import xmlrpclib
+
+class TestSite:
+
+    def __init__ (self,test_plc,site_spec):
+       self.test_plc=test_plc
+       self.site_spec=site_spec
+        self.timset=time.strftime("%H:%M:%S", time.localtime())
+        
+    def create_site (self):
+        try:
+            print self.test_plc.auth_root()
+            self.site_id = self.test_plc.server.AddSite(self.test_plc.auth_root(),
+                                                       self.site_spec['site_fields'])
+           self.test_plc.server.AddSiteAddress(self.test_plc.auth_root(),self.site_id,
+                                      self.site_spec['site_address'])
+            
+           return self.site_id
+        except Exception, e:
+           print str(e)
+            
+    def get_site_id(self):
+	return self.site_id
+
+    def create_user (self, user_spec):
+        try:
+            i=0
+            print '========>Adding user at '+self.timset+ ': ',user_spec
+            self.person_id=self.test_plc.server.AddPerson(self.test_plc.auth_root(),
+                                                          user_spec)
+            self.test_plc.server.UpdatePerson(self.test_plc.auth_root(),
+                                              self.person_id,{'enabled': True})
+            for role in user_spec['roles']:
+                self.test_plc.server.AddRoleToPerson(self.test_plc.auth_root(),
+                                                     role,user_spec['email'])
+            self.test_plc.server.AddPersonToSite(self.test_plc.auth_root(),
+                                                 user_spec['email'],
+                                                 self.site_spec['site_fields']['login_base'])
+        except Exception,e:
+            print str(e)
+            
+    def enable_user (self, user_spec):
+        try:
+            persones=self.test_plc.server.GetPersons(self.test_plc.auth_root())
+            for person in persones:
+                if (not person['enabled']):
+                    self.test_plc.server.UpdatePerson(self.test_plc.auth_root(),
+                                                      person['person_id'],
+                                                      {'enabled': True})
+        except Exception,e:
+            print str(e)
+            
+    def add_key_user(self,user_spec):
+        try:
+            auth=""
+            for userspec in self.site_spec['users']:
+                if(user_spec == userspec):
+                    for role in userspec['roles']:
+                        auth=auth+role
+                    print auth
+                    self.test_plc.server.AddPersonKey(self.anyuser_auth(auth),
+                                                      user_spec['email'], key)
+        except Exception, e:
+            print str(e)
+            
+    def anyuser_auth (self,key):
+        for person in self.site_spec['users']:
+            if person['auth_meth']== key :
+                return {'Username':person['email'],
+                        'AuthMethod':'password',
+                        'AuthString':person['password'],
+                        'Role':person['roles'][0],
+                        }
+
+    def node_check_status(self,liste_nodes,bool):
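+        # poll each node's boot_state every 15 seconds, for roughly 10
+        # minutes, until it reaches 'boot' (success) or 'dbg' (failure)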
+        try:
+            ret_value=True    
+            filter=['boot_state']
+            bt={'boot_state':'boot'}
+            dbg={'boot_state':'dbg'}
+            secondes=15
+            start_time = datetime.datetime.now() ## get the current time
+            dead_time=datetime.datetime.now()+ datetime.timedelta(minutes=10) ## add 10 minutes
+            start=time.strftime("%H:%M:%S", time.localtime())
+            print "time at the beginning is :",start
+            
+            for l in liste_nodes :
+                while (bool):
+                    node_status=self.test_plc.server.GetNodes(self.test_plc.auth_root(),
+                                                              l['hostname'], filter)
+                    timset=time.strftime("%H:%M:%S", time.localtime())
+                    print 'the current status of node '+l['hostname']+' at '+str(timset)+' is :',node_status
+                    try:
+                        if (node_status[0] == bt):
+                            test_name='\nTest Installation Node hosted: '+l['hostname']
+                            self.test_plc.affiche_results(test_name, 'Successful', '')##printing out the result
+                            break ## exit the loop, the node is installed
+                        elif (node_status[0] ==dbg):
+                            test_name='\nTest Installation Node hosted: '+l['hostname']
+                            self.test_plc.affiche_results(test_name, 'In Debug', '')##printing out the result
+                            bool=False
+                            break ## exit the loop, the node fell into debug
+                        elif ( start_time  <= dead_time ) :
+                            start_time=datetime.datetime.now()+ datetime.timedelta(minutes=2)
+                            time.sleep(secondes)
+                        else: bool=False
+                    except OSError ,e :
+                        bool=False
+                        str(e)
+                if (bool):
+                    print "Node correctly instaled and booted "
+                else :
+                    print "Node not fully booted "##cheek if configuration file already exist
+                    ret_value=False
+                    test_name='\nTest Installation Node Hosted: ',l['hostname']
+                    self.test_plc.affiche_results(test_name, 'Failure', '')##printing out the result
+            
+            end=time.strftime("%H:%M:%S", time.localtime())
+            print "time at the end is :",end  ##converting time to secondes
+            return ret_value
+        except Exception, e:
+            print str(e)
+            print "vmware killed if problems occur  "
+            time.sleep(10)
+            self.kill_all_vmwares()
+            sys.exit(1)
+            
+    def kill_all_vmwares(self):
+        os.system('pgrep vmware | xargs -r kill')
+        os.system('pgrep vmplayer | xargs -r kill ')
+        os.system('pgrep vmware | xargs -r kill -9')
+        os.system('pgrep vmplayer | xargs -r kill -9')
+        
+    def run_vmware(self,liste_nodes,display):
+        path=os.path.dirname(sys.argv[0])
+        print path
+        print " kill last vmware before any new  installation  "
+        self.kill_all_vmwares()
+        print 'the vms will be displayed on ========>', display
+        arg='< /dev/null &>/dev/null &'
+        for l in liste_nodes :
+            #os.system('set -x; vmplayer  VirtualFile-%s/My_Virtual_Machine.vmx  %s '%(l['hostname'],arg))
+            os.system('set -x; DISPLAY=%s vmplayer %s/VirtualFile-%s/My_Virtual_Machine.vmx %s '%(display,path,l['hostname'],arg))
+
+    def delete_known_hosts(self):
+        try:
+            file1=open('/root/.ssh/known_hosts','r')
+            file2=open('/root/.ssh/known_hosts_temp','w')
+            while 1:
+                txt = file1.readline()
+                if txt=='':
+                    file1.close()
+                    file2.close()
+                    break
+                if txt[0:4]!='test' :
+                    file2.write(txt)
+            
+                
+            os.system('mv -f /root/.ssh/known_hosts_temp  /root/.ssh/known_hosts')
+        except Exception, e:
+            print str(e)
+
+    def slice_access(self,liste_nodes):
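+        # for each slice and node: restart the node manager over the root
+        # ssh key, then repeatedly try to log into the slice account until
+        # it answers or the deadline expires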
+        try:
+            bool=True
+            bool1=True
+            secondes=15
+            self.delete_known_hosts()
+            start_time = datetime.datetime.now()
+            dead_time=start_time + datetime.timedelta(minutes=3) ## add 3 minutes
+            for slice in slices_specs:
+                for slicenode in slice['slice_nodes']:
+                    timset=time.strftime("%H:%M:%S", time.localtime())
+                    while(bool):
+                        print '=========>Try to Restart the Node Manager on %s at %s:'%(slicenode['hostname'],str(timset))
+                        access=os.system('set -x; ssh -i /etc/planetlab/root_ssh_key.rsa  root@%s service nm restart'%slicenode['hostname'] )
+                        if (access==0):
+                            print '=========>Node Manager Restarted on %s at %s:'%(slicenode['hostname'] ,str(timset))
+                            while(bool1):
+                                print '=========>Try to connect to the %s@%s at %s '%(slice['slice_spec']['name'],slicenode['hostname'],str(time.strftime("%H:%M:%S", time.localtime())))
+                                Date=os.system('set -x; ssh -i ~/.ssh/slices.rsa %s@%s echo "The Actual Time here is;" date'%(slice['slice_spec']['name'],slicenode['hostname']))
+                                if (Date==0):
+                                    break
+                                elif ( start_time  <= dead_time ) :
+                                    start_time=datetime.datetime.now()+ datetime.timedelta(seconds=30)
+                                    time.sleep(secondes)
+                                else:
+                                    bool1=False
+                            if(bool1):
+                                print '=========>connected to the '+slice['slice_spec']['name']+'@'+slicenode['hostname'] +'--->'
+                            else:
+                                print '=========>access to the slice was denied, trying one last time'
+                                print '=========>Retry to Restart the Node Manager on %s at %s:'%(slicenode['hostname'],str(timset))
+                                access=os.system('set -x; ssh -i /etc/planetlab/root_ssh_key.rsa  root@%s service nm restart'%slicenode['hostname'] )
+                                if (access==0):
+                                    print '=========>Retry to connect to the %s@%s at %s '%(slice['slice_spec']['name'],slicenode['hostname'],str(time.strftime("%H:%M:%S", time.localtime())))
+                                    Date=os.system('set -x; ssh -i ~/.ssh/slices.rsa %s@%s echo "The Actual Time here is;" date'%(slice['slice_spec']['name'],slicenode['hostname'] ))
+                                    if (Date==0):
+                                        print '=========>connected to the '+slice['slice_spec']['name']+'@'+slicenode['hostname']+'--->'
+                                    else:
+                                        print '=========>the access is finally denied'
+                                        sys.exit(1)
+                                else :"=========>Last try failed"
+                            break
+                        elif ( start_time  <= dead_time ) :
+                            start_time=datetime.datetime.now()+ datetime.timedelta(minutes=1)
+                            time.sleep(secondes)
+                        else:
+                            bool=False
+                                
+                    if (not bool):
+                        print 'Node manager problems'
+                        sys.exit(1)
+                    
+        except Exception, e:
+            print str(e)
+            sys.exit(1)
+   
diff --git a/system/tty_conf b/system/tty_conf
new file mode 100644 (file)
index 0000000..1cea152
--- /dev/null
@@ -0,0 +1,24 @@
+e PLC_NAME
+TestLab
+e PLC_ROOT_PASSWORD
+test++
+e PLC_ROOT_USER
+root@onelab-test.inria.fr
+e PLC_MAIL_ENABLED
+true
+e PLC_MAIL_SUPPORT_ADDRESS
+mohamed-amine.chaoui@sophia.inria.fr
+e PLC_DB_HOST
+onelab-test.inria.fr
+e PLC_API_HOST
+onelab-test.inria.fr
+e PLC_WWW_HOST
+onelab-test.inria.fr
+e PLC_BOOT_HOST
+onelab-test.inria.fr
+e PLC_NET_DNS1
+138.96.0.10
+e PLC_NET_DNS2
+138.96.0.11
+w
+q