checking system slice will try netflow and drl
[tests.git] / system / TestNode.py
index 11e9391..9a980f5 100644 (file)
@@ -2,7 +2,6 @@
 # Copyright (C) 2010 INRIA 
 #
 import sys, os, os.path, time, base64
-import xmlrpclib
 
 import utils
 from TestUser import TestUser
@@ -67,17 +66,30 @@ class TestNode:
         utils.header("node %s created by user %s"%(self.name(),test_user.name()))
         rootauth=self.test_plc.auth_root()
         server = self.test_plc.apiserver
-        server.AddNode(userauth,
-                       self.test_site.site_spec['site_fields']['login_base'],
-                       self.node_spec['node_fields'])
+        node_id=server.AddNode(userauth,
+                               self.test_site.site_spec['site_fields']['login_base'],
+                               self.node_spec['node_fields'])
         server.SetNodePlainBootstrapfs(userauth,
                                        self.node_spec['node_fields']['hostname'],
                                        'YES')
         # create as reinstall to avoid user confirmation
         server.UpdateNode(userauth, self.name(), {'boot_state':'reinstall'})
-        # populate network interfaces - primary
-        server.AddInterface(userauth,self.name(),
-                                            self.node_spec['interface_fields'])
+
+        if not self.test_plc.has_addresses_api():
+#            print 'USING OLD INTERFACE'
+            # populate network interfaces - primary
+            server.AddInterface(userauth,self.name(),
+                                self.node_spec['interface_fields'])
+        else:
+#            print 'USING NEW INTERFACE with separate ip addresses'
+            # this is for setting the 'dns' stuff that now goes with the node
+            server.UpdateNode (userauth, self.name(), self.node_spec['node_fields_nint'])
+            interface_id = server.AddInterface (userauth, self.name(),self.node_spec['interface_fields_nint'])
+            server.AddIpAddress (userauth, interface_id, self.node_spec['ipaddress_fields'])
+            route_fields=self.node_spec['route_fields']
+            route_fields['interface_id']=interface_id
+            server.AddRoute (userauth, node_id, route_fields)
+            pass
         # populate network interfaces - others
         if self.node_spec.has_key('extra_interfaces'):
             for interface in self.node_spec['extra_interfaces']:
@@ -109,6 +121,7 @@ class TestNode:
 
     # Do most of the stuff locally - will be pushed on host_box - *not* the plc - later if needed
     def qemu_local_init(self):
+        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
         utils.system("rm -rf %s"%self.nodedir())
         utils.system("mkdir %s"%self.nodedir())
         if not self.is_qemu():
@@ -116,6 +129,7 @@ class TestNode:
         return utils.system("rsync -v -a --exclude .svn template-qemu/ %s/"%self.nodedir())==0
 
     def bootcd(self):
+        "all nodes: invoke GetBootMedium and store result locally"
         utils.header("Calling GetBootMedium for %s"%self.name())
         options = []
         if self.is_qemu():
@@ -136,21 +150,25 @@ class TestNode:
             return True
 
     def nodestate_reinstall (self):
+        "all nodes: mark PLCAPI boot_state as reinstall"
         self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                            self.name(),{'boot_state':'reinstall'})
         return True
     
     def nodestate_safeboot (self):
+        "all nodes: mark PLCAPI boot_state as safeboot"
         self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                            self.name(),{'boot_state':'safeboot'})
         return True
     
     def nodestate_boot (self):
+        "all nodes: mark PLCAPI boot_state as boot"
         self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                            self.name(),{'boot_state':'boot'})
         return True
 
     def nodestate_show (self):
+        "all nodes: show PLCAPI boot_state"
         if self.test_plc.options.dry_run:
             print "Dry_run: skipped getting current node state"
             return True
@@ -159,6 +177,7 @@ class TestNode:
         return True
     
     def qemu_local_config(self):
+        "all nodes: compute qemu config qemu.conf and store it locally"
         if not self.is_qemu():
             return
         mac=self.node_spec['interface_fields']['mac']
@@ -181,6 +200,7 @@ class TestNode:
         return True
 
     def qemu_export (self):
+        "all nodes: push local node-dep directory on the qemu box"
         # if relevant, push the qemu area onto the host box
         if self.test_box().is_local():
             return True
@@ -190,6 +210,7 @@ class TestNode:
         return self.test_box().copy(self.nodedir(),recursive=True)==0
             
     def qemu_start (self):
+        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
         model=self.node_spec['node_fields']['model']
         #starting the Qemu nodes before 
         if self.is_qemu():
@@ -199,6 +220,7 @@ class TestNode:
         return True
 
     def timestamp_qemu (self):
+        "all nodes: write the current timestamp in the node's local directory"
         test_box = self.test_box()
         test_box.run_in_buildname("mkdir -p %s"%self.nodedir())
         now=int(time.time())
@@ -235,6 +257,7 @@ class TestNode:
         self.test_box().test_ssh.fetch(remote_log,local_log)
 
     def keys_clear_known_hosts (self):
+        "remove test nodes entries from the local known_hosts file"
         TestSsh(self.name()).clear_known_hosts()
         return True
 
@@ -244,7 +267,7 @@ class TestNode:
 ###        # assuming we've run testplc.fetch_keys()
 ###        key = "keys/%(vservername)s.rsa"%locals()
         # fetch_keys doesn't grab the root key anymore
-        key = "keys/key1.rsa"
+        key = "keys/key_admin.rsa"
         return TestSsh(self.name(), buildname=self.buildname(), key=key)
 
     def check_hooks (self):
@@ -271,3 +294,26 @@ class TestNode:
             utils.header ("SUCCESS: node hook %s OK"%script_name)
             return True
 
+    def has_libvirt (self):
+        test_ssh=self.create_test_ssh()
+        return test_ssh.run ("rpm -q --quiet libvirt-client")==0
+
+    def _check_system_slice (self, slicename,dry_run=False):
+        sitename=self.test_plc.plc_spec['PLC_SLICE_PREFIX']
+        vservername="%s_%s"%(sitename,slicename)
+        test_ssh=self.create_test_ssh()
+        if self.has_libvirt():
+            utils.header("Checking system slice %s using virsh"%slicename)
+            return test_ssh.run("virsh --connect lxc:// list | grep -q ' %s '"%vservername,
+                                dry_run=dry_run)==0
+        else:
+            (retcod,output)=utils.output_of(test_ssh.actual_command("cat /vservers/%s/etc/slicefamily 2> /dev/null"%vservername))
+            # get last line only as ssh pollutes the output
+            slicefamily=output.split("\n")[-1]
+            utils.header("Found slicefamily '%s' for slice %s"%(slicefamily,slicename))
+            if retcod != 0: 
+                return False
+            utils.header("Checking system slice %s using vserver-stat"%slicename)
+            return test_ssh.run("vserver-stat | grep %s"%vservername,dry_run=dry_run)==0
+        
+