turn off testing of the omf feature that has been removed
diff --git a/system/TestSlice.py b/system/TestSlice.py
index 27a0ccc..bfe0b58 100644
+# -*- python3 -*-
+# Thierry Parmentelat <thierry.parmentelat@inria.fr>
+# Copyright (C) 2015 INRIA 
+#
 import utils
 import os, os.path
-import datetime
+from datetime import datetime, timedelta
 import time
 
 from TestKey import TestKey
 from TestUser import TestUser
-from TestNode import TestNode
+from TestNode import TestNode, CompleterTaskNodeSsh
+from TestSsh import TestSsh
+from Completer import CompleterTask
 
-class TestSlice:
+class CompleterTaskSliceSsh (CompleterTask):
+
+    def __init__ (self, test_plc, hostname, slicename, private_key, command, expected, dry_run):
+        self.test_plc = test_plc
+        self.hostname = hostname
+        self.slicename = slicename
+        self.private_key = private_key
+        self.command = command
+        self.dry_run = dry_run
+        self.expected = expected
+
+    def run (self, silent): 
+        site_spec, node_spec = self.test_plc.locate_hostname(self.hostname)   # located specs are unused below
+        test_ssh = TestSsh (self.hostname, key=self.private_key, username=self.slicename)
+        full_command = test_ssh.actual_command(self.command)
+        retcod = utils.system (full_command, silent=silent, timeout=10)
+        if self.dry_run:        return True
+        if self.expected:       return retcod == 0
+        else:                   return retcod != 0
 
-    def __init__ (self,test_plc,test_site,slice_spec):
-       self.test_plc=test_plc
-        self.test_site=test_site
-       self.slice_spec=slice_spec
+    def failure_epilogue (self):
+        if self.expected:
+            print("Could not ssh into sliver {}@{}".format(self.slicename, self.hostname))
+        else:
+            print("Could still ssh into sliver{}@{} (that was expected to be down)"\
+                .format(self.slicename, self.hostname))
+
+class TestSlice:
 
+    def __init__ (self, test_plc, test_site, slice_spec):
+        self.test_plc = test_plc
+        self.test_site = test_site
+        self.slice_spec = slice_spec
+        self.test_ssh = TestSsh(self.test_plc.test_ssh)
+        
     def name(self):
         return self.slice_spec['slice_fields']['name']
+    
+    def get_slice(self, slice_name):
+        for slice_spec in self.test_plc.plc_spec['slices']:
+            if slice_spec['slice_fields']['name'] == slice_name:
+                return slice_spec
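+        # falls through and returns None when no slice by that name exists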
 
-    def delete_slice(self):
+    def owner_auth(self):
         owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
-        auth = TestUser(self,self.test_site,owner_spec).auth()
-        slice_fields = self.slice_spec['slice_fields']
-        slice_name = slice_fields['name']
-        self.test_plc.server.DeleteSlice(auth,slice_fields['name'])
-        utils.header("Deleted slice %s"%slice_fields['name'])
+        return TestUser(self, self.test_site, owner_spec).auth()
 
-    
+    def slice_name (self):
+        return self.slice_spec['slice_fields']['name']
+
+    # init slice with people, and then add nodes 
     def create_slice(self):
-        owner_spec = self.test_site.locate_user(self.slice_spec['owner'])
-        auth = TestUser(self,self.test_site,owner_spec).auth()
+        auth = self.owner_auth()
         slice_fields = self.slice_spec['slice_fields']
         slice_name = slice_fields['name']
-
-        self.test_plc.server.AddSlice(auth,slice_fields)
+        utils.header("Creating slice {}".format(slice_name))
+        self.test_plc.apiserver.AddSlice(auth, slice_fields)
         for username in self.slice_spec['usernames']:
-                user_spec=self.test_site.locate_user(username)
-                test_user=TestUser(self,self.test_site,user_spec)
-                self.test_plc.server.AddPersonToSlice(auth, test_user.name(), slice_name)
+            user_spec = self.test_site.locate_user(username)
+            test_user = TestUser(self, self.test_site, user_spec)
+            self.test_plc.apiserver.AddPersonToSlice(auth, test_user.name(), slice_name)
+        # add initscript code or name as appropriate
+        if 'initscriptcode' in self.slice_spec:
+            iscode = self.slice_spec['initscriptcode']
+            utils.header("Adding initscript code {} in {}".format(iscode, slice_name))
+            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
+                                                'initscript_code', iscode)
+        elif 'initscriptname' in self.slice_spec:
+            isname = self.slice_spec['initscriptname']
+            utils.header("Adding initscript name {} in {}".format(isname, slice_name))
+            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,
+                                                'initscript', isname)
+# omf-friendly slices is a deprecated feature
+#        if 'omf-friendly' in self.slice_spec:
+#            utils.header("Making slice {} OMF-friendly".format(slice_name))
+#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'vref', 'omf')
+#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name, 'omf_control', 'yes')
+#
+# setting vref directly like this was useful for multi-arch tests long ago - see wifilab
+# however this should rather use other tags by now, so we drop this for now
+#        if self.slice_spec.has_key ('vref'):
+#            vref_value = self.slice_spec['vref']
+#            self.test_plc.apiserver.AddSliceTag(self.test_plc.auth_root(), slice_name,'vref',vref_value)
+        # epilogue
+        self.add_nodes()
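+
+    # a minimal sketch (hypothetical values) of the slice_spec shape that
+    # create_slice() and add_nodes() read:
+    #   { 'slice_fields'   : { 'name' : 'inri_slice1', ... },
+    #     'owner'          : 'pi_username',
+    #     'usernames'      : [ 'user1' ],
+    #     'initscriptname' : 'some-initscript',    # or 'initscriptcode'
+    #     'nodenames'      : [ 'node1.example.org' ] }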
+
+    def check_vsys_defaults (self, options, *args, **kwds):
+        "check vsys tags match PLC_VSYS_DEFAULTS"
+        auth = self.owner_auth()
+        slice_fields = self.slice_spec['slice_fields']
+        slice_name = slice_fields['name']
+        vsys_tags = self.test_plc.apiserver.GetSliceTags (auth, {'tagname' : 'vsys', 'name' : slice_name})
+        values = [st['value'] for st in vsys_tags]
+        expected = self.test_plc.plc_spec['expected_vsys_tags']
+        result = set(values) == set(expected)
+        if not result:
+            print('Check vsys defaults with slice {}'.format(slice_name))
+            print('Expected {}'.format(expected))
+            print('Got {}'.format(values))
+        return result
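+
+    # note: plc_spec['expected_vsys_tags'] is assumed to be a plain list of
+    # vsys script names, e.g. (hypothetical) ['fd_tuntap', 'vif_up'];
+    # ordering is irrelevant since both sides are compared as sets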
 
+    # just add the nodes and handle tags
+    def add_nodes (self):
+        auth = self.owner_auth()
+        slice_name = self.slice_name()
         hostnames=[]
         for nodename in self.slice_spec['nodenames']:
             node_spec=self.test_site.locate_node(nodename)
-            test_node=TestNode(self,self.test_site,node_spec)
+            test_node = TestNode(self.test_plc, self.test_site, node_spec)
             hostnames += [test_node.name()]
-        utils.header("Adding %r in %s"%(hostnames,slice_name))
-        self.test_plc.server.AddSliceToNodes(auth, slice_name, hostnames)
-        if self.slice_spec.has_key('initscriptname'):
-            isname=self.slice_spec['initscriptname']
-            utils.header("Adding initscript %s in %s"%(isname,slice_name))
-            self.test_plc.server.AddSliceAttribute(self.test_plc.auth_root(), slice_name,'initscript',isname)
+        utils.header("Adding {} in {}".format(hostnames, slice_name))
+        self.test_plc.apiserver.AddSliceToNodes(auth, slice_name, hostnames)
         
-    def clear_known_hosts (self):
-        utils.header("Messing with known_hosts for slice %s"%self.name())
-        # scan nodenames
-        for nodename in self.slice_spec['nodenames']:
-            self.test_plc.run_in_guest("sed -i -e /^%s/d /root/.ssh/known_hosts"%nodename)
-        #scan public key and update the known_host file in the root image
-        self.test_plc.scan_publicKeys(self.slice_spec['nodenames'])
-        
-    def locate_key(self,slice_spec):
-        # locate the first avail. key
-        found=False
-        for username in slice_spec['usernames']:
+    # trash the slice altogether
+    def delete_slice(self):
+        auth = self.owner_auth()
+        slice_name = self.slice_name()
+        utils.header("Deleting slice {}".format(slice_name))
+        self.test_plc.apiserver.DeleteSlice(auth, slice_name)
+
+    # keep the slice alive and just delete nodes
+    def delete_nodes (self):
+        auth = self.owner_auth()
+        slice_name = self.slice_name()
+        print('retrieving slice {}'.format(slice_name))
+        slice = self.test_plc.apiserver.GetSlices(auth, slice_name)[0]
+        node_ids = slice['node_ids']
+        utils.header ("Deleting {} nodes from slice {}"\
+                      .format(len(node_ids), slice_name))
+        self.test_plc.apiserver.DeleteSliceFromNodes (auth, slice_name, node_ids)
+
+    def locate_private_key(self):
+        key_names = []
+        for username in self.slice_spec['usernames']:
             user_spec=self.test_site.locate_user(username)
-            for keyname in user_spec['keynames']:
-                key_spec=self.test_plc.locate_key(keyname)
-                test_key=TestKey(self.test_plc,key_spec)
-                publickey=test_key.publicpath()
-                privatekey=test_key.privatepath()
-                keyname=test_key.name()
-                if os.path.isfile(publickey) and os.path.isfile(privatekey):
-                    found=True
-        #create dir in plc root image
-        remote_privatekey="/root/keys/%s.rsa"%keyname
-        if not os.path.isfile(remote_privatekey):
-            self.test_plc.run_in_guest("mkdir  /root/keys" )
-            self.test_plc.copy_in_guest(privatekey,remote_privatekey,True)
-
-        return (found,remote_privatekey)
-
-    def do_check_slice(self,options):
-        bool=True
-        self.clear_known_hosts()
-        start_time = datetime.datetime.now()
-        dead_time=start_time + datetime.timedelta(minutes=15)
+            key_names += user_spec['key_names']
+        return self.test_plc.locate_private_key_from_key_names (key_names)
+
+    # for TestPlc.slice_mapper__tasks
+    # i.e. returns a list of CompleterTasks that are merged into the same Completer run
+    # to avoid waiting for as many slices as the Plc has
+    # also the __doc__ lines are used for the TestPlc methods, e.g. just 'ssh_slice'
+    def ssh_slice__tasks (self, options, *args, **kwds):
+        "tries to ssh-enter the slice with the user key, to check for slice creation"
+        return self.ssh_tasks(options, expected=True, *args, **kwds)
+
+    # when we expect the slice is not reachable
+    def ssh_slice_off__tasks (self, options, *args, **kwds):
+        "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
+        return self.ssh_tasks(options, expected=False, *args, **kwds)
+
+    def ssh_tasks(self, options, expected=True, command=None):
+#                     timeout_minutes=20, silent_minutes=10, period_seconds=15):
+#        timeout  = timedelta(minutes=timeout_minutes)
+#        graceout = timedelta(minutes=silent_minutes)
+#        period   = timedelta(seconds=period_seconds)
+        if not command:
+            command = "echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
+        # locate a key
+        private_key = self.locate_private_key()
+        if not private_key:
+            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
+            return False
+
+        # convert nodenames to real hostnames
+        if expected:    msg="ssh slice access enabled"
+        else:           msg="ssh slice access disabled"
+        utils.header("checking for {} -- slice {}".format(msg, self.name()))
+
+        tasks = []
+        slicename = self.name()
+        dry_run = getattr(options, 'dry_run', False)
+        for nodename in self.slice_spec['nodenames']:
+            site_spec, node_spec = self.test_plc.locate_node(nodename)
+            tasks.append( CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
+                                                slicename, private_key, command, expected, dry_run))
+        return tasks
+
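+    # a rough sketch of how the task lists get consumed (assumed API - see
+    # Completer.py and TestPlc.slice_mapper__tasks for the authoritative code):
+    # the lists from all slices are merged and polled in a single loop,
+    # roughly
+    #   Completer(all_tasks, message='ssh_slice').run(timeout, graceout, period)
+    # with the timedelta arguments hinted at in the commented-out parameters above
+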
+    def ssh_slice_basics (self, options, *args, **kwds):
+        "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
+        overall = True
+        if not self.do_ssh_slice_once(options, expected=True,  command='true'): overall=False
+        if not self.do_ssh_slice_once(options, expected=False, command='false'): overall=False
+        if not self.do_ssh_slice_once(options, expected=False, command='someimprobablecommandname'): overall=False
+        if not self.do_ssh_slice_once(options, expected=True,  command='ps'): overall=False
+        if not self.do_ssh_slice_once(options, expected=False, command='ls /vservers'): overall=False
+        return overall
+
+    # picks just one nodename and runs the ssh command once
+    def do_ssh_slice_once(self, options, command, expected):
+        # locate a key
+        private_key = self.locate_private_key()
+        if not private_key:
+            utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
+            return False
+
+        # convert nodenames to real hostnames
         slice_spec = self.slice_spec
-        for hostname in slice_spec['nodenames']:
-            (site_spec,node_spec) = self.test_plc.locate_node(hostname)
-            if TestNode.is_real_model(node_spec['node_fields']['model']):
-                utils.header("WARNING : Checking slice %s on real node %s skipped"%(self.name(),hostname))
-                continue
-            (found,remote_privatekey)=self.locate_key(slice_spec)
-            if not found :
-                raise Exception,"Cannot find a valid key for slice %s"%self.name()
-                break 
-            while (bool):
-                utils.header('trying to connect to %s@%s'%(self.name(),hostname))
-                Date=self.test_plc.run_in_guest('ssh -i %s %s@%s date'%(remote_privatekey,self.name(),hostname))
-                if (Date==0):
-                    break
-                elif ( start_time  <= dead_time ) :
-                    start_time=datetime.datetime.now()+ datetime.timedelta(seconds=45)
-                    time.sleep(45)
-                elif (options.forcenm):
-                    utils.header('%s@%s : restarting nm in case is in option on %s'%(self.name(),hostname,hostname))
-                    access=self.test_plc.run_in_guest('ssh -i /etc/planetlab/root_ssh_key.rsa  root@%s service nm restart'%hostname)
-                    if (access==0):
-                        utils.header('nm restarted on %s'%hostname)
-                    else:
-                        utils.header('%s@%s : Failed to restart the NM on %s'%(self.name(),hostname,hostname))
-                    utils.header('Try to reconnect to  %s@%s after the tentative of restarting NM'%(self.name(),hostname))
-                    connect=self.test_plc.run_in_guest('ssh -i %s %s@%s date'%(remote_privatekey,self.name(),hostname))
-                    if (not connect):
-                        utils.header('connected to %s@%s -->'%(self.name(),hostname))
-                        break
-                    else:
-                        utils.header('giving up with to %s@%s -->'%(self.name(),hostname))
-                        bool=False
-                        break
-                else:
-                    bool=False
-                    break
-        return bool
+        nodename=slice_spec['nodenames'][0]
+        site_spec, node_spec = self.test_plc.locate_node(nodename)
+        hostname=node_spec['node_fields']['hostname']
 
-         
+        if expected:
+            msg="{} to return TRUE from ssh".format(command)
+        else:
+            msg="{} to return FALSE from ssh".format(command)
+            
+        utils.header("checking {} -- slice {} on node {}".format(msg, self.name(), hostname))
+        site_spec, node_spec = self.test_plc.locate_hostname(hostname)
+        test_ssh = TestSsh (hostname, key=private_key, username=self.name())
+        full_command = test_ssh.actual_command(command)
+        retcod = utils.system (full_command, silent=True, timeout=10)
+        if getattr(options, 'dry_run', None):
+            return True
+        if expected:
+            success = retcod==0
+        else:
+            success = retcod!=0
+        if not success:
+            utils.header ("WRONG RESULT for {}".format(msg))
+        return success
 
+    # for TestPlc.slice_mapper__tasks
+    # check that /vservers/<> is present/deleted
+    def slice_fs_present__tasks (self, options): 
+        "checks that /vservers/<slicename> exists on the filesystem"
+        return self.check_rootfs_tasks(options, expected=True)
+    def slice_fs_deleted__tasks (self, options): 
+        "checks that /vservers/<slicename> has been properly wiped off"
+        return self.check_rootfs_tasks (options, expected=False)
+
+    def check_rootfs_tasks (self, options, expected):
+        # use constant admin key
+        local_key = "keys/key_admin.rsa"
+        node_infos = self.test_plc.all_node_infos()
+        rootfs="/vservers/{}".format(self.name())
+        class CompleterTaskRootfs (CompleterTaskNodeSsh):
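+            # closes over local_key, rootfs and expected from the enclosing method;
+            # self.test_ssh and self.dry_run come from the CompleterTaskNodeSsh base class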
+            def __init__ (self, nodename, qemuname):
+                CompleterTaskNodeSsh.__init__(self, nodename, qemuname, local_key, expected=expected,
+                                              command="ls -d {}".format(rootfs))
+            def failure_epilogue (self):
+                if expected:
+                    print("Could not stat {} - was expected to be present".format(rootfs))
+                else:
+                    print("Sliver rootfs {} still present - this is unexpected".format(rootfs))
+                    utils.system(self.test_ssh.actual_command("ls -l {rootfs}; du -hs {rootfs}".format(**locals()),
+                                                              dry_run=self.dry_run, timeout=20))
+        return [ CompleterTaskRootfs (nodename, qemuname) for (nodename, qemuname) in node_infos ]