- for keyname in user_spec['keynames']:
- key_spec=self.test_plc.locate_key(keyname)
- test_key=TestKey(self.test_plc,key_spec)
- publickey=test_key.publicpath()
- privatekey=test_key.privatepath()
- keyname=test_key.name()
- if os.path.isfile(publickey) and os.path.isfile(privatekey):
- found=True
- #create dir in plc root image
- remote_privatekey="/root/keys/%s.rsa"%keyname
- if not os.path.isdir("/plc/root/data/root/keys"):
- self.test_plc.run_in_guest("mkdir /root/keys" )
- self.test_plc.copy_in_guest(privatekey,remote_privatekey,True)
-
- return (found,remote_privatekey)
-
- def do_check_slices(self,options):
- bool=True
- self.clear_known_hosts()
- start_time = datetime.datetime.now()
- dead_time=start_time + datetime.timedelta(minutes=15)
- for slice_spec in self.test_plc.plc_spec['slices']:
- for hostname in slice_spec['nodenames']:
- slicename=slice_spec['slice_fields']['name']
- (found,remote_privatekey)=self.locate_key(slice_spec)
- if( not found):
- raise Exception,"Cannot find a valid key for slice %s"%slicename
- break
- while(bool):
- utils.header('trying to connect to %s@%s'%(slicename,hostname))
- Date=self.test_plc.run_in_guest('ssh -i %s %s@%s date'%(remote_privatekey,slicename,hostname))
- if (Date==0):
- break
- elif ( start_time <= dead_time ) :
- start_time=datetime.datetime.now()+ datetime.timedelta(seconds=45)
- time.sleep(45)
- elif (options.forcenm):
- utils.header('%s@%s : restarting nm in case is in option on %s'%(slicename,hostname,hostname))
- access=self.test_plc.run_in_guest('ssh -i /etc/planetlab/root_ssh_key.rsa root@%s service nm restart'%hostname)
- if (access==0):
- utils.header('nm restarted on %s'%hostname)
- else:
- utils.header('%s@%s : Failed to restart the NM on %s'%(slicename,hostname,hostname))
- utils.header('Try to reconnect to %s@%s after the tentative of restarting NM'%(slicename,hostname))
- connect=self.test_plc.run_in_guest('ssh -i %s %s@%s date'%(remote_privatekey,slicename,hostname))
- if (not connect):
- utils.header('connected to %s@%s -->'%(slicename,hostname))
- break
- else:
- utils.header('giving up with to %s@%s -->'%(slicename,hostname))
- bool=False
- break
- else:
- bool=False
- break
- return bool
-
-
+ key_names += user_spec['key_names']
+ return self.test_plc.locate_private_key_from_key_names (key_names)
+
+ # for TestPlc.slice_mapper__tasks
+ # i.e. returns a list of CompleterTasks that are merged into the same Completer run
+ # to avoid waiting for as many slices as the Plc has
+ # also the __doc__ lines are used for the TestPlc methods, e.g. just 'ssh_slice'
+ def ssh_slice__tasks (self, options, *args, **kwds):
+ "tries to ssh-enter the slice with the user key, to check for slice creation"
+ return self.ssh_tasks(options, expected=True, *args, **kwds)
+
+    # when we expect the slice to be unreachable
+ def ssh_slice_off__tasks (self, options, *args, **kwds):
+ "tries to ssh-enter the slice with the user key, expecting it to be unreachable"
+ return self.ssh_tasks(options, expected=False, *args, **kwds)
+
+ def ssh_tasks(self,options, expected=True, command=None):
+# timeout_minutes=20, silent_minutes=10, period_seconds=15):
+# timeout = timedelta(minutes=timeout_minutes)
+# graceout = timedelta(minutes=silent_minutes)
+# period = timedelta(seconds=period_seconds)
+ if not command:
+ command = "echo hostname ; hostname; echo id; id; echo uname -a ; uname -a"
+ # locate a key
+ private_key = self.locate_private_key()
+ if not private_key :
+ utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
+ return False
+
+ # convert nodenames to real hostnames
+ if expected: msg="ssh slice access enabled"
+ else: msg="ssh slice access disabled"
+ utils.header("checking for {} -- slice {}".format(msg, self.name()))
+
+ tasks=[]
+ slicename=self.name()
+ dry_run = getattr(options, 'dry_run', False)
+ for nodename in self.slice_spec['nodenames']:
+ site_spec, node_spec = self.test_plc.locate_node(nodename)
+ tasks.append( CompleterTaskSliceSsh(self.test_plc, node_spec['node_fields']['hostname'],
+ slicename, private_key, command, expected, dry_run))
+ return tasks
+
+ def ssh_slice_basics (self, options, *args, **kwds):
+ "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
+ overall = True
+ if not self.do_ssh_slice_once(options, expected=True, command='true'): overall=False
+ if not self.do_ssh_slice_once(options, expected=False, command='false'): overall=False
+ if not self.do_ssh_slice_once(options, expected=False, command='someimprobablecommandname'): overall=False
+ if not self.do_ssh_slice_once(options, expected=True, command='ps'): overall=False
+ if not self.do_ssh_slice_once(options, expected=False, command='ls /vservers'): overall=False
+ return overall
+
+    # picks just one nodename and runs the ssh command once
+ def do_ssh_slice_once(self, options, command, expected):
+ # locate a key
+ private_key=self.locate_private_key()
+ if not private_key :
+ utils.header("WARNING: Cannot find a valid key for slice {}".format(self.name()))
+ return False
+
+        # convert the nodename to a real hostname
+ slice_spec = self.slice_spec
+ nodename=slice_spec['nodenames'][0]
+ site_spec, node_spec = self.test_plc.locate_node(nodename)
+ hostname=node_spec['node_fields']['hostname']
+
+ if expected:
+ msg="{} to return TRUE from ssh".format(command)
+ else:
+ msg="{} to return FALSE from ssh".format(command)
+
+ utils.header("checking {} -- slice {} on node {}".format(msg, self.name(), hostname))
+ site_spec, node_spec = self.test_plc.locate_hostname(hostname)
+ test_ssh = TestSsh (hostname, key=private_key, username=self.name())
+ full_command = test_ssh.actual_command(command)
+ retcod = utils.system (full_command, silent=True, timeout=10)
+ if getattr(options, 'dry_run', None):
+ return True
+ if expected:
+ success = retcod==0
+ else:
+ success = retcod!=0
+ if not success:
+ utils.header ("WRONG RESULT for {}".format(msg))
+ return success
+
+ # for TestPlc.slice_mapper__tasks
+ # check that /vservers/<> is present/deleted
+ def slice_fs_present__tasks (self, options):
+ "checks that /vservers/<slicename> exists on the filesystem"
+ return self.check_rootfs_tasks(options, expected=True)
+ def slice_fs_deleted__tasks (self, options):
+ "checks that /vservers/<slicename> has been properly wiped off"
+ return self.check_rootfs_tasks (options, expected=False)