- self.test_plc.run_in_guest("sed -i -e /^%s/d /root/.ssh/known_hosts"%nodename)
-
- ###the logic is quit wrong, must be rewritten
- def do_check_slices(self):
- # Do not wait here, as this step can be run directly in which case you don't want to wait
- # just add the 5 minutes to the overall timeout
- #utils.header("Waiting for the nodes to fully boot")
- #time.sleep(300)
- bool=bool1=True
- secondes=15
- self.clear_known_hosts()
- start_time = datetime.datetime.now()
- dead_time=start_time + datetime.timedelta(minutes=11)
- for slice_spec in self.test_plc.plc_spec['slices']:
- for hostname in slice_spec['nodenames']:
- slicename=slice_spec['slice_fields']['name']
- # locate the first avail. key
- found=False
- for username in slice_spec['usernames']:
- user_spec=self.test_site.locate_user(username)
- for keyname in user_spec['keynames']:
- key_spec=self.test_plc.locate_key(keyname)
- test_key=TestKey(self.test_plc,key_spec)
- publickey=test_key.publicpath()
- privatekey=test_key.privatepath()
- keyname=test_key.name()
- if os.path.isfile(publickey) and os.path.isfile(privatekey):
- found=True
- break
- if not found:
- raise Exception,"Cannot find a valid key for slice %s"%slicename
-
- # create dir in plc root image
- self.test_plc.run_in_guest("mkdir /root/keys")
- remote_privatekey="/root/keys/%s.rsa"%keyname
- self.test_plc.copy_in_guest(privatekey,remote_privatekey,True)
- while(bool):
- utils.header('restarting nm on %s'%hostname)
- access=self.test_plc.run_in_guest('ssh -i /etc/planetlab/root_ssh_key.rsa root@%s service nm restart'%hostname )
- if (access==0):
- utils.header('nm restarted on %s'%hostname)
- while(bool1):
- utils.header('trying to connect to %s@%s'%(slicename,hostname))
- Date=self.test_plc.run_in_guest('ssh -i %s %s@%s date'%(remote_privatekey,slicename,hostname))
- if (Date==0):
- break
- elif ( start_time <= dead_time ) :
- start_time=datetime.datetime.now()+ datetime.timedelta(seconds=30)
- time.sleep(secondes)
- else:
- bool1=False
- if(bool1):
- utils.header('connected to %s@%s -->'%(slicename,hostname))
- else:
- utils.header('%s@%s : last chance - restarting nm on %s'%(slicename,hostname,hostname))
- access=self.test_plc.run_in_guest('ssh -i /etc/planetlab/root_ssh_key.rsa root@%s service nm restart'%hostname)
- time.sleep(240)##temoprally adding some delay due to the network slowness
- if (access==0):
- utils.header('trying to connect (2) to %s@%s'%(slicename,hostname))
- Date=self.test_plc.run_in_guest('ssh -i %s %s@%s date'%(remote_privatekey,slicename,hostname))
- if (Date==0):
- utils.header('connected to %s@%s -->'%(slicename,hostname))
- else:
- utils.header('giving up with to %s@%s -->'%(slicename,hostname))
- return False
- else :
- utils.header('Last chance failed on %s@%s -->'%(slicename,hostname))
- break
- elif ( start_time <= dead_time ) :
- start_time=datetime.datetime.now()+ datetime.timedelta(minutes=1)
- time.sleep(secondes)
- else:
- bool=False
-
- return bool
-
+ (site_spec,node_spec) = self.test_plc.locate_node(nodename)
+ tasks.append( CompleterTaskSliceSsh(self.test_plc,node_spec['node_fields']['hostname'],
+ slicename,private_key,command,expected,dry_run))
+ return tasks
+# return Completer (tasks).run (timeout, graceout, period)
+
+ def ssh_slice_basics (self, options, *args, **kwds):
+ "the slice is expected to be UP and we just check a few simple sanity commands, including 'ps' to check for /proc"
+ overall=True
+ if not self.do_ssh_slice_once(options,expected=True, command='true'): overall=False
+ if not self.do_ssh_slice_once(options,expected=False, command='false'): overall=False
+ if not self.do_ssh_slice_once(options,expected=False, command='someimprobablecommandname'): overall=False
+ if not self.do_ssh_slice_once(options,expected=True, command='ps'): overall=False
+ if not self.do_ssh_slice_once(options,expected=False, command='ls /vservers'): overall=False
+ return overall
+
+    # picks just one nodename and runs the ssh command once
+ def do_ssh_slice_once(self,options,command,expected):
+ # locate a key
+ private_key=self.locate_private_key()
+ if not private_key :
+ utils.header("WARNING: Cannot find a valid key for slice %s"%self.name())
+ return False
+
+ # convert nodenames to real hostnames
+ slice_spec = self.slice_spec
+ nodename=slice_spec['nodenames'][0]
+ (site_spec,node_spec) = self.test_plc.locate_node(nodename)
+ hostname=node_spec['node_fields']['hostname']
+
+ if expected: msg="%s to return TRUE from ssh"%command
+ else: msg="%s to return FALSE from ssh"%command
+
+ utils.header("checking %s -- slice %s on node %s"%(msg,self.name(),hostname))
+ (site_spec,node_spec) = self.test_plc.locate_hostname(hostname)
+ test_ssh = TestSsh (hostname,key=private_key,username=self.name())
+ full_command = test_ssh.actual_command(command)
+ retcod = utils.system (full_command,silent=True)
+ if getattr(options,'dry_run',None): return True
+ if expected: success = retcod==0
+ else: success = retcod!=0
+ if not success: utils.header ("WRONG RESULT for %s"%msg)
+ return success
+
+ # for TestPlc.slice_mapper__tasks
+ # check that /vservers/<> is present/deleted
+ def slice_fs_present__tasks (self, options):
+ "checks that /vservers/<slicename> exists on the filesystem"
+ return self.check_rootfs_tasks(options,expected=True)
+ def slice_fs_deleted__tasks (self, options):
+ "checks that /vservers/<slicename> has been properly wiped off"
+ return self.check_rootfs_tasks (options,expected=False)
+
+ def check_rootfs_tasks (self, options, expected):
+ # use constant admin key
+ local_key = "keys/key_admin.rsa"
+ node_infos = self.test_plc.all_node_infos()
+ rootfs="/vservers/%s"%self.name()
+ class CompleterTaskRootfs (CompleterTaskNodeSsh):
+ def __init__ (self, nodename, qemuname):
+ CompleterTaskNodeSsh.__init__(self,nodename, qemuname, local_key, expected=expected,
+ command="ls -d %s"%rootfs)
+ def failure_epilogue (self):
+ if expected:
+ print "Could not stat %s - was expected to be present"%rootfs
+ else:
+ print "Sliver rootfs %s still present - this is unexpected"%rootfs
+ utils.system(self.test_ssh.actual_command("ls -l %s; du -hs %s"%(rootfs,rootfs),dry_run=self.dry_run))
+ return [ CompleterTaskRootfs (nodename, qemuname) for (nodename,qemuname) in node_infos ]