class TestPlc:
default_steps = ['uninstall','install','install_rpm',
- 'configure', 'start', SEP,
+ 'configure', 'start', 'fetch_keys', SEP,
'store_keys', 'clear_known_hosts', 'initscripts', SEP,
'sites', 'nodes', 'slices', 'nodegroups', SEP,
'init_node','bootcd', 'configure_qemu', 'export_qemu',
'kill_all_qemus', 'reinstall_node','start_node', SEP,
- 'nodes_booted', 'nodes_ssh', 'check_slice',
- 'check_initscripts', 'check_tcp',SEP,
+ 'nodes_booted', 'nodes_ssh', 'check_slice', 'check_initscripts', SEP,
+ 'check_sanity', 'check_tcp', 'plcsh_stress_test', SEP,
'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
- other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', SEP,
- 'clean_initscripts', 'clean_sites', 'clean_nodes',
+ other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
+ 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
+ 'clean_sites', 'clean_nodes',
'clean_slices', 'clean_keys', SEP,
'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
-                     'db_dump' , 'db_restore', ' cleanup_tracker',
+                     # fixed: ' cleanup_tracker' had a stray leading space, so the
+                     # step name could never match the cleanup_tracker method
+                     'db_dump' , 'db_restore', 'cleanup_tracker',
def actual_command_in_guest (self,command):
return self.test_ssh.actual_command(self.host_to_guest(command))
+    # start the plc's vserver on its host box, using the host-side command
+    # built by start_guest_in_host (runs through test_ssh, so works remotely too)
+    def start_guest (self):
+        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
+
def run_in_guest (self,command):
return utils.system(self.actual_command_in_guest(command))
def host_to_guest(self,command):
return "vserver %s exec %s"%(self.vservername,command)
+    # note: host_to_guest (above) wraps a command to run *inside* the vserver;
+    # this one, by contrast, is run *on the host box* to boot the vserver itself
+    def start_guest_in_host(self):
+        return "vserver %s start"%(self.vservername)
+
# xxx quick n dirty
def run_in_guest_piped (self,local,remote):
return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
return slice
raise Exception,"Cannot locate slice %s"%slicename
+    # returns one TestSliver object per (slice x node) pair declared in the config
+    def all_sliver_objs (self):
+        result=[]
+        for slice_spec in self.plc_spec['slices']:
+            slicename = slice_spec['slice_fields']['name']
+            for nodename in slice_spec['nodenames']:
+                result.append(self.locate_sliver_obj (nodename,slicename))
+        return result
+
+    # locate the specs for (nodename,slicename) and wrap them into the
+    # corresponding Test* objects, ending with a TestSliver
+    def locate_sliver_obj (self,nodename,slicename):
+        (site,node) = self.locate_node(nodename)
+        slice = self.locate_slice (slicename)
+        # build objects
+        test_site = TestSite (self, site)
+        test_node = TestNode (self, test_site,node)
+        # xxx the slice site is assumed to be the node site - mhh - probably harmless
+        test_slice = TestSlice (self, test_site, slice)
+        return TestSliver (self, test_node, test_slice)
+
+    # TestNode for the first node of the first slice in the config
+    # (convenience accessor used by the sanity checks)
+    def locate_first_node(self):
+        nodename=self.plc_spec['slices'][0]['nodenames'][0]
+        (site,node) = self.locate_node(nodename)
+        test_site = TestSite (self, site)
+        test_node = TestNode (self, test_site,node)
+        return test_node
+
+    # TestSliver for the first node of the first slice in the config
+    # (convenience accessor used by the sanity checks)
+    def locate_first_sliver (self):
+        slice_spec=self.plc_spec['slices'][0]
+        slicename=slice_spec['slice_fields']['name']
+        nodename=slice_spec['nodenames'][0]
+        return self.locate_sliver_obj(nodename,slicename)
+
# all different hostboxes used in this plc
def gather_hostBoxes(self):
# maps on sites and nodes, return [ (host_box,test_node) ]
### install
def install(self):
- # we need build dir for vtest-init-vserver
if self.is_local():
# a full path for the local calls
- build_dir=os.path.dirname(sys.argv[0])+"/build"
+ build_dir=os.path.dirname(sys.argv[0])
+ # sometimes this is empty - set to "." in such a case
+ if not build_dir: build_dir="."
+ build_dir += "/build"
else:
# use a standard name - will be relative to remote buildname
build_dir="build"
def start(self):
self.run_in_guest('service plc start')
return True
-
+
def stop(self):
self.run_in_guest('service plc stop')
return True
- # could use a TestKey class
+    # step: (re)start the vserver itself, e.g. after a host-box reboot
+    # listed in other_steps, not default_steps
+    def vs_start (self):
+        self.start_guest()
+        return True
+
+ # stores the keys from the config for further use
def store_keys(self):
for key_spec in self.plc_spec['keys']:
TestKey(self,key_spec).store_key()
def clean_keys(self):
utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
+    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
+    # for later direct access to the nodes
+    def fetch_keys(self):
+        dir="./keys"
+        if not os.path.isdir(dir):
+            os.mkdir(dir)
+        prefix = 'root_ssh_key'
+        vservername=self.vservername
+        overall=True
+        # grab both halves of the root key: the public (.pub) and private (.rsa) parts
+        for ext in [ 'pub', 'rsa' ] :
+            # the vserver's filesystem is visible from the host under /vservers/<name>
+            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
+            dst="keys/%(vservername)s.%(ext)s"%locals()
+            # best effort: keep fetching the remaining files, but report failure
+            if self.test_ssh.fetch(src,dst) != 0: overall=False
+        return overall
+
def sites (self):
return self.do_sites()
test_site.create_users()
return True
+    # DANGER: deletes *every* site known to the plc (and whatever cascades from
+    # that), not only the ones from our config - hence listed in other_steps only
+    def clean_all_sites (self):
+        print 'auth_root',self.auth_root()
+        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
+        for site_id in site_ids:
+            print 'Deleting site_id',site_id
+            self.apiserver.DeleteSite(self.auth_root(),site_id)
+
def nodes (self):
return self.do_nodes()
def clean_nodes (self):
test_node.create_node ()
return True
- # create nodegroups if needed, and populate
- # no need for a clean_nodegroups if we are careful enough
     def nodegroups (self):
+        return self.do_nodegroups("add")
+    # symmetric step: tears down what the 'nodegroups' step created
+    def clean_nodegroups (self):
+        return self.do_nodegroups("delete")
+
+ # create nodegroups if needed, and populate
+ def do_nodegroups (self, action="add"):
# 1st pass to scan contents
groups_dict = {}
for site_spec in self.plc_spec['sites']:
groups_dict[nodegroupname]=[]
groups_dict[nodegroupname].append(test_node.name())
auth=self.auth_root()
+ overall = True
for (nodegroupname,group_nodes) in groups_dict.iteritems():
- print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
- # first, check if the nodetagtype is here
- tag_types = self.apiserver.GetNodeTagTypes(auth,{'tagname':nodegroupname})
- if tag_types:
- tag_type_id = tag_types[0]['node_tag_type_id']
- print 'node-tag-type',nodegroupname,'already exists'
- else:
- tag_type_id = self.apiserver.AddNodeTagType(auth,
+ if action == "add":
+ print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
+ # first, check if the nodetagtype is here
+ tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
+ if tag_types:
+ tag_type_id = tag_types[0]['tag_type_id']
+ else:
+ tag_type_id = self.apiserver.AddTagType(auth,
{'tagname':nodegroupname,
'description': 'for nodegroup %s'%nodegroupname,
'category':'test',
'min_role_id':10})
- # create nodegroup
- nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
- if nodegroups:
- print 'nodegroup',nodegroupname,'already exists'
+ print 'located tag (type)',nodegroupname,'as',tag_type_id
+ # create nodegroup
+ nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
+ if not nodegroups:
+ self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
+ print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
+ # set node tag on all nodes, value='yes'
+ for nodename in group_nodes:
+ try:
+ self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
+ except:
+ traceback.print_exc()
+ print 'node',nodename,'seems to already have tag',nodegroupname
+ # check anyway
+ try:
+ expect_yes = self.apiserver.GetNodeTags(auth,
+ {'hostname':nodename,
+ 'tagname':nodegroupname},
+ ['tagvalue'])[0]['tagvalue']
+ if expect_yes != "yes":
+ print 'Mismatch node tag on node',nodename,'got',expect_yes
+ overall=False
+ except:
+ if not self.options.dry_run:
+ print 'Cannot find tag',nodegroupname,'on node',nodename
+ overall = False
else:
- self.apiserver.AddNodeGroup(auth,
- {'groupname': nodegroupname,
- 'node_tag_type_id': tag_type_id,
- 'tagvalue': 'yes'})
- # set node tag on all nodes, value='yes'
- overall = True
- for nodename in group_nodes:
- try:
- self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
- except:
- print 'node',nodename,'seems to already have tag',nodegroupname
- # check anyway
try:
- expect_yes = self.apiserver.GetNodeTags(
- auth,
- {'hostname':nodename,
- 'tagname':nodegroupname},
- ['tagvalue'])[0]['tagvalue']
- if expect_yes != "yes":
- print 'Mismatch node tag on node',nodename,'got',expect_yes
- overall=False
+ print 'cleaning nodegroup',nodegroupname
+ self.apiserver.DeleteNodeGroup(auth,nodegroupname)
except:
- print 'Cannot find tag',nodegroupname,'on node',nodename
- overall = False
+ traceback.print_exc()
+ overall=False
return overall
def all_hostnames (self) :
return hostnames
# gracetime : during the first <gracetime> minutes nothing gets printed
- def do_nodes_booted (self, minutes, gracetime,period=30):
+ def do_nodes_booted (self, minutes, gracetime,period=15):
if self.options.dry_run:
print 'dry_run'
return True
def nodes_booted(self):
return self.do_nodes_booted(minutes=20,gracetime=15)
- def do_nodes_ssh(self,minutes,gracetime,period=30):
+ def do_nodes_ssh(self,minutes,gracetime,period=15):
# compute timeout
timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
return True
def nodes_ssh(self):
- return self.do_nodes_ssh(minutes=6,gracetime=4)
+ return self.do_nodes_ssh(minutes=10,gracetime=5)
@node_mapper
def init_node (self): pass
@node_mapper
def export_qemu (self): pass
+    ### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
+    def check_sanity_node (self):
+        return self.locate_first_node().check_sanity()
+    def check_sanity_sliver (self) :
+        return self.locate_first_sliver().check_sanity()
+
+    # step: node check runs first; the sliver check is short-circuited if it fails
+    def check_sanity (self):
+        return self.check_sanity_node() and self.check_sanity_sliver()
+
+ ### initscripts
def do_check_initscripts(self):
overall = True
for slice_spec in self.plc_spec['slices']:
print 'deletion went wrong - probably did not exist'
return True
+ ### manage slices
def slices (self):
return self.do_slices()
@node_mapper
def start_node (self) : pass
- def all_sliver_objs (self):
- result=[]
- for slice_spec in self.plc_spec['slices']:
- slicename = slice_spec['slice_fields']['name']
- for nodename in slice_spec['nodenames']:
- result.append(self.locate_sliver_obj (nodename,slicename))
- return result
-
- def locate_sliver_obj (self,nodename,slicename):
- (site,node) = self.locate_node(nodename)
- slice = self.locate_slice (slicename)
- # build objects
- test_site = TestSite (self, site)
- test_node = TestNode (self, test_site,node)
- # xxx the slice site is assumed to be the node site - mhh - probably harmless
- test_slice = TestSlice (self, test_site, slice)
- return TestSliver (self, test_node, test_slice)
-
def check_tcp (self):
specs = self.plc_spec['tcp_test']
overall=True
if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
overall=False
return overall
-
+
+    # step: copy plcsh-stress-test.py into the plc image, then run it inside
+    # the guest; returns True iff the script exits 0
+    def plcsh_stress_test (self):
+        # install the stress-test in the plc image
+        location = "/usr/share/plc_api/plcsh-stress-test.py"
+        # the vserver's filesystem is reachable from the host under /vservers/<name>
+        remote="/vservers/%s/%s"%(self.vservername,location)
+        self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
+        command = location
+        command += " -- --check"
+        # shorter variant when the config asks for a small test
+        if self.options.small_test:
+            command += " --tiny"
+        return ( self.run_in_guest(command) == 0)
def gather_logs (self):
# (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
return True
     def gather_var_logs (self):
+        # create the local target dir *before* running the remote tar
+        # (it used to be created afterwards, too late for the pipe)
+        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
         to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
         command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
-        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
+        utils.system(command)
+        # open up the httpd logs so non-root users can browse them
+        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
+        utils.system(command)
def gather_nodes_var_logs (self):