1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
10 from TestUser import TestUser
11 from TestBoxQemu import TestBoxQemu
12 from TestSsh import TestSsh
13 from Completer import CompleterTask
class CompleterTaskNodeSsh(CompleterTask):
    """A CompleterTask that probes a node through ssh by running a small command."""
    def __init__(self, hostname, qemuname, local_key, command=None,
                 boot_state="boot", expected=True, dry_run=False):
        # hostname: the node to reach; qemuname: name of its qemu instance
        self.hostname = hostname
        self.qemuname = qemuname
        # boot_state is only used in the failure message below
        self.boot_state = boot_state
        self.local_key = local_key
        # default probe command when none is provided
        self.command = command if command is not None else "hostname;uname -a"
        self.expected = expected
        self.dry_run = dry_run
        # ssh helper bound to the target hostname with the provided key
        self.test_ssh = TestSsh(self.hostname, key=self.local_key)
    def run(self, silent):
        # wrap self.command in a full ssh invocation and run it locally
        command = self.test_ssh.actual_command(self.command)
        retcod = utils.system(command, silent=silent, dry_run=self.dry_run)
        # NOTE(review): lines are missing here in this view - retcod is unused
        # as shown; presumably it was compared against self.expected and returned
    def failure_epilogue(self):
        # message shown by the completer machinery when the task never succeeds
        print("Cannot reach {} in {} mode".format(self.hostname, self.boot_state))
    def __init__(self, test_plc, test_site, node_spec):
        # the TestPlc instance this node belongs to (API server + options are read from it)
        self.test_plc = test_plc
        # the TestSite instance hosting this node (used e.g. to locate the owner user)
        self.test_site = test_site
        # raw spec dict; node_fields, interface_fields, tags ... are read from it
        self.node_spec = node_spec
44 return self.node_spec['node_fields']['hostname']
47 return self.test_plc.options.dry_run
50 def is_qemu_model(model):
51 return model.find("qemu") >= 0
53 return TestNode.is_qemu_model(self.node_spec['node_fields']['model'])
56 def is_real_model(model):
57 return not TestNode.is_qemu_model(model)
59 return TestNode.is_real_model(self.node_spec['node_fields']['model'])
62 return self.test_plc.options.buildname
66 return "qemu-{}".format(self.name())
68 return "real-{}".format(self.name())
70 # this returns a hostname
76 return self.node_spec['host_box']
78 utils.header("WARNING : qemu nodes need a host box")
81 # this returns a TestBoxQemu instance - cached in .test_box_value
84 return self.test_box_value
86 self.test_box_value = TestBoxQemu(self.host_box(),self.buildname())
87 return self.test_box_value
    def create_node(self):
        """Create this node in the PLCAPI, acting as its owner user.

        Adds the node, marks it for reinstall, sets its tags, then populates
        its network interfaces (old or new address API depending on the plc).
        """
        ownername = self.node_spec['owner']
        user_spec = self.test_site.locate_user(ownername)
        test_user = TestUser(self.test_plc,self.test_site,user_spec)
        userauth = test_user.auth()
        utils.header("node {} created by user {}".format(self.name(), test_user.name()))
        rootauth = self.test_plc.auth_root()
        server = self.test_plc.apiserver
        node_id = server.AddNode(userauth,
                                 self.test_site.site_spec['site_fields']['login_base'],
                                 self.node_spec['node_fields'])
        # create as reinstall to avoid user confirmation
        server.UpdateNode(userauth, self.name(), { 'boot_state' : 'reinstall' })
        # you are supposed to make sure the tags exist
        for tagname, tagvalue in self.node_spec['tags'].items():
            server.AddNodeTag(userauth, node_id, tagname, tagvalue)
        if not self.test_plc.has_addresses_api():
            # print 'USING OLD INTERFACE'
            # populate network interfaces - primary
            server.AddInterface(userauth, self.name(),
                                self.node_spec['interface_fields'])
            # NOTE(review): an 'else:' line appears to be missing from this view
            # here - the lines below belong to the new-addresses-API branch
            # print 'USING NEW INTERFACE with separate ip addresses'
            # this is for setting the 'dns' stuff that now goes with the node
            server.UpdateNode(userauth, self.name(), self.node_spec['node_fields_nint'])
            interface_id = server.AddInterface(userauth, self.name(),self.node_spec['interface_fields_nint'])
            server.AddIpAddress(userauth, interface_id, self.node_spec['ipaddress_fields'])
            route_fields = self.node_spec['route_fields']
            route_fields['interface_id'] = interface_id
            server.AddRoute(userauth, node_id, self.node_spec['route_fields'])
        # populate network interfaces - others
        if 'extra_interfaces' in self.node_spec:
            for interface in self.node_spec['extra_interfaces']:
                server.AddInterface(userauth, self.name(), interface['interface_fields'])
                if 'settings' in interface:
                    for attribute, value in interface['settings'].items():
                        # locate node network
                        interface = server.GetInterfaces( userauth,
                                                          {'ip' : interface['interface_fields']['ip']})[0]
                        interface_id = interface['interface_id']
                        # locate or create node network attribute type
                        # NOTE(review): try/except lines around the two calls
                        # below appear to be missing from this view (GetTagTypes
                        # first, AddTagType as the fallback)
                        interface_tagtype = server.GetTagTypes(userauth, {'name' : attribute})[0]
                        interface_tagtype = server.AddTagType(rootauth,{'category' : 'test',
                                                                        'tagname' : attribute})
                        # attach the node network attribute
                        server.AddInterfaceTag(userauth, interface_id, attribute, value)
    def delete_node(self):
        # uses the right auth as far as poss.
        # NOTE(review): a couple of lines are missing from this view here -
        # the two successive assignments to 'auth' below were presumably the
        # two branches of a try/except (owner auth first, root as fallback)
        ownername = self.node_spec['owner']
        user_spec = self.test_site.locate_user(ownername)
        test_user = TestUser(self.test_plc, self.test_site, user_spec)
        auth = test_user.auth()
        auth = self.test_plc.auth_root()
        self.test_plc.apiserver.DeleteNode(auth, self.name())
    # Do most of the stuff locally - will be pushed on host_box - *not* the plc - later if needed
    def qemu_local_init(self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        # wipe and recreate the local per-node working directory
        utils.system("rm -rf {}".format(self.nodedir()))
        utils.system("mkdir {}".format(self.nodedir()))
        if not self.is_qemu():
            # NOTE(review): the body of the guard above is missing from this
            # view (presumably an early 'return True' for non-qemu nodes)
        # qemu nodes get the template scripts copied into their working dir
        return utils.system("rsync -v -a --exclude .svn template-qemu/ {}/"\
                            .format(self.nodedir())) == 0
        # NOTE(review): the 'def' line of this method is missing from this view;
        # this is the body of the step that fetches the node's boot medium
        "all nodes: invoke GetBootMedium and store result locally"
        utils.header("Calling GetBootMedium for {}".format(self.name()))
        # this would clearly belong in the config but, well ..
        options = self.node_spec['bootmedium_options'] if 'bootmedium_options' in self.node_spec else []
        encoded = self.test_plc.apiserver.GetBootMedium(
            self.test_plc.auth_root(), self.name(), 'node-iso', '', options)
        # NOTE(review): the guard around this raise (presumably checking for an
        # empty 'encoded') is missing from this view
        raise Exception('GetBootmedium failed')
        filename = "{}/{}.iso".format(self.nodedir(), self.name())
        utils.header('Storing boot medium into {}'.format(filename))
        # xxx discovered with python3, but a long standing issue:
        # encoded at this point is a str instead of a bytes
        # Quick & dirty : we convert this explicitly to a bytearray
        # Longer run : clearly it seems like the plcapi server side should
        # tag its result with <base64></base64> rather than as a string
        bencoded = str.encode(encoded)
        print("Dry_run: skipped writing of iso image")
        # with python3 we need to call decodestring here
        # NOTE(review): base64.decodestring was deprecated since 3.1 and
        # REMOVED in Python 3.9 - base64.decodebytes is the replacement;
        # confirm the target python version before relying on this
        with open(filename,'wb') as storage:
            storage.write(base64.decodestring(bencoded))
    # the nodestate_* steps below all set the node's PLCAPI boot_state as root
    # NOTE(review): the trailing 'return True' lines of these methods appear
    # to be missing from this view
    def nodestate_reinstall(self):
        "all nodes: mark PLCAPI boot_state as reinstall"
        self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                           self.name(),{'boot_state':'reinstall'})
    def nodestate_upgrade(self):
        "all nodes: mark PLCAPI boot_state as upgrade"
        self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                           self.name(),{'boot_state':'upgrade'})
    def nodestate_safeboot(self):
        "all nodes: mark PLCAPI boot_state as safeboot"
        self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                           self.name(),{'boot_state':'safeboot'})
    def nodestate_boot(self):
        "all nodes: mark PLCAPI boot_state as boot"
        self.test_plc.apiserver.UpdateNode(self.test_plc.auth_root(),
                                           self.name(),{'boot_state':'boot'})
    def nodestate_show(self):
        "all nodes: show PLCAPI boot_state"
        # NOTE(review): the dry_run guard lines (if/else) around the two
        # statements below are missing from this view
        print("Dry_run: skipped getting current node state")
        state = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), self.name(), ['boot_state'])[0]['boot_state']
        print("boot_state for {} : {}".format(self.name(), state))
    # convenience shortcuts, one per supported fedora release
    def nodedistro_f14(self):
        return self.nodedistro_set('f14')
    def nodedistro_f18(self):
        return self.nodedistro_set('f18')
    def nodedistro_f20(self):
        return self.nodedistro_set('f20')
    def nodedistro_f21(self):
        return self.nodedistro_set('f21')
    def nodedistro_set(self, distro):
        "set the fcdistro tag to distro, passed in arg"
        # NOTE(review): the continuation and return lines of this API call are
        # missing from this view
        self.test_plc.apiserver.SetNodeFcdistro(self.test_plc.auth_root(),
    def nodeflavour_show(self):
        "display the fcdistro tag - or flavour actually - of node"
        # NOTE(review): the dry_run guard and the call continuation lines are
        # missing from this view
        print("Dry_run: would fetch node flavour")
        flavour = self.test_plc.apiserver.GetNodeFlavour(self.test_plc.auth_root(),
        print("Flavour for {} : {}".format(self.name(), flavour))
    def nodeplain_set(self, plain):
        " set bootstrapfs-plain tag on nodes"
        # NOTE(review): the continuation and return lines of this API call are
        # missing from this view
        self.test_plc.apiserver.SetNodePlainBootstrapfs(self.test_plc.auth_root(),
    def nodeplain_on(self):
        # tag value "True" turns plain bootstrapfs on
        return self.nodeplain_set("True")
    def nodeplain_off(self):
        # an empty tag value turns it off
        return self.nodeplain_set("")
    def nodeplain_show(self):
        "display bootstrapfs-plain tag"
        # NOTE(review): the dry_run guard and the call continuation lines are
        # missing from this view
        print("Dry_run: would fetch node plain-bootstrapfs tag")
        plain = self.test_plc.apiserver.GetNodePlainBootstrapfs(self.test_plc.auth_root(),
        print("Plain bootstrapfs for {} is {}".format(self.name(), plain))
    def qemu_local_config(self):
        "all nodes: compute qemu config qemu.conf and store it locally"
        if not self.is_qemu():
            # NOTE(review): the body of the guard above is missing from this
            # view (presumably an early 'return True' for non-qemu nodes)
        mac = self.node_spec['interface_fields']['mac']
        hostname = self.node_spec['node_fields']['hostname']
        ip = self.node_spec['interface_fields']['ip']
        auth = self.test_plc.auth_root()
        # the build's target arch ends up in qemu.conf as TARGET_ARCH
        target_arch = self.test_plc.apiserver.GetPlcRelease(auth)['build']['target-arch']
        conf_filename = "{}/qemu.conf".format(self.nodedir())
        # NOTE(review): the dry_run guard lines around the storage below are
        # missing from this view
        print("dry_run: skipped actual storage of qemu.conf")
        utils.header('Storing qemu config for {} in {}'.format(self.name(), conf_filename))
        # one KEY=value line per setting - presumably read by the qemu scripts
        with open(conf_filename,'w') as file:
            file.write('MACADDR={}\n'.format(mac))
            file.write('NODE_ISO={}.iso\n'.format(self.name()))
            file.write('HOSTNAME={}\n'.format(hostname))
            file.write('IP={}\n'.format(ip))
            file.write('TARGET_ARCH={}\n'.format(target_arch))
    def qemu_clean(self):
        # remove this node's working directory on the qemu host box
        utils.header("Cleaning up qemu for host {} on box {}"\
                     .format(self.name(),self.test_box().hostname()))
        dry_run = self.dry_run()
        self.test_box().rmdir(self.nodedir(), dry_run=dry_run)
    def qemu_export(self):
        "all nodes: push local node-dep directory on the qemu box"
        # if relevant, push the qemu area onto the host box
        if self.test_box().is_local():
            # NOTE(review): the body of the guard above is missing from this
            # view (presumably an early return when the box is local)
        dry_run = self.dry_run()
        utils.header("Cleaning any former sequel of {} on {}"\
                     .format(self.name(), self.host_box()))
        utils.header("Transferring configuration files for node {} onto {}"\
                     .format(self.name(), self.host_box()))
        # rsync the whole nodedir onto the host box; retcod 0 means success
        return self.test_box().copy(self.nodedir(), recursive=True, dry_run=dry_run) == 0
    def qemu_start(self):
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
        model = self.node_spec['node_fields']['model']
        #starting the Qemu nodes before
        # NOTE(review): lines are missing from this view here - presumably the
        # qemu branch that actually starts the instance; only the real-node
        # fallthrough message is visible
        utils.header("TestNode.qemu_start : {} model {} taken as real node"\
                     .format(self.name(), model))
    def qemu_timestamp(self):
        # NOTE(review): the original docstring was copy-pasted from qemu_start;
        # this method actually records the current time in <nodedir>/timestamp
        "all nodes: store the current timestamp in the qemu working dir on the host box"
        test_box = self.test_box()
        # make sure the remote working dir exists before writing into it
        test_box.run_in_buildname("mkdir -p {}".format(self.nodedir()), dry_run=self.dry_run())
        now = int(time.time())
        return test_box.run_in_buildname("echo {:d} > {}/timestamp"\
                                         .format(now, self.nodedir()), dry_run=self.dry_run()) == 0
    def qemu_nodefamily(self):
        "write nodefamily stamp in qemu working dir"
        auth = self.test_plc.auth_root()
        hostname = self.node_spec['node_fields']['hostname']
        nodeflavour = self.test_plc.apiserver.GetNodeFlavour(auth, hostname)
        # NOTE(review): a couple of lines are missing from this view here
        # (presumably error handling and a return)
        nodedir = self.nodedir()
        nodefamily = nodeflavour['nodefamily']
        # stamp <nodedir>/nodefamily on the host box
        self.test_box().run_in_buildname("echo {nodefamily} > {nodedir}/nodefamily".format(**locals()))
    def start_qemu(self):
        # runs the per-node qemu scripts on the host box
        test_box = self.test_box()
        utils.header("Starting qemu node {} on {}".format(self.name(), test_box.hostname()))
        # set up the bridge first; output goes into the node's log.txt
        test_box.run_in_buildname("{}/qemu-bridge-init start >> {}/log.txt"\
                                  .format(self.nodedir(), self.nodedir()),
                                  dry_run=self.dry_run())
        # kick it off in background, as it would otherwise hang
        test_box.run_in_buildname("{}/qemu-start-node 2>&1 >> {}/log.txt"\
                                  .format(self.nodedir(), self.nodedir()))
        # NOTE(review): the 'def' line of the next method (listing running
        # qemus) is missing from this view - the lines below are its body
        utils.header("Listing qemu for host {} on box {}"\
                     .format(self.name(), self.test_box().hostname()))
        # -l only lists, does not kill
        command = "{}/qemu-kill-node -l {}".format(self.nodedir(), self.name())
        self.test_box().run_in_buildname(command, dry_run=self.dry_run())
        #Prepare the log file before killing the nodes
        # NOTE(review): the enclosing 'def' line (the stop/kill-qemu step) is
        # missing from this view - the lines below are its body
        test_box = self.test_box()
        # kill the right processes
        utils.header("Stopping qemu for node {} on box {}"\
                     .format(self.name(), self.test_box().hostname()))
        command = "{}/qemu-kill-node {}".format(self.nodedir(),self.name())
        self.test_box().run_in_buildname(command, dry_run=self.dry_run())
    def gather_qemu_logs(self):
        # only qemu nodes have a log.txt to fetch
        if not self.is_qemu():
            # NOTE(review): the body of the guard above is missing from this
            # view (presumably an early 'return True')
        remote_log = "{}/log.txt".format(self.nodedir())
        local_log = "logs/node.qemu.{}.txt".format(self.name())
        # fetch the qemu log from the host box into the local logs/ dir
        self.test_box().test_ssh.fetch(remote_log,local_log,dry_run=self.dry_run())
372 def keys_clear_known_hosts(self):
373 "remove test nodes entries from the local known_hosts file"
374 TestSsh(self.name()).clear_known_hosts()
377 def create_test_ssh(self):
378 # get the plc's keys for entering the node
379 vservername = self.test_plc.vservername
380 ### # assuming we've run testplc.fetch_keys()
381 ### key = "keys/{vservername}.rsa".format(**locals())
382 # fetch_keys doesn't grab the root key anymore
383 key = "keys/key_admin.rsa"
384 return TestSsh(self.name(), buildname=self.buildname(), key=key)
    def check_hooks(self):
        # hook scripts may be written in any of these languages
        extensions = [ 'py','pl','sh' ]
        # NOTE(review): the line defining 'path' is missing from this view
        scripts = utils.locate_hooks_scripts('node '+self.name(), path,extensions)
        # run every hook script; a single failure fails the whole step
        for script in scripts:
            if not self.check_hooks_script(script):
                # NOTE(review): the body of the 'if' above and the method's
                # return value are missing from this view
    def check_hooks_script(self,local_script):
        # push the script on the node's root context
        script_name = os.path.basename(local_script)
        utils.header("NODE hook {} ({})".format(script_name, self.name()))
        test_ssh = self.create_test_ssh()
        test_ssh.copy_home(local_script)
        # run it remotely from the home dir; non-zero retcod is only warned about
        if test_ssh.run("./"+script_name) != 0:
            utils.header("WARNING: node hooks check script {} FAILED (ignored)"\
                         .format(script_name))
            # NOTE(review): lines are missing from this view here (presumably
            # the return values and an 'else:' before the success message)
            utils.header("SUCCESS: node hook {} OK".format(script_name))
411 def has_libvirt(self):
412 test_ssh = self.create_test_ssh()
413 return test_ssh.run("rpm -q --quiet libvirt-client") == 0
    def _check_system_slice(self, slicename, dry_run=False):
        # system slices are named <PLC_SLICE_PREFIX>_<slicename>
        sitename = self.test_plc.plc_spec['settings']['PLC_SLICE_PREFIX']
        vservername = "{}_{}".format(sitename, slicename)
        test_ssh = self.create_test_ssh()
        if self.has_libvirt():
            # lxc-based nodes: look the container up with virsh
            utils.header("Checking system slice {} using virsh".format(slicename))
            return test_ssh.run("virsh --connect lxc:// list | grep -q ' {} '".format(vservername),
                                dry_run = dry_run) == 0
        # NOTE(review): lines are missing from this view here (presumably the
        # vserver branch header and the 'output =' assignment - 'output' below
        # is otherwise undefined)
        # NOTE(review): the .format() below applies to the result of
        # actual_command(), not to the slicefamily path string itself -
        # looks suspicious, worth double-checking against the full file
        utils.output_of(test_ssh.actual_command("cat /vservers/{}/etc/slicefamily 2> /dev/null")\
                        .format(vservername))
        # get last line only as ssh pollutes the output
        slicefamily = output.split("\n")[-1]
        utils.header("Found slicefamily '{}'for slice {}".format(slicefamily,slicename))
        # vserver-based nodes: fall back to vserver-stat
        utils.header("Checking system slice {} using vserver-stat".format(slicename))
        return test_ssh.run("vserver-stat | grep {}".format(vservername), dry_run=dry_run) == 0