# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
# #################### history
#
# see also Substrate.readme
#
# This is a complete rewrite of TestResources/Tracker/Pool
# we don't use trackers anymore and just probe/sense the running
# boxes to figure out where we are
# in order to implement some fairness in the round-robin allocation scheme
# we need an indication of the 'age' of each running entity,
# hence the 'timestamp-*' steps in TestPlc
#
# this should be much more flexible:
# * supports several plc boxes
# * supports several qemu guests per host
# * no need to worry about the tracker being in sync or not
# #################### howto use
#
# each site is expected to write its own LocalSubstrate.py
# (see e.g. LocalSubstrate.inria.py); a commented sketch is also given right below
# LocalSubstrate.py is expected to be in /root on the testmaster box
# it should define:
# . the vserver-capable boxes used for hosting myplcs
# .   and their admissible load (max # of myplcs)
# . the pool of DNS-names and IP-addresses available for myplcs
# . the kvm-qemu capable boxes to host qemu instances
# .   and their admissible load (max # of qemus)
# . the pool of DNS-names and IP-addresses available for nodes
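#
# as a sketch only -- the box names, pool sizes and MAC addresses below are made up,
# and the exact set of *_spec methods should be double-checked against
# LocalSubstrate.inria.py (newer setups define plc_vs_boxes_spec/plc_lxc_boxes_spec
# instead of the older plc_boxes_spec):
#
#   from Substrate import Substrate
#
#   class MySubstrate (Substrate):
#       def domain (self): return 'example.org'
#       def test_box_spec (self): return 'testmaster.example.org'
#       def build_boxes_spec (self): return [ 'buildbox.example.org' ]
#       def plc_boxes_spec (self): return [ ('plcbox.example.org', 10) ]
#       def qemu_boxes_spec (self): return [ ('qemubox.example.org', 4) ]
#       def vplc_ips (self): return [ ('vplc%02d'%i, ()) for i in range(1,11) ]
#       def vnode_ips (self): return [ ('vnode%02d'%i, '02:34:56:00:00:%02d'%i) for i in range(1,9) ]
#       # plus network_settings() and similar helpers used at provision time
#
#   if __name__ == '__main__':
#       MySubstrate().main()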
# #################### implem. note
#
# this model relies on 'sensing' the substrate,
# i.e. probing all the boxes for their running instances of vservers and qemu
# this is how we get rid of tracker inconsistencies
# however there is a 'black hole' between the time when a given address is
# allocated and the time when it actually gets used/becomes pingable
# this is why we still need some knowledge shared among the running tests,
# kept in a file named /root/starting (see the sample contents below)
# this is handled by the Starting class below, and hooked into the Pool class
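#
# for illustration (hypothetical names), /root/starting simply holds one
# '<vname>@<boxname>' line per instance that has been allocated but is not
# yet visible through sensing, e.g.:
#   vplc03@plcbox.example.org
#   vnode07@qemubox.example.org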
# ####################

import os, sys, re, time, socket, traceback
import subprocess, commands
from optparse import OptionParser

import utils
from TestSsh import TestSsh
from TestMapper import TestMapper

def header (message, banner=True):
    if not message: return
    if banner: print "===============",
    print message

def timestamp_sort (o1, o2): return o1.timestamp - o2.timestamp
def short_hostname (hostname):
    return hostname.split('.')[0]
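
# for illustration: short_hostname('vnode01.example.org') would return 'vnode01'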

# the place where other test instances advertise their not-yet-started
# instances, which would otherwise go undetected by sensing
class Starting:

    location='/root/starting'

    def __init__ (self):
        try: self.tuples=[line.strip().split('@')
                          for line in file(Starting.location).readlines()]
        except: self.tuples=[]

    def vnames (self):
        return [ x for (x,_) in self.tuples ]

    def add (self, vname, bname):
        if not vname in self.vnames():
            file(Starting.location,'a').write("%s@%s\n"%(vname,bname))

    def delete_vname (self, vname):
        if vname in self.vnames():
            f=file(Starting.location,'w')
            for (v,b) in self.tuples:
                if v != vname: f.write("%s@%s\n"%(v,b))
############################################################
# allows picking an available IP from a pool
# input is expressed as a list of (hostname,user_data) tuples
# that can be searched iteratively for a free slot
# e.g.
# pool = [ (hostname1,user_data1),
#          (hostname2,user_data2),
#          (hostname3,user_data3),
#          (hostname4,user_data4) ]
# assuming that ip1 and ip3 are taken (pingable), then we'd get
# pool.next_free() -> entry2
# pool.next_free() -> entry4
# pool.next_free() -> None
# that is, entry2 is not offered a second time, even if ip2 is still
# not busy/pingable when the second next_free() is issued
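#
# typical usage, as a sketch ('substrate' stands for whatever Substrate instance owns the pool):
#   pool = Pool ( [ (hostname1,user_data1), (hostname2,user_data2) ], "for vplcs", substrate)
#   pool.sense()                    # ping the pool and mark each slot busy/free
#   couple = pool.next_free()       # -> (hostname,user_data), or None when exhausted
#   if couple: (hostname,user_data) = couple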
class PoolItem:
    def __init__ (self,hostname,userdata):
        self.hostname=hostname
        self.userdata=userdata
        # slot holds 'busy' or 'free' or 'mine' or 'starting' or None
        # 'mine' is for our own stuff, 'starting' from the concurrent tests
        self.status=None
        self.ip=None

    def line (self):
        return "Pooled %s (%s) -> %s"%(self.hostname,self.userdata, self.status)

    def char (self):
        if self.status==None: return '?'
        elif self.status=='busy': return '+'
        elif self.status=='free': return '-'
        elif self.status=='mine': return 'M'
        elif self.status=='starting': return 'S'

    def get_ip (self):
        if self.ip: return self.ip
        ip=socket.gethostbyname(self.hostname)
        self.ip=ip
        return ip
class Pool:

    def __init__ (self, tuples, message, substrate):
        self.pool_items= [ PoolItem (hostname,userdata) for (hostname,userdata) in tuples ]
        self.message=message
        # where to send notifications upon load_starting
        self.substrate=substrate

    def list (self, verbose=False):
        for i in self.pool_items: print i.line()

    def line (self):
        line=self.message
        for i in self.pool_items: line += ' ' + i.char()
        return line

    def _item (self, hostname):
        for i in self.pool_items:
            if i.hostname==hostname: return i
        raise Exception ("Could not locate hostname %s in pool %s"%(hostname,self.message))

    def retrieve_userdata (self, hostname):
        return self._item(hostname).userdata

    def get_ip (self, hostname):
        try: return self._item(hostname).get_ip()
        except: return socket.gethostbyname(hostname)

    def set_mine (self, hostname):
        try:
            self._item(hostname).status='mine'
        except:
            print 'WARNING: host %s not found in IP pool %s'%(hostname,self.message)

    def next_free (self):
        for i in self.pool_items:
            if i.status == 'free':
                i.status='mine'
                return (i.hostname,i.userdata)
        return None

    # we have a starting instance of our own
    def add_starting (self, vname, bname):
        Starting().add(vname,bname)
        for i in self.pool_items:
            if i.hostname==vname: i.status='mine'

    # load the starting instances from the common file
    # remember that some of them might be ours
    # return the list of (vname,bname) tuples that are not ours
    def load_starting (self):
        starting=Starting()
        new_tuples=[]
        for (v,b) in starting.tuples:
            for i in self.pool_items:
                if i.hostname==v and i.status=='free':
                    i.status='starting'
                    new_tuples.append( (v,b,) )
        return new_tuples

    def release_my_starting (self):
        for i in self.pool_items:
            if i.status=='mine':
                Starting().delete_vname (i.hostname)
    def sense (self):
        for item in self.pool_items:
            if item.status is not None:
                continue
            if self.check_ping (item.hostname):
                item.status='busy'
            else:
                item.status='free'
        print 'Sensing IP pool',self.message,
        print self.line()
        for (vname,bname) in self.load_starting():
            self.substrate.add_starting_dummy (bname, vname)
        print 'After starting: IP pool'
        print self.line()
    # OS-dependent ping option (support for macos, for convenience)
    ping_timeout_option = None

    # returns True when a given hostname/ip responds to ping
    def check_ping (self,hostname):
        if not Pool.ping_timeout_option:
            (status,osname) = commands.getstatusoutput("uname -s")
            if status != 0:
                raise Exception, "TestPool: Cannot figure your OS name"
            if osname == "Linux":
                Pool.ping_timeout_option="-w"
            elif osname == "Darwin":
                Pool.ping_timeout_option="-t"

        command="ping -c 1 %s 1 %s"%(Pool.ping_timeout_option,hostname)
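        # for illustration, the resulting command is e.g. 'ping -c 1 -w 1 <hostname>'
        # on Linux, and 'ping -c 1 -t 1 <hostname>' on Darwin (single packet, 1-second timeout)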
        (status,output) = commands.getstatusoutput(command)
        return status == 0
class Box:
    def __init__ (self,hostname):
        self.hostname=hostname
        self._probed=None

    def shortname (self):
        return short_hostname(self.hostname)

    def test_ssh (self): return TestSsh(self.hostname,username='root',unknown_host=False)

    def reboot (self, options):
        self.test_ssh().run("shutdown -r now",message="Rebooting %s"%self.hostname,
                            dry_run=options.dry_run)

    def hostname_fedora (self): return "%s [%s]"%(self.hostname,self.fedora())

    separator = "===composite==="

    # take this chance to gather useful stuff
    def probe (self):
        if self._probed is not None: return self._probed
        composite_command = [ ]
        composite_command += [ "hostname" ]
        composite_command += [ ";" , "echo", Box.separator , ";" ]
        composite_command += [ "uptime" ]
        composite_command += [ ";" , "echo", Box.separator , ";" ]
        composite_command += [ "uname", "-r"]
        composite_command += [ ";" , "echo", Box.separator , ";" ]
        composite_command += [ "cat" , "/etc/fedora-release" ]

        # due to colons and all, this is going wrong on the local box (typically testmaster)
        # I am reluctant to change TestSsh as it might break all over the place, so
        if self.test_ssh().is_local():
            probe_argv = [ "bash", "-c", " ".join (composite_command) ]
        else:
            probe_argv=self.test_ssh().actual_argv(composite_command)
        composite=self.backquote ( probe_argv, trash_err=True )
        self._hostname = self._uptime = self._uname = self._fedora = "** Unknown **"
        if not composite:
            print "root@%s unreachable"%self.hostname
            self._probed=''
        else:
            try:
                pieces = composite.split(Box.separator)
                pieces = [ x.strip() for x in pieces ]
                [self._hostname, self._uptime, self._uname, self._fedora] = pieces
                self._uptime = ', '.join([ x.strip() for x in self._uptime.split(',')[2:]])
                self._fedora = self._fedora.replace("Fedora release ","f").split(" ")[0]
            except:
                print 'BEG issue with pieces',pieces
                traceback.print_exc()
                print 'END issue with pieces',pieces
            self._probed=self._hostname
        return self._probed

    # use argv=['bash','-c',"the command line"]
    def uptime (self):
        if hasattr(self,'_uptime') and self._uptime: return self._uptime
        return '*unprobed* uptime'
    def uname (self):
        if hasattr(self,'_uname') and self._uname: return self._uname
        return '*unprobed* uname'
    def fedora (self):
        if hasattr(self,'_fedora') and self._fedora: return self._fedora
        return '*unprobed* fedora'

    def run (self,argv,message=None,trash_err=False,dry_run=False):
        if dry_run:
            print 'DRY_RUN:'," ".join(argv)
            return 0
        if message: header(message)
        if not trash_err:
            return subprocess.call(argv)
        else:
            return subprocess.call(argv,stderr=file('/dev/null','w'))

    def run_ssh (self, argv, message, trash_err=False, dry_run=False):
        ssh_argv = self.test_ssh().actual_argv(argv)
        result=self.run (ssh_argv, message, trash_err, dry_run=dry_run)
        if result!=0:
            print "WARNING: failed to run %s on %s"%(" ".join(argv),self.hostname)
        return result

    def backquote (self, argv, trash_err=False):
        # print 'running backquote',argv
        if not trash_err:
            result= subprocess.Popen(argv,stdout=subprocess.PIPE).communicate()[0]
        else:
            result= subprocess.Popen(argv,stdout=subprocess.PIPE,stderr=file('/dev/null','w')).communicate()[0]
        return result

    # if you have any shell-expanded arguments like *
    # and if there's any chance the command is addressed to the local host
    def backquote_ssh (self, argv, trash_err=False):
        if not self.probe(): return ''
        return self.backquote( self.test_ssh().actual_argv(argv), trash_err)
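
    # typical use, as a sketch: run a command on the box over ssh and capture its output, e.g.
    #   kernel = self.backquote_ssh(['uname','-r'], trash_err=True).strip()
    # or, when shell expansion is needed (see the note above):
    #   timestamps = self.backquote_ssh(['bash','-c','grep . /root/*/timestamp /dev/null'], trash_err=True)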
############################################################
class BuildInstance:
    def __init__ (self, buildname, pid, buildbox):
        self.buildname=buildname
        self.buildbox=buildbox
        self.pids=[pid]

    def add_pid (self,pid):
        self.pids.append(pid)

    def line (self):
        return "== %s == (pids=%r)"%(self.buildname,self.pids)
class BuildBox (Box):
    def __init__ (self,hostname):
        Box.__init__(self,hostname)
        self.build_instances=[]

    def add_build (self,buildname,pid):
        for build in self.build_instances:
            if build.buildname==buildname:
                build.add_pid(pid)
                return
        self.build_instances.append(BuildInstance(buildname, pid, self))

    def list (self, verbose=False):
        if not self.build_instances:
            header ('No build process on %s (%s)'%(self.hostname_fedora(),self.uptime()))
        else:
            header ("Builds on %s (%s)"%(self.hostname_fedora(),self.uptime()))
            for b in self.build_instances:
                header (b.line(),banner=False)

    def reboot (self, options):
        if not options.soft:
            Box.reboot(self,options)
        else:
            command=['pkill','vbuild']
            self.run_ssh(command,"Terminating vbuild processes",dry_run=options.dry_run)

    # inspect box and find currently running builds
    matcher=re.compile("\s*(?P<pid>[0-9]+).*-[bo]\s+(?P<buildname>[^\s]+)(\s|\Z)")
    matcher_building_vm=re.compile("\s*(?P<pid>[0-9]+).*init-vserver.*\s+(?P<buildname>[^\s]+)\s*\Z")

    def sense (self, options):
        pids=self.backquote_ssh(['pgrep','vbuild'],trash_err=True)
        if not pids: return
        command=['ps','-o','pid,command'] + [ pid for pid in pids.split("\n") if pid]
        ps_lines=self.backquote_ssh (command).split('\n')
        for line in ps_lines:
            if not line.strip() or line.find('PID')>=0: continue
            m=BuildBox.matcher.match(line)
            if m:
                date=time.strftime('%Y-%m-%d',time.localtime(time.time()))
                buildname=m.group('buildname').replace('@DATE@',date)
                self.add_build (buildname,m.group('pid'))
                continue
            m=BuildBox.matcher_building_vm.match(line)
            if m:
                # buildname is expanded here
                self.add_build (m.group('buildname'),m.group('pid'))
                continue
            header('BuildBox.sense: command %r returned line that failed to match'%command)
            header(">>%s<<"%line)
############################################################
class PlcInstance:
    def __init__ (self, plcbox):
        self.plc_box=plcbox
        # not known yet
        self.timestamp=0

    def set_timestamp (self,timestamp): self.timestamp=timestamp
    def set_now (self): self.timestamp=int(time.time())
    def pretty_timestamp (self): return time.strftime("%Y-%m-%d:%H-%M",time.localtime(self.timestamp))

class PlcVsInstance (PlcInstance):
    def __init__ (self, plcbox, vservername, ctxid):
        PlcInstance.__init__(self,plcbox)
        self.vservername=vservername
        self.ctxid=ctxid

    def vplcname (self):
        return self.vservername.split('-')[-1]
    def buildname (self):
        return self.vservername.rsplit('-',2)[0]

    def line (self):
        msg="== %s =="%(self.vplcname())
        msg += " [=%s]"%self.vservername
        if self.ctxid==0: msg+=" not (yet?) running"
        else: msg+=" (ctx=%s)"%self.ctxid
        if self.timestamp: msg += " @ %s"%self.pretty_timestamp()
        else: msg += " *unknown timestamp*"
        return msg

    def kill (self):
        msg="vserver stopping %s on %s"%(self.vservername,self.plc_box.hostname)
        self.plc_box.run_ssh(['vserver',self.vservername,'stop'],msg)
        self.plc_box.forget(self)
class PlcLxcInstance (PlcInstance):
    # does lxc have a context id of any kind ?
    def __init__ (self, plcbox, lxcname, pid):
        PlcInstance.__init__(self, plcbox)
        self.lxcname = lxcname
        self.pid = pid

    def vplcname (self):
        return self.lxcname.split('-')[-1]
    def buildname (self):
        return self.lxcname.rsplit('-',2)[0]

    def line (self):
        msg="== %s =="%(self.vplcname())
        msg += " [=%s]"%self.lxcname
        if self.pid==-1: msg+=" not (yet?) running"
        else: msg+=" (pid=%s)"%self.pid
        if self.timestamp: msg += " @ %s"%self.pretty_timestamp()
        else: msg += " *unknown timestamp*"
        return msg

    def kill (self):
        command="rsync lxc-driver.sh %s:/root"%self.plc_box.hostname
        commands.getstatusoutput(command)
        msg="lxc container stopping %s on %s"%(self.lxcname,self.plc_box.hostname)
        self.plc_box.run_ssh(['/root/lxc-driver.sh','-c','stop_lxc','-n',self.lxcname],msg)
        self.plc_box.forget(self)
class PlcBox (Box):
    def __init__ (self, hostname, max_plcs):
        Box.__init__(self,hostname)
        self.plc_instances=[]
        self.max_plcs=max_plcs

    def free_slots (self):
        return self.max_plcs - len(self.plc_instances)

    # fill one slot even though this one is not started yet
    def add_dummy (self, plcname):
        dummy=PlcVsInstance(self,'dummy_'+plcname,0)
        dummy.set_now()
        self.plc_instances.append(dummy)

    def forget (self, plc_instance):
        self.plc_instances.remove(plc_instance)

    def reboot (self, options):
        if not options.soft:
            Box.reboot(self,options)
        else:
            self.soft_reboot (options)

    def list (self, verbose=False):
        if not self.plc_instances:
            header ('No plc running on %s'%(self.line()))
        else:
            header ("Active plc VMs on %s"%self.line())
            self.plc_instances.sort(timestamp_sort)
            for p in self.plc_instances:
                header (p.line(),banner=False)
class PlcVsBox (PlcBox):

    def add_vserver (self,vservername,ctxid):
        for plc in self.plc_instances:
            if plc.vservername==vservername:
                header("WARNING, duplicate myplc %s running on %s"%\
                           (vservername,self.hostname),banner=False)
                return
        self.plc_instances.append(PlcVsInstance(self,vservername,ctxid))

    def line (self):
        msg="%s [max=%d,free=%d, VS-based] (%s)"%(self.hostname_fedora(), self.max_plcs,self.free_slots(),self.uname())
        return msg

    def plc_instance_by_vservername (self, vservername):
        for p in self.plc_instances:
            if p.vservername==vservername: return p
        return None

    def soft_reboot (self, options):
        self.run_ssh(['service','util-vserver','stop'],"Stopping all running vservers on %s"%(self.hostname,),
                     dry_run=options.dry_run)

    def sense (self, options):
        # try to find fullname (vserver_stat truncates to a ridiculously short name)
        # fetch the contexts for all vservers on that box
        map_command=['grep','.','/etc/vservers/*/context','/dev/null',]
        context_map=self.backquote_ssh (map_command)
        # at this point we have a set of lines like
        # /etc/vservers/2010.01.20--k27-f12-32-vplc03/context:40144
        ctx_dict={}
        for map_line in context_map.split("\n"):
            if not map_line: continue
            [path,xid] = map_line.split(':')
            ctx_dict[xid]=os.path.basename(os.path.dirname(path))
        # at this point ctx_dict maps context id to vservername

        command=['vserver-stat']
        vserver_stat = self.backquote_ssh (command)
        for vserver_line in vserver_stat.split("\n"):
            if not vserver_line: continue
            context=vserver_line.split()[0]
            if context=="CTX": continue
            try:
                longname=ctx_dict[context]
                self.add_vserver(longname,context)
            except:
                print 'WARNING: found ctx %s in vserver_stat but was unable to figure a corresp. vserver'%context

        running_vsnames = [ i.vservername for i in self.plc_instances ]
        command= ['grep','.']
        command += ['/vservers/%s.timestamp'%vs for vs in running_vsnames]
        command += ['/dev/null']
        ts_lines=self.backquote_ssh(command,trash_err=True).split('\n')
        for ts_line in ts_lines:
            if not ts_line.strip(): continue
            # expect /vservers/<vservername>.timestamp:<timestamp>
            try:
                (ts_file,timestamp)=ts_line.split(':')
                ts_file=os.path.basename(ts_file)
                (vservername,_)=os.path.splitext(ts_file)
                timestamp=int(timestamp)
                p=self.plc_instance_by_vservername(vservername)
                if not p:
                    print 'WARNING zombie plc',self.hostname,ts_line
                    print '... was expecting',vservername,'in',[i.vservername for i in self.plc_instances]
                    continue
                p.set_timestamp(timestamp)
            except: print 'WARNING, could not parse ts line',ts_line
class PlcLxcBox (PlcBox):

    def add_lxc (self,lxcname,pid):
        for plc in self.plc_instances:
            if plc.lxcname==lxcname:
                header("WARNING, duplicate myplc %s running on %s"%\
                           (lxcname,self.hostname),banner=False)
                return
        self.plc_instances.append(PlcLxcInstance(self,lxcname,pid))

    # a line describing the box
    def line (self):
        return "%s [max=%d,free=%d, LXC-based] (%s)"%(self.hostname_fedora(), self.max_plcs,self.free_slots(),
                                                      self.uname())

    def plc_instance_by_lxcname (self, lxcname):
        for p in self.plc_instances:
            if p.lxcname==lxcname: return p
        return None

    # essentially shutdown all running containers
    def soft_reboot (self, options):
        command="rsync lxc-driver.sh %s:/root"%self.hostname
        commands.getstatusoutput(command)
        self.run_ssh(['/root/lxc-driver.sh','-c','stop_all'],"Stopping all running lxc containers on %s"%(self.hostname,),
                     dry_run=options.dry_run)

    # sense is expected to fill self.plc_instances with PlcLxcInstance's
    # to describe the currently running VM's
    # as well as to call self.get_uname() once
    def sense (self, options):
        command="rsync lxc-driver.sh %s:/root"%self.hostname
        commands.getstatusoutput(command)
        command=['/root/lxc-driver.sh','-c','sense_all']
        lxc_stat = self.backquote_ssh (command)
        for lxc_line in lxc_stat.split("\n"):
            if not lxc_line: continue
            lxcname=lxc_line.split(";")[0]
            pid=lxc_line.split(";")[1]
            timestamp=lxc_line.split(";")[2]
            self.add_lxc(lxcname,pid)
            timestamp=int(timestamp)
            p=self.plc_instance_by_lxcname(lxcname)
            if not p:
                print 'WARNING zombie plc',self.hostname,lxcname
                print '... was expecting',lxcname,'in',[i.lxcname for i in self.plc_instances]
                continue
            p.set_timestamp(timestamp)
############################################################
class QemuInstance:
    def __init__ (self, nodename, pid, qemubox):
        self.nodename=nodename
        self.pid=pid
        self.qemu_box=qemubox
        # not known yet
        self.buildname=None
        self.timestamp=0

    def set_buildname (self,buildname): self.buildname=buildname
    def set_timestamp (self,timestamp): self.timestamp=timestamp
    def set_now (self): self.timestamp=int(time.time())
    def pretty_timestamp (self): return time.strftime("%Y-%m-%d:%H-%M",time.localtime(self.timestamp))

    def line (self):
        msg = "== %s =="%(short_hostname(self.nodename))
        msg += " [=%s]"%self.buildname
        if self.pid: msg += " (pid=%s)"%self.pid
        else: msg += " not (yet?) running"
        if self.timestamp: msg += " @ %s"%self.pretty_timestamp()
        else: msg += " *unknown timestamp*"
        return msg

    def kill (self):
        if self.pid==0:
            print "cannot kill qemu %s with pid==0"%self.nodename
            return
        msg="Killing qemu %s with pid=%s on box %s"%(self.nodename,self.pid,self.qemu_box.hostname)
        self.qemu_box.run_ssh(['kill',"%s"%self.pid],msg)
        self.qemu_box.forget(self)
class QemuBox (Box):
    def __init__ (self, hostname, max_qemus):
        Box.__init__(self,hostname)
        self.qemu_instances=[]
        self.max_qemus=max_qemus

    def add_node (self,nodename,pid):
        for qemu in self.qemu_instances:
            if qemu.nodename==nodename:
                header("WARNING, duplicate qemu %s running on %s"%\
                           (nodename,self.hostname), banner=False)
                return
        self.qemu_instances.append(QemuInstance(nodename,pid,self))

    def forget (self, qemu_instance):
        self.qemu_instances.remove(qemu_instance)

    # fill one slot even though this one is not started yet
    def add_dummy (self, nodename):
        dummy=QemuInstance('dummy_'+nodename,0,self)
        dummy.set_now()
        self.qemu_instances.append(dummy)

    def line (self):
        return "%s [max=%d,free=%d] (%s) %s"%(
            self.hostname_fedora(), self.max_qemus,self.free_slots(),
            self.uptime(),self.driver())

    def list (self, verbose=False):
        if not self.qemu_instances:
            header ('No qemu on %s'%(self.line()))
        else:
            header ("Qemus on %s"%(self.line()))
            self.qemu_instances.sort(timestamp_sort)
            for q in self.qemu_instances:
                header (q.line(),banner=False)

    def free_slots (self):
        return self.max_qemus - len(self.qemu_instances)

    def driver (self):
        if hasattr(self,'_driver') and self._driver: return self._driver
        return '*undef* driver'

    def qemu_instance_by_pid (self,pid):
        for q in self.qemu_instances:
            if q.pid==pid: return q
        return None

    def qemu_instance_by_nodename_buildname (self,nodename,buildname):
        for q in self.qemu_instances:
            if q.nodename==nodename and q.buildname==buildname:
                return q
        return None

    def reboot (self, options):
        if not options.soft:
            Box.reboot(self,options)
        else:
            self.run_ssh(['pkill','qemu'],"Killing qemu instances",
                         dry_run=options.dry_run)

    matcher=re.compile("\s*(?P<pid>[0-9]+).*-cdrom\s+(?P<nodename>[^\s]+)\.iso")

    def sense (self, options):
        modules=self.backquote_ssh(['lsmod']).split('\n')
        self._driver='*NO kqemu/kvm_intel MODULE LOADED*'
        for module in modules:
            if module.find('kqemu')==0:
                self._driver='kqemu module loaded'
            # kvm might be loaded without kvm_intel (we don't have AMD)
            elif module.find('kvm_intel')==0:
                self._driver='kvm_intel OK'
        ########## find out running pids
        pids=self.backquote_ssh(['pgrep','qemu'])
        if not pids: return
        command=['ps','-o','pid,command'] + [ pid for pid in pids.split("\n") if pid]
        ps_lines = self.backquote_ssh (command).split("\n")
        for line in ps_lines:
            if not line.strip() or line.find('PID') >=0 : continue
            m=QemuBox.matcher.match(line)
            if m:
                self.add_node (m.group('nodename'),m.group('pid'))
                continue
            header('QemuBox.sense: command %r returned line that failed to match'%command)
            header(">>%s<<"%line)
        ########## retrieve alive instances and map to build
        live_builds=[]
        command=['grep','.','/vservers/*/*/qemu.pid','/dev/null']
        pid_lines=self.backquote_ssh(command,trash_err=True).split('\n')
        for pid_line in pid_lines:
            if not pid_line.strip(): continue
            # expect <build>/<nodename>/qemu.pid:<pid>
            try:
                (_,__,buildname,nodename,tail)=pid_line.split('/')
                (_,pid)=tail.split(':')
                q=self.qemu_instance_by_pid (pid)
                if not q: continue
                q.set_buildname(buildname)
                live_builds.append(buildname)
            except: print 'WARNING, could not parse pid line',pid_line
        # retrieve timestamps
        if not live_builds: return
        command= ['grep','.']
        command += ['/vservers/%s/*/timestamp'%b for b in live_builds]
        command += ['/dev/null']
        ts_lines=self.backquote_ssh(command,trash_err=True).split('\n')
        for ts_line in ts_lines:
            if not ts_line.strip(): continue
            # expect <build>/<nodename>/timestamp:<timestamp>
            try:
                (_,__,buildname,nodename,tail)=ts_line.split('/')
                nodename=nodename.replace('qemu-','')
                (_,timestamp)=tail.split(':')
                timestamp=int(timestamp)
                q=self.qemu_instance_by_nodename_buildname(nodename,buildname)
                if not q:
                    print 'WARNING zombie qemu',self.hostname,ts_line
                    print '... was expecting (',short_hostname(nodename),buildname,') in',\
                        [ (short_hostname(i.nodename),i.buildname) for i in self.qemu_instances ]
                    continue
                q.set_timestamp(timestamp)
            except: print 'WARNING, could not parse ts line',ts_line
class TestInstance:
    def __init__ (self, buildname, pid=0):
        self.pids=[]
        if pid!=0: self.pids.append(pid)
        self.buildname=buildname
        # (plcindex,step) tuples for the steps that have failed so far
        self.broken_steps=[]
        self.timestamp=0

    def set_timestamp (self,timestamp): self.timestamp=timestamp
    def set_now (self): self.timestamp=int(time.time())
    def pretty_timestamp (self): return time.strftime("%Y-%m-%d:%H-%M",time.localtime(self.timestamp))

    def is_running (self): return len(self.pids) != 0

    def add_pid (self,pid):
        self.pids.append(pid)
    def set_broken (self, plcindex, step):
        self.broken_steps.append ( (plcindex, step,) )

    def line (self):
        double='=='
        if self.pids: double='*'+double[1]
        if self.broken_steps: double=double[0]+'B'
        msg = " %s %s =="%(double,self.buildname)
        if not self.pids: pass
        elif len(self.pids)==1: msg += " (pid=%s)"%self.pids[0]
        else: msg += " !!!pids=%s!!!"%self.pids
        msg += " @%s"%self.pretty_timestamp()
        if self.broken_steps:
            # sometimes we have an empty plcindex
            msg += " [BROKEN=" + " ".join( [ "%s@%s"%(s,i) if i else s for (i,s) in self.broken_steps ] ) + "]"
        return msg
class TestBox (Box):
    def __init__ (self,hostname):
        Box.__init__(self,hostname)
        self.starting_ips=[]
        self.test_instances=[]

    def reboot (self, options):
        # can't reboot a vserver VM
        self.run_ssh (['pkill','run_log'],"Terminating current runs",
                      dry_run=options.dry_run)
        self.run_ssh (['rm','-f',Starting.location],"Cleaning %s"%Starting.location,
                      dry_run=options.dry_run)

    def get_test (self, buildname):
        for i in self.test_instances:
            if i.buildname==buildname: return i
        return None

    # we scan ALL remaining test results, even the ones not running
    def add_timestamp (self, buildname, timestamp):
        i=self.get_test(buildname)
        if i:
            i.set_timestamp(timestamp)
        else:
            i=TestInstance(buildname,0)
            i.set_timestamp(timestamp)
            self.test_instances.append(i)

    def add_running_test (self, pid, buildname):
        i=self.get_test(buildname)
        if not i:
            self.test_instances.append (TestInstance (buildname,pid))
            return
        if i.pids:
            print "WARNING: 2 concurrent tests run on same build %s"%buildname
        i.add_pid (pid)

    def add_broken (self, buildname, plcindex, step):
        i=self.get_test(buildname)
        if not i:
            i=TestInstance(buildname)
            self.test_instances.append(i)
        i.set_broken(plcindex, step)

    matcher_proc=re.compile (".*/proc/(?P<pid>[0-9]+)/cwd.*/root/(?P<buildname>[^/]+)$")
    matcher_grep=re.compile ("/root/(?P<buildname>[^/]+)/logs/trace.*:TRACE:\s*(?P<plcindex>[0-9]+).*step=(?P<step>\S+).*")
    matcher_grep_missing=re.compile ("grep: /root/(?P<buildname>[^/]+)/logs/trace: No such file or directory")
    def sense (self, options):
        self.starting_ips=[x for x in self.backquote_ssh(['cat',Starting.location], trash_err=True).strip().split('\n') if x]

        # scan timestamps on all tests
        # this is likely to not invoke ssh so we need to be a bit smarter to get * expanded
        # xxx would make sense above too
        command=['bash','-c',"grep . /root/*/timestamp /dev/null"]
        ts_lines=self.backquote_ssh(command,trash_err=True).split('\n')
        for ts_line in ts_lines:
            if not ts_line.strip(): continue
            # expect /root/<buildname>/timestamp:<timestamp>
            try:
                (ts_file,timestamp)=ts_line.split(':')
                ts_file=os.path.dirname(ts_file)
                buildname=os.path.basename(ts_file)
                timestamp=int(timestamp)
                t=self.add_timestamp(buildname,timestamp)
            except: print 'WARNING, could not parse ts line',ts_line

        # let's try to be robust here -- tests that fail very early, e.g. with
        # "Cannot make space for a PLC instance: vplc IP pool exhausted" as part of provision,
        # will leave a 'trace' symlink pointing to a nonexistent 'trace-<>.txt' because no step has gone through
        # a plain 'trace' should exist though, as it is created by run_log
        command=['bash','-c',"grep KO /root/*/logs/trace /dev/null 2>&1" ]
        trace_lines=self.backquote_ssh (command).split('\n')
        for line in trace_lines:
            if not line.strip(): continue
            m=TestBox.matcher_grep_missing.match(line)
            if m:
                buildname=m.group('buildname')
                self.add_broken(buildname,'','NO STEP DONE')
                continue
            m=TestBox.matcher_grep.match(line)
            if m:
                buildname=m.group('buildname')
                plcindex=m.group('plcindex')
                step=m.group('step')
                self.add_broken(buildname,plcindex, step)
                continue
            header("TestBox.sense: command %r returned line that failed to match\n%s"%(command,line))
            header(">>%s<<"%line)

        pids = self.backquote_ssh (['pgrep','run_log'],trash_err=True)
        if not pids: return
        command=['ls','-ld'] + ["/proc/%s/cwd"%pid for pid in pids.split("\n") if pid]
        ps_lines=self.backquote_ssh (command).split('\n')
        for line in ps_lines:
            if not line.strip(): continue
            m=TestBox.matcher_proc.match(line)
            if m:
                pid=m.group('pid')
                buildname=m.group('buildname')
                self.add_running_test(pid, buildname)
                continue
            header("TestBox.sense: command %r returned line that failed to match\n%s"%(command,line))
            header(">>%s<<"%line)

    def line (self):
        return self.hostname_fedora()

    def list (self, verbose=False):
        # verbose shows all tests
        if verbose:
            instances = self.test_instances
            msg="tests"
        else:
            instances = [ i for i in self.test_instances if i.is_running() ]
            msg="running tests"

        if not instances:
            header ("No %s on %s"%(msg,self.line()))
        else:
            header ("%s on %s"%(msg,self.line()))
            instances.sort(timestamp_sort)
            for i in instances: print i.line()
        # show 'starting' regardless of verbose
        if self.starting_ips:
            header ("Starting IP addresses on %s"%self.line())
            self.starting_ips.sort()
            for starting in self.starting_ips: print starting
        else:
            header ("Empty 'starting' on %s"%self.line())
############################################################
class Options: pass

class Substrate:

    def __init__ (self, plcs_on_vs=True, plcs_on_lxc=False):
        self.options=Options()
        self.options.dry_run=False
        self.options.verbose=False
        self.options.reboot=False
        self.options.soft=False
        self.test_box = TestBox (self.test_box_spec())
        self.build_boxes = [ BuildBox(h) for h in self.build_boxes_spec() ]
        # for compat with older LocalSubstrate
        try:
            self.plc_vs_boxes = [ PlcVsBox (h,m) for (h,m) in self.plc_vs_boxes_spec ()]
            self.plc_lxc_boxes = [ PlcLxcBox (h,m) for (h,m) in self.plc_lxc_boxes_spec ()]
        except:
            self.plc_vs_boxes = [ PlcVsBox (h,m) for (h,m) in self.plc_boxes_spec ()]
            self.plc_lxc_boxes = [ ]
        self.qemu_boxes = [ QemuBox (h,m) for (h,m) in self.qemu_boxes_spec ()]

        self._sensed=False
        self.vplc_pool = Pool (self.vplc_ips(),"for vplcs",self)
        self.vnode_pool = Pool (self.vnode_ips(),"for vnodes",self)

        self.rescope (plcs_on_vs=plcs_on_vs, plcs_on_lxc=plcs_on_lxc)
    # which plc boxes are we interested in ?
    def rescope (self, plcs_on_vs, plcs_on_lxc):
        self.plc_boxes = []
        if plcs_on_vs: self.plc_boxes += self.plc_vs_boxes
        if plcs_on_lxc: self.plc_boxes += self.plc_lxc_boxes
        self.default_boxes = self.plc_boxes + self.qemu_boxes
        self.all_boxes = self.build_boxes + [ self.test_box ] + self.plc_boxes + self.qemu_boxes
    def summary_line (self):
        msg = ""
        msg += " %d vp"%len(self.plc_vs_boxes)
        msg += " %d xp"%len(self.plc_lxc_boxes)
        msg += " %d tried plc boxes"%len(self.plc_boxes)
        return msg
    def fqdn (self, hostname):
        if hostname.find('.')<0: return "%s.%s"%(hostname,self.domain())
        return hostname
    # return True if actual sensing takes place
    def sense (self,force=False):
        if self._sensed and not force: return False
        print 'Sensing local substrate...',
        for b in self.default_boxes: b.sense(self.options)
        print 'Done'
        self._sensed=True
        return True

    def list (self, verbose=False):
        for b in self.default_boxes:
            b.list(verbose)

    def add_dummy_plc (self, plc_boxname, plcname):
        for pb in self.plc_boxes:
            if pb.hostname==plc_boxname:
                pb.add_dummy(plcname)
                return True

    def add_dummy_qemu (self, qemu_boxname, qemuname):
        for qb in self.qemu_boxes:
            if qb.hostname==qemu_boxname:
                qb.add_dummy(qemuname)
                return True

    # a starting instance was found in /root/starting; reserve the slot on the
    # matching box, whether it turns out to be a plc or a qemu node
    def add_starting_dummy (self, bname, vname):
        return self.add_dummy_plc (bname, vname) or self.add_dummy_qemu (bname, vname)
    def provision (self,plcs,options):
        try:
            # attach each plc to a plc box and an IP address
            plcs = [ self.provision_plc (plc,options) for plc in plcs ]
            # attach each node/qemu to a qemu box with an IP address
            plcs = [ self.provision_qemus (plc,options) for plc in plcs ]
            # update the SFA spec accordingly
            plcs = [ self.localize_sfa_rspec(plc,options) for plc in plcs ]
            return plcs
        except Exception, e:
            print '* Could not provision this test on current substrate','--',e,'--','exiting'
            traceback.print_exc()
            sys.exit(1)

    # it is expected that a couple of options like ips_bplc and ips_vplc
    # are set or unset together
    @staticmethod
    def check_options (x,y):
        if not x and not y: return True
        return len(x)==len(y)
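
    # as an illustration of the above: check_options([],[]) is True (both unset),
    # check_options(['box1'],['vplc1']) is True (same length), while
    # check_options(['box1'],['vplc1','vplc2']) is False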
    # find an available plc box (or make space)
    # and a free IP address (using options if present)
    def provision_plc (self, plc, options):

        assert Substrate.check_options (options.ips_bplc, options.ips_vplc)

        #### let's find an IP address for that plc
        # look in options first
        if options.ips_vplc:
            # this is a rerun
            # we don't check anything here,
            # it is the caller's responsibility to cleanup and make sure this makes sense
            plc_boxname = options.ips_bplc.pop()
            vplc_hostname=options.ips_vplc.pop()
        else:
            if self.sense(): self.list()
            plc_boxname=None
            vplc_hostname=None
            # try to find an available IP
            self.vplc_pool.sense()
            couple=self.vplc_pool.next_free()
            if couple:
                (vplc_hostname,unused)=couple
            #### we need to find one plc box that still has a slot
            max_free=0
            # use the box that has max free spots for load balancing
            for pb in self.plc_boxes:
                free=pb.free_slots()
                if free>max_free:
                    plc_boxname=pb.hostname
                    max_free=free
            # if there's no available slot in the plc_boxes, or we need a free IP address
            # make space by killing the oldest running instance
            if not plc_boxname or not vplc_hostname:
                # find the oldest of all our instances
                all_plc_instances=reduce(lambda x, y: x+y,
                                         [ pb.plc_instances for pb in self.plc_boxes ],
                                         [])
                all_plc_instances.sort(timestamp_sort)
                try:
                    plc_instance_to_kill=all_plc_instances[0]
                except:
                    msg=""
                    if not plc_boxname: msg += " PLC boxes are full"
                    if not vplc_hostname: msg += " vplc IP pool exhausted"
                    msg += " %s"%self.summary_line()
                    raise Exception,"Cannot make space for a PLC instance:"+msg
                freed_plc_boxname=plc_instance_to_kill.plc_box.hostname
                freed_vplc_hostname=plc_instance_to_kill.vplcname()
                message='killing oldest plc instance = %s on %s'%(plc_instance_to_kill.line(),
                                                                  freed_plc_boxname)
                plc_instance_to_kill.kill()
                # use this new plcbox if that was the problem
                if not plc_boxname:
                    plc_boxname=freed_plc_boxname
                # ditto for the IP address
                if not vplc_hostname:
                    vplc_hostname=freed_vplc_hostname
                    # record in pool as mine
                    self.vplc_pool.set_mine(vplc_hostname)

        self.add_dummy_plc(plc_boxname,plc['name'])
        vplc_ip = self.vplc_pool.get_ip(vplc_hostname)
        self.vplc_pool.add_starting(vplc_hostname, plc_boxname)

        #### compute a helpful vserver name
        # remove domain in hostname
        vplc_short = short_hostname(vplc_hostname)
        vservername = "%s-%d-%s" % (options.buildname,plc['index'],vplc_short)
        plc_name = "%s_%s"%(plc['name'],vplc_short)

        utils.header( 'PROVISION plc %s in box %s at IP %s as %s'%\
                          (plc['name'],plc_boxname,vplc_hostname,vservername))

        #### apply in the plc_spec
        # label=options.personality.replace("linux","")
        mapper = {'plc': [ ('*' , {'host_box':plc_boxname,
                                   # 'name':'%s-'+label,
                                   'name':plc_name,
                                   'vservername':vservername,
                                   'vserverip':vplc_ip,
                                   'PLC_DB_HOST':vplc_hostname,
                                   'PLC_API_HOST':vplc_hostname,
                                   'PLC_BOOT_HOST':vplc_hostname,
                                   'PLC_WWW_HOST':vplc_hostname,
                                   'PLC_NET_DNS1' : self.network_settings() [ 'interface_fields:dns1' ],
                                   'PLC_NET_DNS2' : self.network_settings() [ 'interface_fields:dns2' ],
                                   } ) ]
                  }

        # mappers only work on a list of plcs
        return TestMapper([plc],options).map(mapper)[0]
    def provision_qemus (self, plc, options):

        assert Substrate.check_options (options.ips_bnode, options.ips_vnode)

        test_mapper = TestMapper ([plc], options)
        nodenames = test_mapper.node_names()
        maps=[]
        for nodename in nodenames:

            if options.ips_vnode:
                # as above, it's a rerun, take it for granted
                qemu_boxname=options.ips_bnode.pop()
                vnode_hostname=options.ips_vnode.pop()
            else:
                if self.sense(): self.list()
                qemu_boxname=None
                vnode_hostname=None
                # try to find an available IP
                self.vnode_pool.sense()
                couple=self.vnode_pool.next_free()
                if couple:
                    (vnode_hostname,unused)=couple
                # find a physical box
                max_free=0
                # use the box that has max free spots for load balancing
                for qb in self.qemu_boxes:
                    free=qb.free_slots()
                    if free>max_free:
                        qemu_boxname=qb.hostname
                        max_free=free
                # if we miss the box or the IP, kill the oldest instance
                if not qemu_boxname or not vnode_hostname:
                    # find the oldest of all our instances
                    all_qemu_instances=reduce(lambda x, y: x+y,
                                              [ qb.qemu_instances for qb in self.qemu_boxes ],
                                              [])
                    all_qemu_instances.sort(timestamp_sort)
                    try:
                        qemu_instance_to_kill=all_qemu_instances[0]
                    except:
                        msg=""
                        if not qemu_boxname: msg += " QEMU boxes are full"
                        if not vnode_hostname: msg += " vnode IP pool exhausted"
                        msg += " %s"%self.summary_line()
                        raise Exception,"Cannot make space for a QEMU instance:"+msg
                    freed_qemu_boxname=qemu_instance_to_kill.qemu_box.hostname
                    freed_vnode_hostname=short_hostname(qemu_instance_to_kill.nodename)
                    message='killing oldest qemu node = %s on %s'%(qemu_instance_to_kill.line(),
                                                                   freed_qemu_boxname)
                    qemu_instance_to_kill.kill()
                    # use these freed resources where needed
                    if not qemu_boxname:
                        qemu_boxname=freed_qemu_boxname
                    if not vnode_hostname:
                        vnode_hostname=freed_vnode_hostname
                        self.vnode_pool.set_mine(vnode_hostname)

            self.add_dummy_qemu (qemu_boxname,vnode_hostname)
            mac=self.vnode_pool.retrieve_userdata(vnode_hostname)
            ip=self.vnode_pool.get_ip (vnode_hostname)
            self.vnode_pool.add_starting(vnode_hostname,qemu_boxname)

            vnode_fqdn = self.fqdn(vnode_hostname)
            nodemap={'host_box':qemu_boxname,
                     'node_fields:hostname':vnode_fqdn,
                     'interface_fields:ip':ip,
                     'ipaddress_fields:ip_addr':ip,
                     'interface_fields:mac':mac,
                     }
            nodemap.update(self.network_settings())
            maps.append ( (nodename, nodemap) )

            utils.header("PROVISION node %s in box %s at IP %s with MAC %s"%\
                             (nodename,qemu_boxname,vnode_hostname,mac))

        return test_mapper.map({'node':maps})[0]
    def localize_sfa_rspec (self,plc,options):
        plc['sfa']['SFA_REGISTRY_HOST'] = plc['PLC_DB_HOST']
        plc['sfa']['SFA_AGGREGATE_HOST'] = plc['PLC_DB_HOST']
        plc['sfa']['SFA_SM_HOST'] = plc['PLC_DB_HOST']
        plc['sfa']['SFA_DB_HOST'] = plc['PLC_DB_HOST']
        plc['sfa']['SFA_PLC_URL'] = 'https://' + plc['PLC_API_HOST'] + ':443/PLCAPI/'
        return plc

    #################### release:
    def release (self,options):
        self.vplc_pool.release_my_starting()
        self.vnode_pool.release_my_starting()
    #################### show results for interactive mode
    def get_box (self,boxname):
        for b in self.build_boxes + self.plc_boxes + self.qemu_boxes + [self.test_box] :
            if b.shortname()==boxname: return b
            # also match a fully-qualified name against the short names
            if b.shortname()==boxname.split('.')[0]: return b
        print "Could not find box %s"%boxname
        return None

    def list_boxes (self,box_or_names):
        # sense in a first pass, then display
        for box in box_or_names:
            if not isinstance(box,Box): box=self.get_box(box)
            if not box: continue
            box.sense(self.options)
        for box in box_or_names:
            if not isinstance(box,Box): box=self.get_box(box)
            if not box: continue
            box.list(self.options.verbose)

    def reboot_boxes (self,box_or_names):
        for box in box_or_names:
            if not isinstance(box,Box): box=self.get_box(box)
            if not box: continue
            box.reboot(self.options)
    ####################
    # can be run as a utility to probe/display/manage the local infrastructure
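    # example invocations, as a sketch only -- this assumes the site's LocalSubstrate.py
    # subclasses Substrate and calls .main() when run as a script:
    #   ./LocalSubstrate.py -a -v      # sense and list all known boxes, verbosely
    #   ./LocalSubstrate.py -q -r -s   # soft-'reboot' the qemu boxes, i.e. kill their qemu instances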
    def main (self):
        parser=OptionParser()
        parser.add_option ('-r',"--reboot",action='store_true',dest='reboot',default=False,
                           help='reboot mode (use shutdown -r)')
        parser.add_option ('-s',"--soft",action='store_true',dest='soft',default=False,
                           help='soft mode for reboot (vserver stop or kill qemus)')
        parser.add_option ('-t',"--testbox",action='store_true',dest='testbox',default=False,
                           help='add test box')
        parser.add_option ('-b',"--build",action='store_true',dest='builds',default=False,
                           help='add build boxes')
        parser.add_option ('-p',"--plc",action='store_true',dest='plcs',default=False,
                           help='add plc boxes')
        parser.add_option ('-q',"--qemu",action='store_true',dest='qemus',default=False,
                           help='add qemu boxes')
        parser.add_option ('-a',"--all",action='store_true',dest='all',default=False,
                           help='address all known boxes, like -b -t -p -q')
        parser.add_option ('-v',"--verbose",action='store_true',dest='verbose',default=False,
                           help='verbose mode')
        parser.add_option ('-n',"--dry_run",action='store_true',dest='dry_run',default=False,
                           help='dry run mode')
        (self.options,args)=parser.parse_args()

        self.rescope (plcs_on_vs=True, plcs_on_lxc=True)

        boxes=args
        if self.options.testbox: boxes += [self.test_box]
        if self.options.builds: boxes += self.build_boxes
        if self.options.plcs: boxes += self.plc_boxes
        if self.options.qemus: boxes += self.qemu_boxes
        if self.options.all: boxes += self.all_boxes

        # default scope is -b -p -q -t
        if not boxes:
            boxes = self.build_boxes + self.plc_boxes + self.qemu_boxes + [self.test_box]

        if self.options.reboot: self.reboot_boxes (boxes)
        else: self.list_boxes (boxes)