# the experimental lxc-based build box
def build_lxc_boxes_spec (self):
-# liquid only used for the last f18 build
- return [ 'buzzcocks', 'liquid' ]
+ return [ 'buzzcocks' ]
# the lxc-capable box for PLCs
def plc_lxc_boxes_spec (self):
- return [
-# gotan looks in bad shape and I can't use its drac from the US
-# ('gotan', 20), # how many plcs max in this box
- ('buzzcocks', 12),
- ]
+ # we now use the same box as for builds
+ return [ ('buzzcocks', 12), ]
+ def qemu_boxes_spec (self):
+ # ditto, a single big box is now enough
+ return [ ('boxtops', 64), ]
+
+
# vplc01 to 40
def vplc_ips (self):
return [ ( 'vplc{:02d}'.format(i), # DNS name
'unused') # MAC address
for i in range(1,41) ]
- def qemu_boxes_spec (self):
- return [ # (hostname, how many qemus max in this box)
- ('boxtops', 64),
- ]
-
-# these boxes are going on a well deserved retirement
-# as of jan 2014 this is renumbered so that 1 is preferred
-# speedball (1) - old school but robust and a big disk
-# ('speedball', 2), # 4 cores, 4Gb, 840 Gb
-# used to have kruder too, but it is broken/dead
-# dorfmeister (2)
-# ('dorfmeister', 2), # 4 cores, 4Gb
-# enfoui - L119 as it won't work well under the KVM in L102A
-# use F10 to enter BIOS setup
-# nodes spawned in this box won't get network connectivity
-# ('enfoui', 4), # 4 cores, 8Gb
-# estran (4) - big mem but small disk
-# take it out because it takes much of the load and then gets full..
-# ('estran', 2), # 4 cores, 8Gb
-# lodos (5) - rather old/small
-# ('lodos', 1), # 2 cores, 4Gb
-# cyblok (6)
-# ('cyblok', 1), # 2 cores, 4Gb
-
# the nodes pool has a MAC address as user-data (3rd elt in tuple)
def vnode_ips (self):
return [ ( 'vnode{:02d}'.format(i), # DNS name
'ipaddress_fields:netmask': '255.255.248.0',
}
-# the hostname for the testmaster - in case we'd like to run this remotely
+# the hostname for the testmaster that orchestrates the whole business
def testmaster (self):
return 'testmaster'
#
# Thierry Parmentelat <thierry.parmentelat@inria.fr>
-# Copyright (C) 2010 INRIA
+# Copyright (C) 2010-2015 INRIA
#
# #################### history
#
def __init__ (self):
self.tuples=[]
+ def __repr__(self):
+ return '<Starting>'
+
def load (self):
try:
with open(Starting.location) as starting:
self.status = None
self.ip = None
+ def __repr__(self):
+ return "<PoolItem {} {}>".format(self.hostname, self.userdata)
+
def line(self):
return "Pooled {} ({}) -> {}".format(self.hostname, self.userdata, self.status)
# where to send notifications upon load_starting
self.substrate = substrate
+ def __repr__(self):
+ return "<Pool {} {}>".format(self.message, self.tuples)
+
def list (self, verbose=False):
for i in self.pool_items: print(i.line())
def _item (self, hostname):
for i in self.pool_items:
if i.hostname == hostname: return i
- raise Exception ("Could not locate hostname %s in pool %s"%(hostname,self.message))
+ raise Exception ("Could not locate hostname {} in pool {}".format(hostname, self.message))
def retrieve_userdata (self, hostname):
return self._item(hostname).userdata
try:
self._item(hostname).status='mine'
except:
- print('WARNING: host %s not found in IP pool %s'%(hostname,self.message))
+ print('WARNING: host {} not found in IP pool {}'.format(hostname, self.message))
def next_free (self):
for i in self.pool_items:
def sense (self):
print('Sensing IP pool', self.message, end=' ')
+ sys.stdout.flush()
self._sense()
print('Done')
for (vname,bname) in self.load_starting():
def __init__ (self, hostname):
self.hostname = hostname
self._probed = None
+ def __repr__(self):
+ return "<Box {}>".format(self.hostname)
def shortname (self):
return short_hostname(self.hostname)
def test_ssh (self):
self.buildname = buildname
self.buildbox = buildbox
self.pids = [pid]
+ def __repr__(self):
+ return "<BuildInstance {} in {}>".format(self.buildname, self.buildbox)
def add_pid(self,pid):
self.pids.append(pid)
def __init__ (self, hostname):
Box.__init__(self, hostname)
self.build_instances = []
+ def __repr__(self):
+ return "<BuildBox {}>".format(self.hostname)
def add_build(self, buildname, pid):
for build in self.build_instances:
# inspect box and find currently running builds
def sense(self, options):
print('xb', end=' ')
+ sys.stdout.flush()
pids = self.backquote_ssh(['pgrep','lbuild'], trash_err=True)
if not pids: return
command = ['ps', '-o', 'pid,command'] + [ pid for pid in pids.split("\n") if pid]
self.plc_box = plcbox
# unknown yet
self.timestamp = 0
+ def __repr__(self):
+ return "<PlcInstance {}>".format(self.plc_box)
def set_timestamp (self,timestamp):
self.timestamp = timestamp
PlcInstance.__init__(self, plcbox)
self.lxcname = lxcname
self.pid = pid
+ def __repr__(self):
+ return "<PlcLxcInstance {}>".format(self.lxcname)
def vplcname (self):
return self.lxcname.split('-')[-1]
Box.__init__(self, hostname)
self.plc_instances = []
self.max_plcs = max_plcs
+ def __repr__(self):
+ return "<PlcBox {}>".format(self.hostname)
def free_slots (self):
return self.max_plcs - len(self.plc_instances)
# to describe the currently running VM's
def sense(self, options):
print("xp", end=' ')
+ sys.stdout.flush()
command = "rsync lxc-driver.sh {}:/root".format(self.hostname)
subprocess.getstatusoutput(command)
command = ['/root/lxc-driver.sh', '-c', 'sense_all']
for lxc_line in lxc_stat.split("\n"):
if not lxc_line:
continue
+ # we mix build and plc VMs
if 'vplc' not in lxc_line:
continue
lxcname = lxc_line.split(";")[0]
# not known yet
self.buildname = None
self.timestamp = 0
+ def __repr__(self):
+ return "<QemuInstance {}>".format(self.nodename)
def set_buildname (self, buildname):
self.buildname = buildname
Box.__init__(self, hostname)
self.qemu_instances = []
self.max_qemus = max_qemus
+ def __repr__(self):
+ return "<QemuBox {}>".format(self.hostname)
def add_node(self, nodename, pid):
for qemu in self.qemu_instances:
def sense(self, options):
print('qn', end=' ')
+ sys.stdout.flush()
modules = self.backquote_ssh(['lsmod']).split('\n')
self._driver = '*NO kqemu/kvm_intel MODULE LOADED*'
for module in modules:
# has a KO test
self.broken_steps = []
self.timestamp = 0
+ def __repr__(self):
+ return "<TestInstance {}>".format(self.buildname)
def set_timestamp(self, timestamp):
self.timestamp = timestamp
Box.__init__(self, hostname)
self.starting_ips = []
self.test_instances = []
+ def __repr__(self):
+ return "<TestBox {}>".format(self.hostname)
def reboot(self, options):
# can't reboot a vserver VM
def sense(self, options):
print('tm', end=' ')
- self.starting_ips = [x for x in self.backquote_ssh(['cat',Starting.location], trash_err=True).strip().split('\n') if x]
+ self.starting_ips = [ x for x in self.backquote_ssh( ['cat', Starting.location], trash_err=True).strip().split('\n') if x ]
# scan timestamps on all tests
# this is likely to not invoke ssh so we need to be a bit smarter to get * expanded
self.plc_boxes = self.plc_lxc_boxes
self.default_boxes = self.plc_boxes + self.qemu_boxes
self.all_boxes = self.build_boxes + [ self.test_box ] + self.plc_boxes + self.qemu_boxes
+ def __repr__(self):
+ return "<Substrate>".format()
def summary_line (self):
msg = "["
if self._sensed and not force:
return False
print('Sensing local substrate...', end=' ')
+ sys.stdout.flush()
for b in self.default_boxes:
b.sense(self.options)
print('Done')
def list_boxes(self):
print('Sensing', end=' ')
+ sys.stdout.flush()
for box in self.focus_all:
box.sense(self.options)
print('Done')
path=/vservers
function sense_all () {
- virsh -c lxc:// list | grep running | while read line; do
+ virsh -c lxc:/// list 2> /dev/null | grep running | while read line; do
pid=$(echo $line | cut -d' ' -f1)
lxc_name=$(echo $line | cut -d' ' -f2)
- timestamp=$(cat $path/$lxc_name/$lxc_name.timestamp)
+ timestamp=$(cat $path/$lxc_name/$lxc_name.timestamp 2> /dev/null)
echo "$lxc_name;$pid;$timestamp"
done
}
function start_all () {
- virsh -c lxc:// list --inactive | grep " - "| while read line; do
+ virsh -c lxc:/// list --inactive | grep " - "| while read line; do
lxc_name=$(echo $line | cut -d' ' -f2)
- virsh -c lxc:// start $lxc_name
+ virsh -c lxc:/// start $lxc_name
done
}
function stop_all () {
- virsh -c lxc:// list | grep running | while read line; do
+ virsh -c lxc:/// list | grep running | while read line; do
lxc_name=$(echo $line | cut -d' ' -f2)
- virsh -c lxc:// destroy $lxc_name
+ virsh -c lxc:/// destroy $lxc_name
done
}
function sense_lxc () {
lxc_name=$1; shift
- if [ "$(virsh -c lxc:// dominfo $lxc_name | grep State| cut -d' ' -f11)" == "running" ] ; then
- pid=$(virsh -c lxc:// dominfo $lxc_name| grep Id | cut -d' ' -f14)
+ if [ "$(virsh -c lxc:/// dominfo $lxc_name | grep State| cut -d' ' -f11)" == "running" ] ; then
+ pid=$(virsh -c lxc:/// dominfo $lxc_name| grep Id | cut -d' ' -f14)
timestamp=$(cat $path/$lxc_name/$lxc_name.timestamp)
echo "$lxc_name;$pid;$timestamp"
fi
function start_lxc () {
lxc_name=$1; shift
- if [ "$(virsh -c lxc:// dominfo $lxc_name | grep State| cut -d' ' -f11)" != "running" ] ; then
- virsh -c lxc:// start $lxc_name
+ if [ "$(virsh -c lxc:/// dominfo $lxc_name | grep State| cut -d' ' -f11)" != "running" ] ; then
+ virsh -c lxc:/// start $lxc_name
fi
}
function stop_lxc () {
lxc_name=$1; shift
- if [ "$(virsh -c lxc:// dominfo $lxc_name | grep State| cut -d' ' -f11)" != "shut off" ] ; then
- virsh -c lxc:// destroy $lxc_name
+ if [ "$(virsh -c lxc:/// dominfo $lxc_name | grep State| cut -d' ' -f11)" != "shut off" ] ; then
+ virsh -c lxc:/// destroy $lxc_name
fi
}
function destroy_all () {
stop_all
- virsh -c lxc:// list --all | while read line; do
+ virsh -c lxc:/// list --all | while read line; do
lxc_name=$(echo $line | cut -d' ' -f2)
- virsh -c lxc:// undefine $lxc_name
+ virsh -c lxc:/// undefine $lxc_name
rm -fr $path/$lxc_name
done
}
lxc_name=$1; shift
stop_lxc $lxc_name
- virsh -c lxc:// undefine $lxc_name
+ virsh -c lxc:/// undefine $lxc_name
rm -fr $path/$lxc_name
}