def __init__(self, name):
self.name = name
self.keys = ''
- logger.verbose('account: Initing account %s'%name)
+ logger.verbose('account: Initing account {}'.format(name))
@staticmethod
def create(name, vref = None):
dot_ssh = os.path.join(pw_dir, '.ssh')
if not os.path.isdir(dot_ssh):
if not os.path.isdir(pw_dir):
- logger.verbose('account: WARNING: homedir %s does not exist for %s!'%(pw_dir, self.name))
+ logger.verbose('account: WARNING: homedir {} does not exist for {}!'
+ .format(pw_dir, self.name))
os.mkdir(pw_dir)
os.chown(pw_dir, uid, gid)
os.mkdir(dot_ssh)
# set self.keys to new_keys only when all of the above ops succeed
self.keys = new_keys
- logger.log('account: %s: installed ssh keys' % self.name)
+ logger.log('account: {}: installed ssh keys'.format(self.name))
def start(self, delay=0):
pass
def _manage_ssh_dir (slicename, do_mount):
logger.log ("_manage_ssh_dir, requested to "+("mount" if do_mount else "umount")+" ssh dir for "+ slicename)
try:
- root_ssh = "/home/%s/.ssh"%slicename
- sliver_ssh = "/vservers/%s/home/%s/.ssh"%(slicename, slicename)
+ root_ssh = "/home/{}/.ssh".format(slicename)
+ sliver_ssh = "/vservers/{}/home/{}/.ssh".format(slicename, slicename)
def is_mounted (root_ssh):
for mount_line in file('/proc/mounts').readlines():
if mount_line.find (root_ssh) >= 0:
command = ['mount', '--bind', '-o', 'ro', root_ssh, sliver_ssh]
mounted = logger.log_call (command)
msg = "OK" if mounted else "WARNING: FAILED"
- logger.log("_manage_ssh_dir: mounted %s into slice %s - %s"%(root_ssh, slicename, msg))
+ logger.log("_manage_ssh_dir: mounted {} into slice {} - {}"
+ .format(root_ssh, slicename, msg))
else:
if is_mounted (sliver_ssh):
command = ['umount', sliver_ssh]
umounted = logger.log_call(command)
msg = "OK" if umounted else "WARNING: FAILED"
- logger.log("_manage_ssh_dir: umounted %s - %s"%(sliver_ssh, msg))
+ logger.log("_manage_ssh_dir: umounted {} - {}"
+ .format(sliver_ssh, msg))
except:
logger.log_exc("_manage_ssh_dir failed", name=slicename)
Check account type is still valid. If not, recreate sliver.
If still valid, check if running and configure/start if not.
"""
- logger.log_data_in_file(rec, "/var/lib/nodemanager/%s.rec.txt"%rec['name'],
+ logger.log_data_in_file(rec, "/var/lib/nodemanager/{}.rec.txt".format(rec['name']),
'raw rec captured in ensure_created', logger.LOG_VERBOSE)
curr_class = self._get_class()
next_class = type_acct_class[rec['type']]
create_sem.acquire()
try: next_class.create(self.name, rec)
finally: create_sem.release()
- if not isinstance(self._acct, next_class): self._acct = next_class(rec)
- logger.verbose("account.Worker.ensure_created: %s, running=%r"%(self.name, self.is_running()))
+ if not isinstance(self._acct, next_class):
+ self._acct = next_class(rec)
+ logger.verbose("account.Worker.ensure_created: {}, running={}"
+ .format(self.name, self.is_running()))
# reservation_alive is set on reservable nodes, and its value is a boolean
if 'reservation_alive' in rec:
status = True
else:
status = False
- logger.verbose("account: Worker(%s): is not running" % self.name)
+ logger.verbose("account: Worker({}): is not running".format(self.name))
return status
def _destroy(self, curr_class):
parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session',
help='API session key (or file)')
parser.add_option('-p', '--period', action='store', dest='period', default=NodeManager.default_period,
- help='Polling interval (sec) - default %d'%NodeManager.default_period)
+ help='Polling interval (sec) - default {}'
+ .format(NodeManager.default_period))
parser.add_option('-r', '--random', action='store', dest='random', default=NodeManager.default_random,
- help='Range for additional random polling interval (sec) -- default %d'%NodeManager.default_random)
+ help='Range for additional random polling interval (sec) -- default {}'
+ .format(NodeManager.default_random))
parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
help='more verbose log')
parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH,
if self.options.user_module:
assert self.options.user_module in self.modules
self.modules = [self.options.user_module]
- logger.verbose('nodemanager: Running single module %s'%self.options.user_module)
+ logger.verbose('nodemanager: Running single module {}'.format(self.options.user_module))
def GetSlivers(self, config, plc):
last_data = self.loadSlivers()
# Invoke GetSlivers() functions from the callback modules
for module in self.loaded_modules:
- logger.verbose('nodemanager: triggering %s.GetSlivers'%module.__name__)
+ logger.verbose('nodemanager: triggering {}.GetSlivers'.format(module.__name__))
try:
callback = getattr(module, 'GetSlivers')
module_data = data
except SystemExit as e:
sys.exit(e)
except:
- logger.log_exc("nodemanager: GetSlivers failed to run callback for module %r"%module)
+ logger.log_exc("nodemanager: GetSlivers failed to run callback for module {}"
+ .format(module))
def getPLCDefaults(self, data, config):
attr_dict = {}
for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value']
if len(attr_dict):
- logger.verbose("nodemanager: Found default slice overrides.\n %s" % attr_dict)
+ logger.verbose("nodemanager: Found default slice overrides.\n {}".format(attr_dict))
config.OVERRIDES = attr_dict
return
# NOTE: if an _default slice existed, it would have been found above and
def dumpSlivers (self, slivers):
f = open(NodeManager.DB_FILE, "w")
- logger.log ("nodemanager: saving successfully fetched GetSlivers in %s" % NodeManager.DB_FILE)
+ logger.log ("nodemanager: saving successfully fetched GetSlivers in {}".format(NodeManager.DB_FILE))
pickle.dump(slivers, f)
f.close()
def loadSlivers (self):
try:
f = open(NodeManager.DB_FILE, "r+")
- logger.log("nodemanager: restoring latest known GetSlivers from %s" % NodeManager.DB_FILE)
+ logger.log("nodemanager: restoring latest known GetSlivers from {}".format(NodeManager.DB_FILE))
slivers = pickle.load(f)
f.close()
return slivers
except:
- logger.log("Could not restore GetSlivers from %s" % NodeManager.DB_FILE)
+ logger.log("Could not restore GetSlivers from {}".format(NodeManager.DB_FILE))
return {}
def run(self):
try:
other_pid = tools.pid_file()
if other_pid != None:
- print """There might be another instance of the node manager running as pid %d.
-If this is not the case, please remove the pid file %s. -- exiting""" % (other_pid, tools.PID_FILE)
+ print """There might be another instance of the node manager running as pid {}.
+If this is not the case, please remove the pid file {}. -- exiting""".format(other_pid, tools.PID_FILE)
return
except OSError, err:
print "Warning while writing PID file:", err
for module in self.modules:
try:
m = __import__(module)
- logger.verbose("nodemanager: triggering %s.start" % m.__name__)
+ logger.verbose("nodemanager: triggering {}.start".format(m.__name__))
try: m.start()
- except: logger.log("WARNING: module %s did not start")
+ except: logger.log("WARNING: module {} did not start".format(m.__name__))
self.loaded_modules.append(m)
except:
if module not in NodeManager.core_modules:
- logger.log_exc ("ERROR while loading module %s - skipped" % module)
+ logger.log_exc ("ERROR while loading module {} - skipped".format(module))
else:
- logger.log("FATAL : failed to start core module %s"%module)
+ logger.log("FATAL : failed to start core module {}".format(module))
sys.exit(1)
# sort on priority (lower first)
- def sort_module_priority (m1, m2):
- return getattr(m1,'priority',NodeManager.default_priority) - getattr(m2,'priority',NodeManager.default_priority)
- self.loaded_modules.sort(sort_module_priority)
+ def module_priority (m):
+ return getattr(m,'priority',NodeManager.default_priority)
+ self.loaded_modules.sort(key=module_priority)
logger.log('ordered modules:')
for module in self.loaded_modules:
- logger.log ('%s: %s'%(getattr(module,'priority',NodeManager.default_priority),module.__name__))
+ logger.log ('{}: {}'.format(getattr(module, 'priority', NodeManager.default_priority),
+ module.__name__))
# Load /etc/planetlab/session
if os.path.exists(self.options.session):
plc.update_session()
logger.log("nodemanager: Authentication Failure. Retrying")
except Exception,e:
- logger.log("nodemanager: Retry Failed. (%r); Waiting.."%e)
+ logger.log("nodemanager: Retry Failed. ({}); Waiting..".format(e))
time.sleep(iperiod)
logger.log("nodemanager: Authentication Succeeded!")
while True:
# Main nodemanager Loop
work_beg = time.time()
- logger.log('nodemanager: mainloop - calling GetSlivers - period=%d random=%d'%(iperiod,irandom))
+ logger.log('nodemanager: mainloop - calling GetSlivers - period={} random={}'
+ .format(iperiod, irandom))
self.GetSlivers(config, plc)
delay = iperiod + random.randrange(0,irandom)
work_end = time.time()
work_duration = int(work_end-work_beg)
- logger.log('nodemanager: mainloop has worked for %s s - sleeping for %d s'%(work_duration,delay))
+ logger.log('nodemanager: mainloop has worked for {} s - sleeping for {} s'
+ .format(work_duration,delay))
time.sleep(delay)
except SystemExit:
pass
def __init__(self, rec):
self.name = rec['name']
- logger.verbose ('sliver_libvirt: %s init'%(self.name))
+ logger.verbose ('sliver_libvirt: {} init'.format(self.name))
# Assume the directory with the image and config files
# are in place
try:
dom = self.conn.lookupByName(self.name)
except:
- logger.log('sliver_libvirt: Domain %s does not exist. ' \
- 'Will try to create it again.' % (self.name))
+ logger.log('sliver_libvirt: Domain {} does not exist. ' \
+ 'Will try to create it again.'.format(self.name))
self.__class__.create(rec['name'], rec)
dom = self.conn.lookupByName(self.name)
self.dom = dom
@staticmethod
def dom_details (dom):
output = ""
- output += " id=%s - OSType=%s"%(dom.ID(), dom.OSType())
+ output += " id={} - OSType={}".format(dom.ID(), dom.OSType())
# calling state() seems to be working fine
(state, reason) = dom.state()
- output += " state=%s, reason=%s"%(STATES.get(state, state),REASONS.get(reason, reason))
+ output += " state={}, reason={}".format(STATES.get(state, state),
+ REASONS.get(reason, reason))
try:
# try to use info() - this however does not work for some reason on f20
# info cannot get info operation failed: Cannot read cputime for domain
[state, maxmem, mem, ncpu, cputime] = dom.info()
- output += " [info: maxmem = %s, mem = %s, ncpu = %s, cputime = %s]" % (STATES.get(state, state), maxmem, mem, ncpu, cputime)
+ output += " [info: state={}, maxmem = {}, mem = {}, ncpu = {}, cputime = {}]"\
+ .format(STATES.get(state, state), maxmem, mem, ncpu, cputime)
except:
# too bad but libvirt.py prints out stuff on stdout when this fails, don't know how to get rid of that..
output += " [info: not available]"
def __repr__(self):
''' Helper method to get a "nice" output of the domain struct for debug purposes'''
- output = "Domain %s"%self.name
+ output = "Domain {}".format(self.name)
dom = self.dom
if dom is None:
output += " [no attached dom ?!?]"
# copy of the sliver XML config; I feel like issuing a virsh dumpxml first might be safer
def repair_veth(self):
# See workaround email, 2-14-2014, "libvirt 1.2.1 rollout"
- xml = open("/etc/libvirt/lxc/%s.xml" % self.name).read()
+ xmlfilename = "/etc/libvirt/lxc/{}.xml".format(self.name)
+ with open(xmlfilename) as xmlfile:
+ xml = xmlfile.read()
veths = re.findall("<target dev='veth[0-9]*'/>", xml)
veths = [x[13:-3] for x in veths]
for veth in veths:
logger.log_call(command)
logger.log("trying to redefine the VM")
- command = ["virsh", "define", "/etc/libvirt/lxc/%s.xml" % self.name]
+ command = [ "virsh", "define", xmlfilename ]
logger.log_call(command)
def start(self, delay=0):
'''Just start the sliver'''
- logger.verbose('sliver_libvirt: %s start'%(self.name))
+ logger.verbose('sliver_libvirt: {} start'.format(self.name))
# Check if it's running to avoid throwing an exception if the
# domain was already running
# XXX smbaker: attempt to resolve slivers that are stuck in
# "failed to allocate free veth".
if "ailed to allocate free veth" in str(e):
- logger.log("failed to allocate free veth on %s" % self.name)
+ logger.log("failed to allocate free veth on {}".format(self.name))
self.repair_veth()
logger.log("trying dom.create again")
self.dom.create()
else:
raise
else:
- logger.verbose('sliver_libvirt: sliver %s already started'%(self.name))
+ logger.verbose('sliver_libvirt: sliver {} already started'.format(self.name))
# After the VM is started... we can play with the virtual interface
# Create the ebtables rule to mark the packets going out from the virtual
# interface to the actual device so the filter canmatch against the mark
- bwlimit.ebtables("-A INPUT -i veth%d -j mark --set-mark %d" % \
- (self.xid, self.xid))
+ bwlimit.ebtables("-A INPUT -i veth{} -j mark --set-mark {}"
+ .format(self.xid, self.xid))
def stop(self):
- logger.verbose('sliver_libvirt: %s stop'%(self.name))
+ logger.verbose('sliver_libvirt: {} stop'.format(self.name))
# Remove the ebtables rule before stopping
- bwlimit.ebtables("-D INPUT -i veth%d -j mark --set-mark %d" % \
- (self.xid, self.xid))
+ bwlimit.ebtables("-D INPUT -i veth{} -j mark --set-mark {}"
+ .format(self.xid, self.xid))
try:
self.dom.destroy()
''' Return True if the domain is running '''
(state, _) = self.dom.state()
result = (state == libvirt.VIR_DOMAIN_RUNNING)
- logger.verbose('sliver_libvirt.is_running: %s => %s'%(self, result))
+ logger.verbose('sliver_libvirt.is_running: {} => {}'
+ .format(self, result))
return result
def configure(self, rec):
#sliver.[LXC/QEMU] tolower case
#sliver_type = rec['type'].split('.')[1].lower()
- #BASE_DIR = '/cgroup/libvirt/%s/%s/'%(sliver_type, self.name)
+ #BASE_DIR = '/cgroup/libvirt/{}/{}/'.format(sliver_type, self.name)
# Disk allocation
# No way through cgroups... figure out how to do that with user/dir quotas.
# If configure is called before start, then the cgroups won't exist
# yet. NM will eventually re-run configure on the next iteration.
# TODO: Add a post-start configure, and move this stuff there
- logger.log("Configure: postponing tag check on %s as cgroups are not yet populated" % self.name)
+ logger.log("Configure: postponing tag check on {} as cgroups are not yet populated"
+ .format(self.name))
else:
tags = rec["rspec"]["tags"]
# It will depend on the FS selection
@staticmethod
def get_unique_vif():
- return 'veth%s' % random.getrandbits(32)
+ return 'veth{}'.format(random.getrandbits(32))
# A placeholder until we get true VirtualInterface objects
@staticmethod
xml = """
<interface type='network'>
<source network='default'/>
- <target dev='%s'/>
+ <target dev='{}'/>
</interface>
-""" % (Sliver_Libvirt.get_unique_vif())
+""".format(Sliver_Libvirt.get_unique_vif())
try:
tags = rec['rspec']['tags']
if 'interface' in tags:
tag_xml = ""
for interface in interfaces:
if 'vlan' in interface:
- vlanxml = "<vlan><tag id='%s'/></vlan>" % interface['vlan']
+ vlanxml = "<vlan><tag id='{}'/></vlan>".format(interface['vlan'])
else:
vlanxml = ""
if 'bridge' in interface:
tag_xml = tag_xml + """
<interface type='bridge'>
- <source bridge='%s'/>
- %s
+ <source bridge='{}'/>
+ {}
<virtualport type='openvswitch'/>
- <target dev='%s'/>
+ <target dev='{}'/>
</interface>
- """ % (interface['bridge'], vlanxml, Sliver_Libvirt.get_unique_vif())
+ """.format(interface['bridge'], vlanxml, Sliver_Libvirt.get_unique_vif())
else:
tag_xml = tag_xml + """
<interface type='network'>
<source network='default'/>
- <target dev='%s'/>
+ <target dev='{}'/>
</interface>
- """ % (Sliver_Libvirt.get_unique_vif())
+ """.format(Sliver_Libvirt.get_unique_vif())
xml = tag_xml
- logger.log('sliver_libvirty.py: interface XML is: %s' % xml)
+ logger.log('sliver_libvirty.py: interface XML is: {}'.format(xml))
except:
- logger.log('sliver_libvirt.py: ERROR parsing "interface" tag for slice %s' % rec['name'])
- logger.log('sliver_libvirt.py: tag value: %s' % tags['interface'])
+ logger.log('sliver_libvirt.py: ERROR parsing "interface" tag for slice {}'.format(rec['name']))
+ logger.log('sliver_libvirt.py: tag value: {}'.format(tags['interface']))
return xml
def start(self, delay=0):
logger.log('==================== sliver_lxc.start {}'.format(self.name))
if 'enabled' in self.rspec and self.rspec['enabled'] <= 0:
- logger.log('sliver_lxc: not starting %s, is not enabled'%self.name)
+ logger.log('sliver_lxc: not starting {}, is not enabled'.format(self.name))
return
# the generic /etc/init.d/vinit script is permanently refreshed, and enabled
self.install_and_enable_vinit()
'''
Create dirs, copy fs image, lxc_create
'''
- logger.verbose('sliver_lxc: %s create' % name)
+ logger.verbose('sliver_lxc: {} create'.format(name))
conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)
vref = rec['vref']
if vref is None:
vref = "lxc-f18-x86_64"
- logger.log("sliver_libvirt: %s: WARNING - no vref attached, using hard-wired default %s" % (name, vref))
+ logger.log("sliver_libvirt: {}: WARNING - no vref attached, using hard-wired default {}"
+ .format(name, vref))
# compute guest arch from vref
# essentially we want x86_64 (default) or i686 here for libvirt
# check the template exists -- there's probably a better way..
if not os.path.isdir(refImgDir):
- logger.log('sliver_lxc: %s: ERROR Could not create sliver - reference image %s not found' % (name, vref))
- logger.log('sliver_lxc: %s: ERROR Expected reference image in %s'%(name, refImgDir))
+ logger.log('sliver_lxc: {}: ERROR Could not create sliver - reference image {} not found'
+ .format(name, vref))
+ logger.log('sliver_lxc: {}: ERROR Expected reference image in {}'.format(name, refImgDir))
return
# this hopefully should be fixed now
# # so we need to check the expected container rootfs does not exist yet
# # this hopefully could be removed in a future release
# if os.path.exists (containerDir):
-# logger.log("sliver_lxc: %s: WARNING cleaning up pre-existing %s"%(name, containerDir))
+# logger.log("sliver_lxc: {}: WARNING cleaning up pre-existing {}".format(name, containerDir))
# command = ['btrfs', 'subvolume', 'delete', containerDir]
# logger.log_call(command, BTRFS_TIMEOUT)
# # re-check
# if os.path.exists (containerDir):
-# logger.log('sliver_lxc: %s: ERROR Could not create sliver - could not clean up empty %s'%(name, containerDir))
+# logger.log('sliver_lxc: {}: ERROR Could not create sliver - could not clean up empty {}'
+# .format(name, containerDir))
# return
# Snapshot the reference image fs
# Add unix account (TYPE is specified in the subclass)
command = ['/usr/sbin/useradd', '-g', 'slices', '-s', Sliver_LXC.SHELL, name, '-p', '*']
logger.log_call(command)
- command = ['mkdir', '/home/%s/.ssh'%name]
+ command = ['mkdir', '/home/{}/.ssh'.format(name)]
logger.log_call(command)
# Create PK pair keys to connect from the host to the guest without
# password... maybe remove the need for authentication inside the
# guest?
- command = ['su', '-s', '/bin/bash', '-c', 'ssh-keygen -t rsa -N "" -f /home/%s/.ssh/id_rsa'%(name)]
+ command = ['su', '-s', '/bin/bash', '-c',
+ 'ssh-keygen -t rsa -N "" -f /home/{}/.ssh/id_rsa'.format(name)]
logger.log_call(command)
- command = ['chown', '-R', '%s.slices'%name, '/home/%s/.ssh'%name]
+ command = ['chown', '-R', '{}.slices'.format(name), '/home/{}/.ssh'.format(name)]
logger.log_call(command)
- command = ['mkdir', '%s/root/.ssh'%containerDir]
+ command = ['mkdir', '{}/root/.ssh'.format(containerDir)]
logger.log_call(command)
- command = ['cp', '/home/%s/.ssh/id_rsa.pub'%name, '%s/root/.ssh/authorized_keys'%containerDir]
+ command = ['cp', '/home/{}/.ssh/id_rsa.pub'.format(name),
+ '{}/root/.ssh/authorized_keys'.format(containerDir)]
logger.log_call(command)
- logger.log("creating /etc/slicename file in %s" % os.path.join(containerDir, 'etc/slicename'))
+ logger.log("creating /etc/slicename file in {}".format(os.path.join(containerDir, 'etc/slicename')))
try:
file(os.path.join(containerDir, 'etc/slicename'), 'w').write(name)
except:
logger.log_exc("exception while getting user id")
if uid is not None:
- logger.log("uid is %d" % uid)
- command = ['mkdir', '%s/home/%s' % (containerDir, name)]
+ logger.log("uid is {}".format(uid))
+ command = ['mkdir', '{}/home/{}'.format(containerDir, name)]
logger.log_call(command)
- command = ['chown', name, '%s/home/%s' % (containerDir, name)]
+ command = ['chown', name, '{}/home/{}'.format(containerDir, name)]
logger.log_call(command)
etcpasswd = os.path.join(containerDir, 'etc/passwd')
etcgroup = os.path.join(containerDir, 'etc/group')
if os.path.exists(etcpasswd):
# create all accounts with gid=1001 - i.e. 'slices' like it is in the root context
slices_gid = 1001
- logger.log("adding user %(name)s id %(uid)d gid %(slices_gid)d to %(etcpasswd)s" % (locals()))
+ logger.log("adding user {name} id {uid} gid {slices_gid} to {etcpasswd}"
+ .format(**(locals())))
try:
- file(etcpasswd, 'a').write("%(name)s:x:%(uid)d:%(slices_gid)d::/home/%(name)s:/bin/bash\n" % locals())
+ with open(etcpasswd, 'a') as passwdfile:
+ passwdfile.write("{name}:x:{uid}:{slices_gid}::/home/{name}:/bin/bash\n"
+ .format(**locals()))
except:
- logger.log_exc("exception while updating %s"%etcpasswd)
- logger.log("adding group slices with gid %(slices_gid)d to %(etcgroup)s"%locals())
+ logger.log_exc("exception while updating {}".format(etcpasswd))
+ logger.log("adding group slices with gid {slices_gid} to {etcgroup}"
+ .format(**locals()))
try:
- file(etcgroup, 'a').write("slices:x:%(slices_gid)d\n"%locals())
+ with open(etcgroup, 'a') as groupfile:
+ groupfile.write("slices:x:{slices_gid}\n"
+ .format(**locals()))
except:
- logger.log_exc("exception while updating %s"%etcgroup)
+ logger.log_exc("exception while updating {}".format(etcgroup))
sudoers = os.path.join(containerDir, 'etc/sudoers')
if os.path.exists(sudoers):
try:
- file(sudoers, 'a').write("%s ALL=(ALL) NOPASSWD: ALL\n" % name)
+ file(sudoers, 'a').write("{} ALL=(ALL) NOPASSWD: ALL\n".format(name))
except:
logger.log_exc("exception while updating /etc/sudoers")
unset pathmunge
"""
with open(pl_profile, 'w') as f:
- f.write("export PS1='%s@\H \$ '\n"%(name))
- f.write("%s\n"%ld_preload_text)
+ f.write("export PS1='{}@\H \$ '\n".format(name))
+ f.write("{}\n".format(ld_preload_text))
f.write("export LD_PRELOAD=/etc/planetlab/lib/bind_public.so\n")
- f.write("%s\n"%usrmove_path_text)
- f.write("%s\n"%usrmove_path_code)
+ f.write("{}\n".format(usrmove_path_text))
+ f.write("{}\n".format(usrmove_path_code))
# make sure this file is sourced from both root's and slice's .profile
enforced_line = "[ -f /etc/planetlab.profile ] && source /etc/planetlab.profile\n"
- for path in [ 'root/.profile', 'home/%s/.profile'%name ]:
+ for path in [ 'root/.profile', 'home/{}/.profile'.format(name) ]:
from_root = os.path.join(containerDir, path)
# if dir is not yet existing let's forget it for now
if not os.path.isdir(os.path.dirname(from_root)): continue
user_profile.write(enforced_line)
# in case we create the slice's .profile when writing
if from_root.find("/home") >= 0:
- command = ['chown', '%s:slices'%name, from_root]
+ command = ['chown', '{}:slices'.format(name), from_root]
logger.log_call(command)
# Lookup for xid and create template after the user is created so we
# Template for libvirt sliver configuration
template_filename_sliceimage = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR, 'lxc_template.xml')
if os.path.isfile (template_filename_sliceimage):
- logger.verbose("Using XML template %s"%template_filename_sliceimage)
+ logger.verbose("Using XML template {}".format(template_filename_sliceimage))
template_filename = template_filename_sliceimage
else:
- logger.log("Cannot find XML template %s"%template_filename_sliceimage)
+ logger.log("Cannot find XML template {}".format(template_filename_sliceimage))
return
interfaces = Sliver_Libvirt.get_interfaces_xml(rec)
template = Template(f.read())
xml = template.substitute(name=name, xid=xid, interfaces=interfaces, arch=arch)
except IOError:
- logger.log('Failed to parse or use XML template file %s'%template_filename)
+ logger.log('Failed to parse or use XML template file {}'.format(template_filename))
return
# Lookup for the sliver before actually
dom = conn.lookupByName(name)
except:
dom = conn.defineXML(xml)
- logger.verbose('lxc_create: %s -> %s'%(name, Sliver_Libvirt.dom_details(dom)))
+ logger.verbose('lxc_create: {} -> {}'.format(name, Sliver_Libvirt.dom_details(dom)))
@staticmethod
def destroy(name):
# umount .ssh directory - only if mounted
Account.umount_ssh_dir(name)
- logger.verbose ('sliver_lxc: %s destroy'%(name))
+ logger.verbose ('sliver_lxc: {} destroy'.format(name))
conn = Sliver_Libvirt.getConnection(Sliver_LXC.TYPE)
containerDir = Sliver_LXC.CON_BASE_DIR + '/%s'%(name)
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
rlim = rlimit.lower()
- DEFAULT_ALLOCATION["%s_min"%rlim] = KEEP_LIMIT
- DEFAULT_ALLOCATION["%s_soft"%rlim] = KEEP_LIMIT
- DEFAULT_ALLOCATION["%s_hard"%rlim] = KEEP_LIMIT
+ DEFAULT_ALLOCATION["{}_min".format(rlim)] = KEEP_LIMIT
+ DEFAULT_ALLOCATION["{}_soft".format(rlim)] = KEEP_LIMIT
+ DEFAULT_ALLOCATION["{}_hard".format(rlim)] = KEEP_LIMIT
class Sliver_VS(vserver.VServer, Account, Initscript):
"""This class wraps vserver.VServer to make its interface closer to what we need."""
def __init__(self, rec):
name = rec['name']
- logger.verbose ('sliver_vs: %s init'%name)
+ logger.verbose ('sliver_vs: {} init'.format(name))
try:
- logger.log("sliver_vs: %s: first chance..."%name)
+ logger.log("sliver_vs: {}: first chance...".format(name))
vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
Account.__init__ (self, name)
Initscript.__init__ (self, name)
except Exception, err:
if not isinstance(err, vserver.NoSuchVServer):
# Probably a bad vserver or vserver configuration file
- logger.log_exc("sliver_vs:__init__ (first chance) %s", name=name)
- logger.log('sliver_vs: %s: recreating bad vserver' % name)
+ logger.log_exc("sliver_vs:__init__ (first chance)", name=name)
+ logger.log('sliver_vs: {}: recreating bad vserver'.format(name))
self.destroy(name)
self.create(name, rec)
vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
@staticmethod
def create(name, rec = None):
- logger.verbose('sliver_vs: %s: create'%name)
+ logger.verbose('sliver_vs: {}: create'.format(name))
vref = rec['vref']
if vref is None:
# added by caglar
# band-aid for short period as old API doesn't have GetSliceFamily function
vref = "planetlab-f8-i386"
- logger.log("sliver_vs: %s: ERROR - no vref attached, using hard-wired default %s"%(name, vref))
+ logger.log("sliver_vs: {}: ERROR - no vref attached, using hard-wired default {}"
+ .format(name, vref))
# used to look in /etc/planetlab/family,
# now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
# which for legacy is still exposed here as the 'vref' key
# check the template exists -- there's probably a better way..
- if not os.path.isdir ("/vservers/.vref/%s"%vref):
- logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name, vref))
+ if not os.path.isdir ("/vservers/.vref/{}".format(vref)):
+ logger.log ("sliver_vs: {}: ERROR Could not create sliver - vreference image {} not found"
+ .format(name, vref))
return
# compute guest personality
command += [ name, ]
logger.log_call(command, timeout=15*60)
# export slicename to the slice in /etc/slicename
- file('/vservers/%s/etc/slicename' % name, 'w').write(name)
- file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
+ with open('/vservers/{}/etc/slicename'.format(name), 'w') as slicenamefile:
+ slicenamefile.write(name)
+ with open('/vservers/{}/etc/slicefamily'.format(name), 'w') as slicefamilyfile:
+ slicefamilyfile.write(vref)
# set personality: only if needed (if arch's differ)
if tools.root_context_arch() != arch:
- file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
- logger.log('sliver_vs: %s: set personality to %s'%(name, personality(arch)))
+ with open('/etc/vservers/{}/personality'.format(name), 'w') as personalityfile:
+ personalityfile.write(personality(arch)+"\n")
+ logger.log('sliver_vs: {}: set personality to {}'.format(name, personality(arch)))
@staticmethod
def destroy(name):
# also because this is a static method we cannot check for 'omf_control'
# but it is no big deal as umount_ssh_dir checks before it umounts..
Account.umount_ssh_dir(name)
- logger.log("sliver_vs: destroying %s"%name)
+ logger.log("sliver_vs: destroying {}".format(name))
logger.log_call(['/bin/bash', '-x', '/usr/sbin/vuserdel', name, ])
# is expected to be in place already at this point
def start(self, delay=0):
if self.rspec['enabled'] <= 0:
- logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
+ logger.log('sliver_vs: not starting {}, is not enabled'.format(self.name))
return
- logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
+ logger.log('sliver_vs: {}: starting in {} seconds'.format(self.name, delay))
time.sleep(delay)
# the generic /etc/init.d/vinit script is permanently refreshed, and enabled
self.install_and_enable_vinit()
os.waitpid(child_pid, 0)
def stop(self):
- logger.log('sliver_vs: %s: stopping' % self.name)
+ logger.log('sliver_vs: {}: stopping'.format(self.name))
vserver.VServer.stop(self)
def is_running(self):
# but actually depends on the underlying vm techno
# so let's keep it here
def rerun_slice_vinit(self):
- command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
- logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
+ command = "/usr/sbin/vserver {} exec /etc/rc.d/init.d/vinit restart"\
+ .format(self.name)
+ logger.log("vsliver_vs: {}: Rerunning slice initscript: {}"
+ .format(self.name, command))
subprocess.call(command + "&", stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
def set_resources(self):
disk_max = self.rspec['disk_max']
- logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
+ logger.log('sliver_vs: {}: setting max disk usage to {} KiB'
+ .format(self.name, disk_max))
try: # if the sliver is over quota, .set_disk_limit will throw an exception
if not self.disk_usage_initialized:
self.vm_running = False
Sliver_VS._init_disk_info_sem.acquire()
- logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
+ logger.log('sliver_vs: {}: computing disk usage: beginning'.format(self.name))
# init_disk_info is inherited from VServer
try: self.init_disk_info()
finally: Sliver_VS._init_disk_info_sem.release()
- logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
+ logger.log('sliver_vs: {}: computing disk usage: ended'.format(self.name))
self.disk_usage_initialized = True
vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
except:
# implements support for hard limits.
for limit in vserver.RLIMITS.keys():
type = limit.lower()
- minimum = self.rspec['%s_min'%type]
- soft = self.rspec['%s_soft'%type]
- hard = self.rspec['%s_hard'%type]
+ minimum = self.rspec['{}_min'.format(type)]
+ soft = self.rspec['{}_soft'.format(type)]
+ hard = self.rspec['{}_hard'.format(type)]
update = self.set_rlimit(limit, hard, soft, minimum)
if update:
- logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
- % (self.name, type, hard, soft, minimum))
+ logger.log('sliver_vs: {}: setting rlimit {} to ({}, {}, {})'
+ .format(self.name, type, hard, soft, minimum))
self.set_capabilities_config(self.rspec['capabilities'])
if self.rspec['capabilities']:
- logger.log('sliver_vs: %s: setting capabilities to %s'
- % (self.name, self.rspec['capabilities']))
+ logger.log('sliver_vs: {}: setting capabilities to {}'
+ .format(self.name, self.rspec['capabilities']))
cpu_pct = self.rspec['cpu_pct']
cpu_share = self.rspec['cpu_share']
sysctl = key.split('.')
try:
# /etc/vservers/<guest>/sysctl/<id>/
- dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
+ dirname = "/etc/vservers/{}/sysctl/{}".format(self.name, count)
try:
os.makedirs(dirname, 0755)
except:
pass
- setting = open("%s/setting" % dirname, "w")
- setting.write("%s\n" % key.lstrip("sysctl."))
- setting.close()
- value = open("%s/value" % dirname, "w")
- value.write("%s\n" % self.rspec[key])
- value.close()
+ with open("{}/setting".format(dirname), "w") as setting:
+ setting.write("{}\n".format(key[len("sysctl."):] if key.startswith("sysctl.") else key))
+ with open("{}/value".format(dirname), "w") as value:
+ value.write("{}\n".format(self.rspec[key]))
count += 1
- logger.log("sliver_vs: %s: writing %s=%s"%(self.name, key, self.rspec[key]))
+ logger.log("sliver_vs: {}: writing {}={}"
+ .format(self.name, key, self.rspec[key]))
except IOError, e:
- logger.log("sliver_vs: %s: could not set %s=%s"%(self.name, key, self.rspec[key]))
- logger.log("sliver_vs: %s: error = %s"%(self.name, e))
+ logger.log("sliver_vs: {}: could not set {}={}"
+ .format(self.name, key, self.rspec[key]))
+ logger.log("sliver_vs: {}: error = {}".format(self.name, e))
if self.rspec['enabled'] > 0:
if cpu_pct > 0:
- logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
+ logger.log('sliver_vs: {}: setting cpu reservation to {}%'
+ .format(self.name, cpu_pct))
else:
cpu_pct = 0
if cpu_share > 0:
- logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
+ logger.log('sliver_vs: {}: setting cpu share to {}'
+ .format(self.name, cpu_share))
else:
cpu_share = 0
self.set_sched_config(cpu_pct, cpu_share)
# if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
if self.rspec['ip_addresses'] != '0.0.0.0':
- logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
- (self.name, self.rspec['ip_addresses']))
+ logger.log('sliver_vs: {}: setting IP address(es) to {}'
+ .format(self.name, self.rspec['ip_addresses']))
add_loopback = True
if 'isolate_loopback' in self.rspec['tags']:
add_loopback = self.rspec['tags']['isolate_loopback'] != "1"
self.set_ipaddresses_config(self.rspec['ip_addresses'], add_loopback)
- #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
+ #logger.log("sliver_vs: {}: Setting name to {}".format(self.name, self.slice_id))
#self.setname(self.slice_id)
- #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
+ #logger.log("sliver_vs: {}: Storing slice id of {} for PlanetFlow".format(self.name, self.slice_id))
try:
- vserver_config_path = '/etc/vservers/%s'%self.name
+ vserver_config_path = '/etc/vservers/{}'.format(self.name)
if not os.path.exists (vserver_config_path):
os.makedirs (vserver_config_path)
- file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
- logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id, self.name))
+ with open('{}/slice_id'.format(vserver_config_path), 'w') as sliceidfile:
+ sliceidfile.write("{}\n".format(self.slice_id))
+ logger.log("sliver_vs: Recorded slice id {} for slice {}"
+ .format(self.slice_id, self.name))
except IOError as e:
- logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name, str(e)))
+ logger.log("sliver_vs: Could not record slice_id for slice {}. Error: {}"
+ .format(self.name, str(e)))
except Exception as e:
- logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e), name=self.name)
+ logger.log_exc("sliver_vs: Error recording slice id: {}".format(e), name=self.name)
if self.enabled == False:
if False: # Does not work properly yet.
if self.have_limits_changed():
- logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
+ logger.log('sliver_vs: {}: limits have changed --- restarting'.format(self.name))
stopcount = 10
while self.is_running() and stopcount > 0:
self.stop()
self.start()
else: # tell vsh to disable remote login by setting CPULIMIT to 0
- logger.log('sliver_vs: %s: disabling remote login' % self.name)
+ logger.log('sliver_vs: {}: disabling remote login'.format(self.name))
self.set_sched_config(0, 0)
self.enabled = False
self.stop()
returns True if a change occurred, or the file is deleted
"""
try:
- current=file(target).read()
+ current = open(target).read()
except:
- current=""
- if current==new_contents:
+ current = ""
+ if current == new_contents:
# if turns out to be an empty string, and remove_if_empty is set,
# then make sure to trash the file if it exists
if remove_if_empty and not new_contents and os.path.isfile(target):
- logger.verbose("tools.replace_file_with_string: removing file %s"%target)
+ logger.verbose("tools.replace_file_with_string: removing file {}".format(target))
try: os.unlink(target)
finally: return True
return False
# overwrite target file: create a temp in the same directory
- path=os.path.dirname(target) or '.'
- fd, name = tempfile.mkstemp('','repl',path)
- os.write(fd,new_contents)
+ path = os.path.dirname(target) or '.'
+ fd, name = tempfile.mkstemp('', 'repl', path)
+ os.write(fd, new_contents)
os.close(fd)
if os.path.exists(target):
os.unlink(target)
- shutil.move(name,target)
- if chmod: os.chmod(target,chmod)
+ shutil.move(name, target)
+ if chmod: os.chmod(target, chmod)
return True
####################
# utilities functions to get (cached) information from the node
# get node_id from /etc/planetlab/node_id and cache it
-_node_id=None
+_node_id = None
def node_id():
global _node_id
if _node_id is None:
try:
- _node_id=int(file("/etc/planetlab/node_id").read())
+ _node_id = int(open("/etc/planetlab/node_id").read())
except:
- _node_id=""
+ _node_id = ""
return _node_id
-_root_context_arch=None
+_root_context_arch = None
def root_context_arch():
global _root_context_arch
if not _root_context_arch:
- sp=subprocess.Popen(["uname","-i"],stdout=subprocess.PIPE)
- (_root_context_arch,_)=sp.communicate()
- _root_context_arch=_root_context_arch.strip()
+ sp = subprocess.Popen(["uname", "-i"], stdout=subprocess.PIPE)
+ (_root_context_arch, _) = sp.communicate()
+ _root_context_arch = _root_context_arch.strip()
return _root_context_arch
####################
class NMLock:
def __init__(self, file):
- logger.log("tools: Lock %s initialized." % file, 2)
+ logger.log("tools: Lock {} initialized.".format(file), 2)
self.fd = os.open(file, os.O_RDWR|os.O_CREAT, 0600)
flags = fcntl.fcntl(self.fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
the process. If the process is not found then (None, None) is returned.
"""
try:
- cmd = 'grep %s /proc/*/cgroup | grep freezer'%slice_name
+ cmd = 'grep {} /proc/*/cgroup | grep freezer'.format(slice_name)
output = os.popen(cmd).readlines()
except:
# the slice couldn't be found
- logger.log("get_sliver_process: couldn't find slice %s" % slice_name)
+ logger.log("get_sliver_process: couldn't find slice {}".format(slice_name))
return (None, None)
cgroup_fn = None
# /proc/1253/cgroup:6:freezer:/machine.slice/machine-lxc\x2del_sirius.scope
# Further documentation on:
# https://libvirt.org/cgroups.html#systemdScope
- virt=get_node_virt()
- if virt=='lxc':
+ virt = get_node_virt()
+ if virt == 'lxc':
# This is for Fedora 20 or later
regexf20orlater = re.compile(r'machine-lxc\\x2d(.+).scope')
isf20orlater = regexf20orlater.search(slice_name_check)
if (slice_name_check == slice_name):
slice_path = path
pid = slice_path.split('/')[2]
- cmdline = open('/proc/%s/cmdline'%pid).read().rstrip('\n\x00')
+ with open('/proc/{}/cmdline'.format(pid)) as cmdfile:
+ cmdline = cmdfile.read().rstrip('\n\x00')
if (cmdline == process_cmdline):
cgroup_fn = slice_path
break
break
if (not cgroup_fn) or (not pid):
- logger.log("get_sliver_process: process %s not running in slice %s" % (process_cmdline, slice_name))
+ logger.log("get_sliver_process: process {} not running in slice {}"
+ .format(process_cmdline, slice_name))
return (None, None)
return (cgroup_fn, pid)
if (not cgroup_fn) or (not pid):
return None
- path = '/proc/%s/ns/net'%pid
+ path = '/proc/{}/ns/net'.format(pid)
result = None
try:
sub.wait()
if (sub.returncode != 0):
- logger.log("get_slice_ifconfig: error in ifconfig: %s" % sub.stderr.read())
+ logger.log("get_slice_ifconfig: error in ifconfig: {}".format(sub.stderr.read()))
result = sub.stdout.read()
finally:
if "inet addr:" in line:
# example: ' inet addr:192.168.122.189 Bcast:192.168.122.255 Mask:255.255.255.0'
parts = line.strip().split()
- if len(parts)>=2 and parts[1].startswith("addr:"):
+ if len(parts) >= 2 and parts[1].startswith("addr:"):
return parts[1].split(":")[1]
return None
def get_sliver_ipv6(slice_name):
ifconfig = get_sliver_ifconfig(slice_name)
if not ifconfig:
- return None,None
+ return None, None
# example: 'inet6 2001:67c:16dc:1302:5054:ff:fea7:7882 prefixlen 64 scopeid 0x0<global>'
prog = re.compile(r'inet6\s+(.*)\s+prefixlen\s+(\d+)\s+scopeid\s+(.+)<global>')
if search:
ipv6addr = search.group(1)
prefixlen = search.group(2)
- return (ipv6addr,prefixlen)
- return None,None
+ return (ipv6addr, prefixlen)
+ return None, None
###################################################
# Author: Guilherme Sperb Machado <gsm@machados.org>
# either 'vs' or 'lxc'
# also caches it in /etc/planetlab/virt for next calls
# could be promoted to core nm if need be
-virt_stamp="/etc/planetlab/virt"
+virt_stamp = "/etc/planetlab/virt"
def get_node_virt ():
try:
return file(virt_stamp).read().strip()
pass
logger.log("Computing virt..")
try:
- if subprocess.call ([ 'vserver', '--help' ]) ==0: virt='vs'
- else: virt='lxc'
+ virt = 'vs' if subprocess.call ([ 'vserver', '--help' ]) == 0 else 'lxc'
except:
virt='lxc'
- with file(virt_stamp,"w") as f:
+ with open(virt_stamp, "w") as f:
f.write(virt)
return virt
### this return True or False to indicate that systemctl is present on that box
# cache result in memory as _has_systemctl
-_has_systemctl=None
+_has_systemctl = None
def has_systemctl ():
global _has_systemctl
if _has_systemctl is None:
try:
# set the flag VIR_DOMAIN_REBOOT_INITCTL, which uses "initctl"
result = domain.reboot(0x04)
- if result==0: logger.log("tools: REBOOT %s" % (domain.name()) )
+ if result == 0:
+ logger.log("tools: REBOOT {}".format(domain.name()) )
else:
raise Exception()
except Exception, e:
- logger.log("tools: FAILED to reboot %s (%s)" % (domain.name(), e) )
- logger.log("tools: Trying to DESTROY/CREATE %s instead..." % (domain.name()) )
+ logger.log("tools: FAILED to reboot {} ({})".format(domain.name(), e) )
+ logger.log("tools: Trying to DESTROY/CREATE {} instead...".format(domain.name()) )
try:
result = domain.destroy()
- if result==0: logger.log("tools: DESTROYED %s" % (domain.name()) )
- else: logger.log("tools: FAILED in the DESTROY call of %s" % (domain.name()) )
+ if result == 0:
+ logger.log("tools: DESTROYED {}".format(domain.name()) )
+ else: logger.log("tools: FAILED in the DESTROY call of {}".format(domain.name()) )
result = domain.create()
- if result==0: logger.log("tools: CREATED %s" % (domain.name()) )
- else: logger.log("tools: FAILED in the CREATE call of %s" % (domain.name()) )
+ if result == 0:
+ logger.log("tools: CREATED {}".format(domain.name()) )
+ else: logger.log("tools: FAILED in the CREATE call of {}".format(domain.name()) )
except Exception, e:
- logger.log("tools: FAILED to DESTROY/CREATE %s (%s)" % (domain.name(), e) )
+ logger.log("tools: FAILED to DESTROY/CREATE {} ({})".format(domain.name(), e) )
###################################################
# Author: Guilherme Sperb Machado <gsm@machados.org>
###################################################
def search_ipv6addr_hosts(slicename, ipv6addr):
hostsFilePath = get_hosts_file_path(slicename)
- found=False
+ found = False
try:
- for line in fileinput.input(r'%s' % (hostsFilePath)):
+ for line in fileinput.input(hostsFilePath):
if ipv6addr is not None:
- if re.search(r'%s' % (ipv6addr), line):
- found=True
+ if re.search(re.escape(ipv6addr), line):
+ found = True
else:
search = re.search(r'^(.*)\s+.*$', line)
if search:
ipv6candidatestrip = ipv6candidate.strip()
valid = is_valid_ipv6(ipv6candidatestrip)
if valid:
- found=True
+ found = True
fileinput.close()
return found
except:
- logger.log("tools: FAILED to search %s in /etc/hosts file of slice=%s" % \
- (ipv6addr, slicename) )
+ logger.log("tools: FAILED to search {} in /etc/hosts file of slice={}"
+ .format(ipv6addr, slicename))
###################################################
# Author: Guilherme Sperb Machado <gsm@machados.org>
def remove_all_ipv6addr_hosts(slicename, node):
hostsFilePath = get_hosts_file_path(slicename)
try:
- for line in fileinput.input(r'%s' % (hostsFilePath), inplace=True):
- search = re.search(r'^(.*)\s+(%s|%s)$' % (node,'localhost'), line)
+ for line in fileinput.input(hostsFilePath, inplace=True):
+ search = re.search(r'^(.*)\s+({}|localhost)$'.format(re.escape(node)), line)
if search:
ipv6candidate = search.group(1)
ipv6candidatestrip = ipv6candidate.strip()
if not valid:
print line,
fileinput.close()
- logger.log("tools: REMOVED IPv6 address from /etc/hosts file of slice=%s" % \
- (slicename) )
+ logger.log("tools: REMOVED IPv6 address from /etc/hosts file of slice={}"
+ .format(slicename) )
except:
- logger.log("tools: FAILED to remove the IPv6 address from /etc/hosts file of slice=%s" % \
- (slicename) )
+ logger.log("tools: FAILED to remove the IPv6 address from /etc/hosts file of slice={}"
+ .format(slicename) )
###################################################
# Author: Guilherme Sperb Machado <gsm@machados.org>
###################################################
def add_ipv6addr_hosts_line(slicename, node, ipv6addr):
hostsFilePath = get_hosts_file_path(slicename)
- logger.log("tools: %s" % (hostsFilePath) )
+ logger.log("tools: {}".format(hostsFilePath) )
# debugging purposes:
#string = "127.0.0.1\tlocalhost\n192.168.100.179\tmyplc-node1-vm.mgmt.local\n"
#string = "127.0.0.1\tlocalhost\n"
with open(hostsFilePath, "a") as file:
file.write(ipv6addr + " " + node + "\n")
file.close()
- logger.log("tools: ADDED IPv6 address to /etc/hosts file of slice=%s" % \
- (slicename) )
+ logger.log("tools: ADDED IPv6 address to /etc/hosts file of slice={}"
+ .format(slicename) )
except:
- logger.log("tools: FAILED to add the IPv6 address to /etc/hosts file of slice=%s" % \
- (slicename) )
+ logger.log("tools: FAILED to add the IPv6 address to /etc/hosts file of slice={}"
+ .format(slicename) )
# which, OK, is no big deal as long as the command is simple enough,
# but do not stretch it with arguments that have spaces or need quoting as that will become a nightmare
def command_in_slice (slicename, argv):
- virt=get_node_virt()
- if virt=='vs':
+ virt = get_node_virt()
+ if virt == 'vs':
return [ 'vserver', slicename, 'exec', ] + argv
- elif virt=='lxc':
+ elif virt == 'lxc':
# wrap up argv in a single string for -c
return [ 'lxcsu', slicename, ] + [ " ".join(argv) ]
logger.log("command_in_slice: WARNING: could not find a valid virt")
####################
def init_signals ():
def handler (signum, frame):
- logger.log("Received signal %d - exiting"%signum)
+ logger.log("Received signal {} - exiting".format(signum))
os._exit(1)
- signal.signal(signal.SIGHUP,handler)
- signal.signal(signal.SIGQUIT,handler)
- signal.signal(signal.SIGINT,handler)
- signal.signal(signal.SIGTERM,handler)
+ signal.signal(signal.SIGHUP, handler)
+ signal.signal(signal.SIGQUIT, handler)
+ signal.signal(signal.SIGINT, handler)
+ signal.signal(signal.SIGTERM, handler)