# write out authorized_keys file and conditionally create
# the .ssh subdir if need be.
- dot_ssh = os.path.join(pw_dir,'.ssh')
+ dot_ssh = os.path.join(pw_dir, '.ssh')
if not os.path.isdir(dot_ssh):
if not os.path.isdir(pw_dir):
- logger.verbose('account: WARNING: homedir %s does not exist for %s!'%(pw_dir,self.name))
+ logger.verbose('account: WARNING: homedir %s does not exist for %s!'%(pw_dir, self.name))
os.mkdir(pw_dir)
os.chown(pw_dir, uid, gid)
os.mkdir(dot_ssh)
- auth_keys = os.path.join(dot_ssh,'authorized_keys')
+ auth_keys = os.path.join(dot_ssh, 'authorized_keys')
tools.write_file(auth_keys, lambda f: f.write(new_keys))
# set access permissions and ownership properly
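# a minimal sketch of that step, assuming the uid/gid fetched above and
# openssh's usual permission requirements (mode names are illustrative):
os.chmod(dot_ssh, 0o700)
os.chmod(auth_keys, 0o600)
os.chown(dot_ssh, uid, gid)
os.chown(auth_keys, uid, gid)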
def _manage_ssh_dir (slicename, do_mount):
logger.log ("_manage_ssh_dir, requested to "+("mount" if do_mount else "umount")+" ssh dir for "+ slicename)
try:
- root_ssh="/home/%s/.ssh"%slicename
- sliver_ssh="/vservers/%s/home/%s/.ssh"%(slicename,slicename)
+ root_ssh = "/home/%s/.ssh"%slicename
+ sliver_ssh = "/vservers/%s/home/%s/.ssh"%(slicename, slicename)
def is_mounted (root_ssh):
for mount_line in file('/proc/mounts').readlines():
- if mount_line.find (root_ssh)>=0: return True
+ if mount_line.find (root_ssh) >= 0:
+ return True
return False
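# note: find() matches substrings, so e.g. a mount under
# /home/<slice>/.ssh-backup would also count as mounted;
# a stricter variant (sketch) compares the mount-point field exactly:
def is_mounted_strict(root_ssh):
    with open('/proc/mounts') as mounts:
        return any(line.split()[1] == root_ssh for line in mounts)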
if do_mount:
- # any of both might not exist yet
+ # either of the two might not exist yet
- for path in [root_ssh,sliver_ssh]:
+ for path in [root_ssh, sliver_ssh]:
if not os.path.exists (path):
os.mkdir(path)
if not os.path.isdir (path):
- raise Exception
+ raise Exception("%s is not a directory" % path)
if not is_mounted(root_ssh):
- command=['mount','--bind','-o','ro',root_ssh,sliver_ssh]
- mounted=logger.log_call (command)
- msg="OK" if mounted else "WARNING: FAILED"
- logger.log("_manage_ssh_dir: mounted %s into slice %s - %s"%(root_ssh,slicename,msg))
+ command = ['mount', '--bind', '-o', 'ro', root_ssh, sliver_ssh]
+ mounted = logger.log_call (command)
+ msg = "OK" if mounted else "WARNING: FAILED"
+ logger.log("_manage_ssh_dir: mounted %s into slice %s - %s"%(root_ssh, slicename, msg))
else:
if is_mounted (sliver_ssh):
- command=['umount',sliver_ssh]
- umounted=logger.log_call(command)
- msg="OK" if umounted else "WARNING: FAILED"
- logger.log("_manage_ssh_dir: umounted %s - %s"%(sliver_ssh,msg))
+ command = ['umount', sliver_ssh]
+ umounted = logger.log_call(command)
+ msg = "OK" if umounted else "WARNING: FAILED"
+ logger.log("_manage_ssh_dir: umounted %s - %s"%(sliver_ssh, msg))
except:
- logger.log_exc("_manage_ssh_dir failed",name=slicename)
+ logger.log_exc("_manage_ssh_dir failed", name=slicename)
class Worker:
Check account type is still valid. If not, recreate sliver.
If still valid, check if running and configure/start if not.
"""
- logger.log_data_in_file(rec,"/var/lib/nodemanager/%s.rec.txt"%rec['name'],
- 'raw rec captured in ensure_created',logger.LOG_VERBOSE)
+ logger.log_data_in_file(rec, "/var/lib/nodemanager/%s.rec.txt"%rec['name'],
+ 'raw rec captured in ensure_created', logger.LOG_VERBOSE)
curr_class = self._get_class()
next_class = type_acct_class[rec['type']]
if next_class != curr_class:
- try: next_class.create(self.name, rec)
- finally: create_sem.release()
- if not isinstance(self._acct, next_class): self._acct = next_class(rec)
+ try:
+ next_class.create(self.name, rec)
+ finally:
+ create_sem.release()
+ if not isinstance(self._acct, next_class):
+ self._acct = next_class(rec)
- logger.verbose("account.Worker.ensure_created: %s, running=%r"%(self.name,self.is_running()))
+ logger.verbose("account.Worker.ensure_created: %s, running=%r"%(self.name, self.is_running()))
# reservation_alive is set on reservable nodes, and its value is a boolean
if 'reservation_alive' in rec:
# NOTE: modules listed here will also be loaded in this order
# once loaded, they get re-ordered after their priority (lower comes first)
# for determining the runtime order
- core_modules=['net', 'conf_files', 'slivermanager', 'bwmon']
+ core_modules = ['net', 'conf_files', 'slivermanager', 'bwmon']
- default_period=600
- default_random=301
- default_priority=100
+ default_period = 600
+ default_random = 301
+ default_priority = 100
def __init__ (self):
self.modules += plugins
if self.options.user_module:
assert self.options.user_module in self.modules
- self.modules=[self.options.user_module]
+ self.modules = [self.options.user_module]
logger.verbose('nodemanager: Running single module %s'%self.options.user_module)
# log it for debug purposes, no matter what verbose is
logger.log_slivers(data)
logger.verbose("nodemanager: Sync w/ PLC done")
- last_data=data
+ last_data = data
except:
logger.log_exc("nodemanager: failed in GetSlivers")
- # XXX So some modules can at least boostrap.
+ # XXX So some modules can at least bootstrap.
logger.log("nodemanager: Can't contact PLC to GetSlivers(). Continuing.")
data = {}
- # for modules that request it though the 'persistent_data' property
+ # for modules that request it through the 'persistent_data' property
- last_data=self.loadSlivers()
+ last_data = self.loadSlivers()
# Invoke GetSlivers() functions from the callback modules
for module in self.loaded_modules:
logger.verbose('nodemanager: triggering %s.GetSlivers'%module.__name__)
try:
callback = getattr(module, 'GetSlivers')
- module_data=data
+ module_data = data
- if getattr(module,'persistent_data',False):
+ if getattr(module, 'persistent_data', False):
- module_data=last_data
+ module_data = last_data
- callback(data, config, plc)
+ callback(module_data, config, plc)
except SystemExit as e:
sys.exit(e)
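# a minimal callback module, as this loop assumes one (sketch;
# 'priority' and 'persistent_data' are the optional attributes
# read via getattr, 'start' is invoked once at startup):
#
#     priority = 150
#     persistent_data = False
#     def start(): pass
#     def GetSlivers(data, config, plc): pass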
Get PLC wide defaults from _default system slice. Adds them to config class.
"""
for slice in data.get('slivers'):
- if slice['name'] == config.PLC_SLICE_PREFIX+"_default":
+ if slice['name'] == config.PLC_SLICE_PREFIX + "_default":
attr_dict = {}
- for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value']
+ for attr in slice.get('attributes'):
+ attr_dict[attr['tagname']] = attr['value']
if len(attr_dict):
# It is safe to override the attributes with this, as this method has the right logic
for sliver in data.get('slivers'):
try:
- slicefamily=sliver.get('GetSliceFamily')
+ slicefamily = sliver.get('GetSliceFamily')
- for att in sliver['attributes']:
- if att['tagname']=='vref':
- att['value']=slicefamily
- continue
- sliver['attributes'].append({ 'tagname':'vref','value':slicefamily})
+ # update an existing 'vref' attribute, or append one if none was found
+ for att in sliver['attributes']:
+ if att['tagname'] == 'vref':
+ att['value'] = slicefamily
+ break
+ else:
+ sliver['attributes'].append({'tagname': 'vref', 'value': slicefamily})
except:
- logger.log_exc("nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",name=sliver['name'])
+ logger.log_exc("nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",
+ name=sliver['name'])
def dumpSlivers (self, slivers):
f = open(NodeManager.DB_FILE, "w")
# used e.g. in vsys-scripts's sliceip
tools.get_node_virt()
try:
- if self.options.daemon: tools.daemon()
+ if self.options.daemon:
+ tools.daemon()
# set log level
- if (self.options.verbose):
+ if self.options.verbose:
for module in self.modules:
try:
m = __import__(module)
- logger.verbose("nodemanager: triggering %s.start"%m.__name__)
+ logger.verbose("nodemanager: triggering %s.start" % m.__name__)
- try: m.start()
- except: logger.log("WARNING: module %s did not start")
+ try:
+ m.start()
+ except:
+ logger.log("WARNING: module %s did not start" % module)
self.loaded_modules.append(m)
sys.exit(1)
# sort on priority (lower first)
- def sort_module_priority (m1,m2):
+ def sort_module_priority (m1, m2):
- return getattr(m1,'priority',NodeManager.default_priority) - getattr(m2,'priority',NodeManager.default_priority)
+ return getattr(m1, 'priority', NodeManager.default_priority) - getattr(m2, 'priority', NodeManager.default_priority)
self.loaded_modules.sort(sort_module_priority)
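# equivalently, as a key-based sort (sketch; this is also the only form
# Python 3 accepts, list.sort there having no cmp argument):
self.loaded_modules.sort(
    key=lambda m: getattr(m, 'priority', NodeManager.default_priority))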
# get random periods
- iperiod=int(self.options.period)
- irandom=int(self.options.random)
+ iperiod = int(self.options.period)
+ irandom = int(self.options.random)
# Initialize XML-RPC client
plc = PLCAPI(config.plc_api_uri, config.cacert, session, timeout=iperiod/2)
while True:
# Main nodemanager Loop
- work_beg=time.time()
+ work_beg = time.time()
- logger.log('nodemanager: mainloop - calling GetSlivers - period=%d random=%d'%(iperiod,irandom))
+ logger.log('nodemanager: mainloop - calling GetSlivers - period=%d random=%d'%(iperiod, irandom))
self.GetSlivers(config, plc)
- delay=iperiod + random.randrange(0,irandom)
- work_end=time.time()
- work_duration=int(work_end-work_beg)
+ delay = iperiod + random.randrange(0, irandom)
+ work_end = time.time()
+ work_duration = int(work_end - work_beg)
- logger.log('nodemanager: mainloop has worked for %s s - sleeping for %d s'%(work_duration,delay))
+ logger.log('nodemanager: mainloop has worked for %s s - sleeping for %d s'%(work_duration, delay))
time.sleep(delay)
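# e.g. with the defaults above (period=600, random=301) each cycle
# sleeps 600 + randrange(0, 301) seconds, i.e. between 10 and 15 minutes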
except SystemExit:
@staticmethod
def dom_details (dom):
- output=""
- output += " id=%s - OSType=%s"%(dom.ID(),dom.OSType())
+ output = ""
+ output += " id=%s - OSType=%s"%(dom.ID(), dom.OSType())
# calling state() seems to be working fine
- (state,reason)=dom.state()
- output += " state=%s, reason=%s"%(STATES.get(state,state),REASONS.get(reason,reason))
+ (state, reason) = dom.state()
+ output += " state=%s, reason=%s"%(STATES.get(state, state),REASONS.get(reason, reason))
try:
# try to use info() - this however does not work for some reason on f20
# info cannot get info operation failed: Cannot read cputime for domain
def __repr__(self):
''' Helper method to get a "nice" output of the domain struct for debug purposes'''
- output="Domain %s"%self.name
- dom=self.dom
+ output = "Domain %s"%self.name
+ dom = self.dom
if dom is None:
output += " [no attached dom ?!?]"
else:
try:
self.dom.destroy()
except:
- logger.log_exc("in sliver_libvirt.stop",name=self.name)
+ logger.log_exc("in sliver_libvirt.stop", name=self.name)
def is_running(self):
''' Return True if the domain is running '''
- (state,_) = self.dom.state()
+ (state, _) = self.dom.state()
result = (state == libvirt.VIR_DOMAIN_RUNNING)
- logger.verbose('sliver_libvirt.is_running: %s => %s'%(self,result))
+ logger.verbose('sliver_libvirt.is_running: %s => %s'%(self, result))
return result
def configure(self, rec):
CON_BASE_DIR = '/vservers'
def __init__(self, rec):
- name=rec['name']
- Sliver_Libvirt.__init__(self,rec)
- Initscript.__init__(self,name)
+ name = rec['name']
+ Sliver_Libvirt.__init__(self, rec)
+ Initscript.__init__(self, name)
def configure(self, rec):
logger.log('========== sliver_lxc.configure {}'.format(self.name))
vref = rec['vref']
if vref is None:
vref = "lxc-f18-x86_64"
- logger.log("sliver_libvirt: %s: WARNING - no vref attached, using hard-wired default %s" % (name,vref))
+ logger.log("sliver_libvirt: %s: WARNING - no vref attached, using hard-wired default %s" % (name, vref))
# compute guest arch from vref
# essentially we want x86_64 (default) or i686 here for libvirt
try:
(x, y, arch) = vref.split('-')
- arch = "x86_64" if arch.find("64")>=0 else "i686"
+ arch = "x86_64" if arch.find("64") >= 0 else "i686"
except:
arch = 'x86_64'
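# e.g. vref 'lxc-f18-x86_64' splits into ('lxc', 'f18', 'x86_64') and
# yields 'x86_64'; a third field without '64' yields 'i686', and any
# vref that does not split in three falls back to 'x86_64' as above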
# check the template exists -- there's probably a better way..
if not os.path.isdir(refImgDir):
- logger.log('sliver_lxc: %s: ERROR Could not create sliver - reference image %s not found' % (name,vref))
- logger.log('sliver_lxc: %s: ERROR Expected reference image in %s'%(name,refImgDir))
+ logger.log('sliver_lxc: %s: ERROR Could not create sliver - reference image %s not found' % (name, vref))
+ logger.log('sliver_lxc: %s: ERROR Expected reference image in %s'%(name, refImgDir))
return
# this hopefully should be fixed now
# # so we need to check the expected container rootfs does not exist yet
# # this hopefully could be removed in a future release
# if os.path.exists (containerDir):
-# logger.log("sliver_lxc: %s: WARNING cleaning up pre-existing %s"%(name,containerDir))
+# logger.log("sliver_lxc: %s: WARNING cleaning up pre-existing %s"%(name, containerDir))
# command = ['btrfs', 'subvolume', 'delete', containerDir]
# logger.log_call(command, BTRFS_TIMEOUT)
# # re-check
# if os.path.exists (containerDir):
-# logger.log('sliver_lxc: %s: ERROR Could not create sliver - could not clean up empty %s'%(name,containerDir))
+# logger.log('sliver_lxc: %s: ERROR Could not create sliver - could not clean up empty %s'%(name, containerDir))
# return
# Snapshot the reference image fs
command = ['cp', '/home/%s/.ssh/id_rsa.pub'%name, '%s/root/.ssh/authorized_keys'%containerDir]
logger.log_call(command)
- logger.log("creating /etc/slicename file in %s" % os.path.join(containerDir,'etc/slicename'))
+ logger.log("creating /etc/slicename file in %s" % os.path.join(containerDir, 'etc/slicename'))
try:
- file(os.path.join(containerDir,'etc/slicename'), 'w').write(name)
+ file(os.path.join(containerDir, 'etc/slicename'), 'w').write(name)
except:
logger.log_exc("exception while creating /etc/slicename")
try:
- file(os.path.join(containerDir,'etc/slicefamily'), 'w').write(vref)
+ file(os.path.join(containerDir, 'etc/slicefamily'), 'w').write(vref)
except:
logger.log_exc("exception while creating /etc/slicefamily")
etcgroup = os.path.join(containerDir, 'etc/group')
if os.path.exists(etcpasswd):
# create all accounts with gid=1001 - i.e. 'slices' like it is in the root context
- slices_gid=1001
+ slices_gid = 1001
logger.log("adding user %(name)s id %(uid)d gid %(slices_gid)d to %(etcpasswd)s" % (locals()))
try:
- file(etcpasswd,'a').write("%(name)s:x:%(uid)d:%(slices_gid)d::/home/%(name)s:/bin/bash\n" % locals())
+ file(etcpasswd, 'a').write("%(name)s:x:%(uid)d:%(slices_gid)d::/home/%(name)s:/bin/bash\n" % locals())
except:
logger.log_exc("exception while updating %s"%etcpasswd)
logger.log("adding group slices with gid %(slices_gid)d to %(etcgroup)s"%locals())
try:
- file(etcgroup,'a').write("slices:x:%(slices_gid)d\n"%locals())
+ file(etcgroup, 'a').write("slices:x:%(slices_gid)d\n"%locals())
except:
logger.log_exc("exception while updating %s"%etcgroup)
sudoers = os.path.join(containerDir, 'etc/sudoers')
if os.path.exists(sudoers):
try:
- file(sudoers,'a').write("%s ALL=(ALL) NOPASSWD: ALL\n" % name)
+ file(sudoers, 'a').write("%s ALL=(ALL) NOPASSWD: ALL\n" % name)
except:
logger.log_exc("exception while updating /etc/sudoers")
# we save the whole business in /etc/planetlab.profile
# and source this file for both root and the slice uid's .profile
- # prompt for slice owner, + LD_PRELOAD for transparently wrap bind
+ # prompt for the slice owner, plus LD_PRELOAD to transparently wrap bind(2)
- pl_profile=os.path.join(containerDir,"etc/planetlab.profile")
- ld_preload_text="""# by default, we define this setting so that calls to bind(2),
+ pl_profile = os.path.join(containerDir, "etc/planetlab.profile")
+ ld_preload_text = """# by default, we define this setting so that calls to bind(2),
# when invoked on 0.0.0.0, get transparently redirected to the public interface of this node
# see https://svn.planet-lab.org/wiki/LxcPortForwarding"""
- usrmove_path_text="""# VM's before Features/UsrMove need /bin and /sbin in their PATH"""
- usrmove_path_code="""
+ usrmove_path_text = """# VM's before Features/UsrMove need /bin and /sbin in their PATH"""
+ usrmove_path_code = """
pathmunge () {
if ! echo $PATH | /bin/egrep -q "(^|:)$1($|:)" ; then
if [ "$2" = "after" ] ; then
pathmunge /sbin after
unset pathmunge
"""
- with open(pl_profile,'w') as f:
+ with open(pl_profile, 'w') as f:
f.write("export PS1='%s@\H \$ '\n"%(name))
f.write("%s\n"%ld_preload_text)
f.write("export LD_PRELOAD=/etc/planetlab/lib/bind_public.so\n")
# make sure this file is sourced from both root's and slice's .profile
enforced_line = "[ -f /etc/planetlab.profile ] && source /etc/planetlab.profile\n"
for path in [ 'root/.profile', 'home/%s/.profile'%name ]:
- from_root=os.path.join(containerDir,path)
+ from_root = os.path.join(containerDir, path)
- # if dir is not yet existing let's forget it for now
- if not os.path.isdir(os.path.dirname(from_root)): continue
+ # if the dir does not exist yet, skip it for now
+ if not os.path.isdir(os.path.dirname(from_root)):
+ continue
- found=False
+ found = False
try:
- contents=file(from_root).readlines()
+ contents = file(from_root).readlines()
for content in contents:
- if content==enforced_line: found=True
- except IOError: pass
+ if content == enforced_line:
+ found = True
+ except IOError:
+ pass
if not found:
- with open(from_root,"a") as user_profile:
+ with open(from_root, "a") as user_profile:
user_profile.write(enforced_line)
# in case we create the slice's .profile when writing
- if from_root.find("/home")>=0:
- command=['chown','%s:slices'%name,from_root]
+ if from_root.find("/home") >= 0:
+ command = ['chown', '%s:slices'%name, from_root]
logger.log_call(command)
# Lookup for xid and create template after the user is created so we
xid = bwlimit.get_xid(name)
# Template for libvirt sliver configuration
- template_filename_sliceimage = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR,'lxc_template.xml')
+ template_filename_sliceimage = os.path.join(Sliver_LXC.REF_IMG_BASE_DIR, 'lxc_template.xml')
if os.path.isfile (template_filename_sliceimage):
logger.verbose("Using XML template %s"%template_filename_sliceimage)
- template_filename=template_filename_sliceimage
+ template_filename = template_filename_sliceimage
else:
logger.log("Cannot find XML template %s"%template_filename_sliceimage)
return
logger.log('sliver_lxc.destroy: %s cleanly destroyed.'%name)
else:
# we're in /
- #logger.log("-TMP-cwd %s : %s"%(name,os.getcwd()))
+ #logger.log("-TMP-cwd %s : %s"%(name, os.getcwd()))
# also lsof never shows anything relevant; this is painful..
#logger.log("-TMP-lsof %s"%name)
- #command=['lsof']
+ #command = ['lsof']
#logger.log_call(command)
logger.log("-TMP-ls-l %s"%name)
command = ['ls', '-lR', containerDir]
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
rlim = rlimit.lower()
- DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
- DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
- DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
+ DEFAULT_ALLOCATION["%s_min"%rlim] = KEEP_LIMIT
+ DEFAULT_ALLOCATION["%s_soft"%rlim] = KEEP_LIMIT
+ DEFAULT_ALLOCATION["%s_hard"%rlim] = KEEP_LIMIT
class Sliver_VS(vserver.VServer, Account, Initscript):
"""This class wraps vserver.VServer to make its interface closer to what we need."""
_init_disk_info_sem = BoundedSemaphore()
def __init__(self, rec):
- name=rec['name']
+ name = rec['name']
logger.verbose ('sliver_vs: %s init'%name)
try:
logger.log("sliver_vs: %s: first chance..."%name)
- vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
+ vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
Account.__init__ (self, name)
Initscript.__init__ (self, name)
- except Exception, err:
+ except Exception as err:
if not isinstance(err, vserver.NoSuchVServer):
# Probably a bad vserver or vserver configuration file
- logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
+ logger.log_exc("sliver_vs:__init__ (first chance) %s", name=name)
logger.log('sliver_vs: %s: recreating bad vserver' % name)
self.destroy(name)
self.create(name, rec)
- vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
+ vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
Account.__init__ (self, name)
Initscript.__init__ (self, name)
# added by caglar
- # band-aid for short period as old API doesn't have GetSliceFamily function
+ # band-aid for a short period, as the old API doesn't have the GetSliceFamily function
vref = "planetlab-f8-i386"
- logger.log("sliver_vs: %s: ERROR - no vref attached, using hard-wired default %s"%(name,vref))
+ logger.log("sliver_vs: %s: ERROR - no vref attached, using hard-wired default %s"%(name, vref))
# used to look in /etc/planetlab/family,
# now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
# check the template exists -- there's probably a better way..
if not os.path.isdir ("/vservers/.vref/%s"%vref):
- logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
+ logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name, vref))
return
# compute guest personality
try:
- (x,y,arch)=vref.split('-')
+ (x, y, arch) = vref.split('-')
# mh, this of course applies when 'vref' is e.g. 'netflow'
# and that's not quite right
except:
- arch='i386'
+ arch = 'i386'
- def personality (arch): return "linux64" if arch.find("64") >=0 else "linux32"
+ def personality (arch):
+ return "linux64" if arch.find("64") >= 0 else "linux32"
- command=[]
+ command = []
# be verbose
- command += ['/bin/bash','-x',]
+ command += ['/bin/bash', '-x', ]
command += ['/usr/sbin/vuseradd', ]
if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
- command += [ "-i",]
+ command += [ "-i", ]
- # the vsliver imge to use
+ # the vsliver image to use
command += [ '-t', vref, ]
# slice name
# set personality: only if needed (if arch's differ)
if tools.root_context_arch() != arch:
file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
- logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))
+ logger.log('sliver_vs: %s: set personality to %s'%(name, personality(arch)))
@staticmethod
def destroy(name):
# but it is no big deal as umount_ssh_dir checks before it umounts..
Account.umount_ssh_dir(name)
logger.log("sliver_vs: destroying %s"%name)
- logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
+ logger.log_call(['/bin/bash', '-x', '/usr/sbin/vuserdel', name, ])
def configure(self, rec):
def rerun_slice_vinit(self):
command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
- subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
+ subprocess.call(command + "&", stdin=open('/dev/null', 'r'),
+ stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
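# the trailing "&" together with shell=True makes the shell background
# the restart, so call() returns immediately; an assumed equivalent
# (sketch) spawns without waiting instead:
# subprocess.Popen(command, shell=True, stdin=open('/dev/null', 'r'),
#                  stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT)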
def set_resources(self):
disk_max = self.rspec['disk_max']
self.disk_usage_initialized = True
vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
except:
- logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)
+ logger.log_exc('sliver_vs: failed to set max disk usage', name=self.name)
# get/set the min/soft/hard values for all of the vserver
# related RLIMITS. Note that vserver currently only
self.set_capabilities_config(self.rspec['capabilities'])
if self.rspec['capabilities']:
- logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
+ logger.log('sliver_vs: %s: setting capabilities to %s'
+ % (self.name, self.rspec['capabilities']))
cpu_pct = self.rspec['cpu_pct']
cpu_share = self.rspec['cpu_share']
count = 1
for key in self.rspec.keys():
if key.find('sysctl.') == 0:
- sysctl=key.split('.')
+ sysctl = key.split('.')
try:
# /etc/vservers/<guest>/sysctl/<id>/
dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
value.close()
count += 1
- logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
+ logger.log("sliver_vs: %s: writing %s=%s"%(self.name, key, self.rspec[key]))
- except IOError, e:
+ except IOError as e:
- logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
- logger.log("sliver_vs: %s: error = %s"%(self.name,e))
+ logger.log("sliver_vs: %s: could not set %s=%s"%(self.name, key, self.rspec[key]))
+ logger.log("sliver_vs: %s: error = %s"%(self.name, e))
if self.rspec['enabled'] > 0:
if not os.path.exists (vserver_config_path):
os.makedirs (vserver_config_path)
file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
- logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
- except IOError,e:
- logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
- except Exception,e:
- logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)
+ logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id, self.name))
+ except IOError as e:
+ logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name, str(e)))
+ except Exception as e:
+ logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e), name=self.name)
if self.enabled == False: