period, \
default_MaxRate, \
default_Maxi2Rate, \
- default_MaxKByte,\
- default_Maxi2KByte,\
+ default_MaxKByte, \
+ default_Maxi2KByte, \
default_Share, \
dev_default
# Update byte counts
slice.update(kernelhtbs[xid], live[xid]['_rspec'])
- logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),DB_FILE))
+ logger.verbose("bwmon: Saving %s slices in %s" % (len(slices), DB_FILE))
f = open(DB_FILE, "w")
pickle.dump((version, slices, deaddb), f)
f.close()
try:
uid = pwd.getpwnam(cf_rec['file_owner'])[2]
except:
- logger.log('conf_files: cannot find user %s -- %s not updated'%(cf_rec['file_owner'],dest))
+ logger.log('conf_files: cannot find user %s -- %s not updated'%(cf_rec['file_owner'], dest))
return
try:
gid = grp.getgrnam(cf_rec['file_group'])[2]
except:
- logger.log('conf_files: cannot find group %s -- %s not updated'%(cf_rec['file_group'],dest))
+ logger.log('conf_files: cannot find group %s -- %s not updated'%(cf_rec['file_group'], dest))
return
url = 'https://%s/%s' % (self.config.PLC_BOOT_HOST, cf_rec['source'])
# set node_id at the end of the request - hacky
try:
logger.verbose("conf_files: retrieving URL=%s"%url)
contents = curlwrapper.retrieve(url, self.config.cacert)
- except xmlrpclib.ProtocolError,e:
+ except xmlrpclib.ProtocolError as e:
logger.log('conf_files: failed to retrieve %s from %s, skipping' % (dest, url))
return
if not cf_rec['always_update'] and sha(contents).digest() == self.checksum(dest):
logger.log('conf_files: installing file %s from %s' % (dest, url))
try: os.makedirs(os.path.dirname(dest))
except OSError: pass
- tools.write_file(dest, lambda f: f.write(contents), mode=mode, uidgid=(uid,gid))
+ tools.write_file(dest, lambda f: f.write(contents), mode=mode, uidgid=(uid, gid))
if self.system(cf_rec['postinstall_cmd']): self.system(err_cmd)
def run_once(self, data):
try: self.update_conf_file(f)
except: logger.log_exc("conf_files: failed to update conf_file")
else:
- logger.log_missing_data("conf_files.run_once",'conf_files')
+ logger.log_missing_data("conf_files.run_once", 'conf_files')
def start(): pass
if __name__ == '__main__':
from pprint import pprint
- for (k,v) in Config().__dict__.iteritems():
+ for (k, v) in Config().__dict__.iteritems():
if k not in ['__builtins__']:
- pprint ( (k,v), )
+ pprint((k, v))
lastCpu = cpu
logger.log("CoreSched: allocating unit " + str(cpu) + " to slice " + name)
- reservations[name] = reservations.get(name,[]) + [cpu]
+ reservations[name] = reservations.get(name, []) + [cpu]
# now find a memory node to go with the cpu
if memSchedule:
if mem != None:
mems.remove(mem)
logger.log("CoreSched: allocating memory node " + str(mem) + " to slice " + name)
- mem_reservations[name] = mem_reservations.get(name,[]) + [mem]
+ mem_reservations[name] = mem_reservations.get(name, []) + [mem]
else:
logger.log("CoreSched: failed to find memory node for cpu" + str(cpu))
# note that if a reservation is [], then we don't need to add
# bestEffort cores to it, since it is bestEffort by default.
- if reservations.get(name,[]) != []:
+ if reservations.get(name, []) != []:
reservations[name] = reservations[name] + reservations["_default"]
- mem_reservations[name] = mem_reservations.get(name,[]) + mem_reservations["_default"]
+ mem_reservations[name] = mem_reservations.get(name, []) + mem_reservations["_default"]
logger.log("CoreSched: adding besteffort units to " + name + ". new units = " + str(reservations[name]))
self.reserveUnits(self.cgroup_var_name, reservations)
print "cpus:", x.listToRange(x.get_cpus())
print "sibling map:"
for item in x.get_cpus():
- print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item,[])])
+ print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])])
print "mems:", x.listToRange(x.get_mems())
print "cpu to memory map:"
for item in x.get_mems():
- print " ", item, ",".join([str(y) for y in x.mems_map.get(item,[])])
+ print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])])
rspec_sl_test1 = {"cpu_cores": "1"}
rec_sl_test1 = {"_rspec": rspec_sl_test1}
lastCpu = cpu
logger.log("CoreSched: allocating unit " + str(cpu) + " to slice " + name)
- reservations[name] = reservations.get(name,[]) + [cpu]
+ reservations[name] = reservations.get(name, []) + [cpu]
# now find a memory node to go with the cpu
if memSchedule:
if mem != None:
mems.remove(mem)
logger.log("CoreSched: allocating memory node " + str(mem) + " to slice " + name)
- mem_reservations[name] = mem_reservations.get(name,[]) + [mem]
+ mem_reservations[name] = mem_reservations.get(name, []) + [mem]
else:
logger.log("CoreSched: failed to find memory node for cpu" + str(cpu))
# note that if a reservation is [], then we don't need to add
# bestEffort cores to it, since it is bestEffort by default.
- if reservations.get(name,[]) != []:
+ if reservations.get(name, []) != []:
reservations[name] = reservations[name] + reservations["_default"]
- mem_reservations[name] = mem_reservations.get(name,[]) + mem_reservations["_default"]
+ mem_reservations[name] = mem_reservations.get(name, []) + mem_reservations["_default"]
logger.log("CoreSched: adding besteffort units to " + name + ". new units = " + str(reservations[name]))
self.reserveUnits(self.cgroup_var_name, reservations)
if mems_map[self.mems[0]] == []:
work = []
for item in reversed(self.mems):
- if mems_map[item]!=[]:
+ if mems_map[item] != []:
work = mems_map[item]
else: # mems_map[item]==[]
mems_map[item] = work
return []
siblings = []
- x = int(open(fn,"rt").readline().strip(),16)
+ with open(fn, "rt") as f:
+ x = int(f.readline().strip(), 16)
cpuid = 0
while (x>0):
if (x&1)!=0:
print "cpus:", x.listToRange(x.get_cpus())
print "sibling map:"
for item in x.get_cpus():
- print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item,[])])
+ print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])])
print "mems:", x.listToRange(x.get_mems())
print "cpu to memory map:"
for item in x.get_mems():
- print " ", item, ",".join([str(y) for y in x.mems_map.get(item,[])])
+ print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])])
rspec_sl_test1 = {"cpu_cores": "1"}
rec_sl_test1 = {"_rspec": rspec_sl_test1}
if timeout:
command += ('--max-time', str(timeout))
command += ('--connect-timeout', str(timeout))
command += (url,)
if verbose:
- print 'Invoking ',command
- if postdata: print 'with postdata=',postdata
+ print 'Invoking ', command
+ if postdata: print 'with postdata=', postdata
p = Sopen(command , stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
if postdata: p.stdin.write(postdata)
p.stdin.close()
- sout, sin, serr = select([p.stdout,p.stderr],[],[], timeout)
+ sout, sin, serr = select([p.stdout, p.stderr], [], [], timeout)
if len(sout) == 0 and len(sin) == 0 and len(serr) == 0:
logger.verbose("curlwrapper: timed out after %s" % timeout)
p.kill(signal.SIGKILL)
# we still need the other ones to be handled
try:
sliver = account.get(name)
- logger.verbose("database: sync : looping on %s (shell account class from pwd %s)" %(name,sliver._get_class()))
+ logger.verbose("database: sync : looping on %s (shell account class from pwd %s)" %(name, sliver._get_class()))
# Make sure we refresh accounts that are running
if rec['instantiation'] == 'plc-instantiated':
logger.verbose ("database: sync : ensure_create'ing 'instantiation' sliver %s"%name)
except SystemExit as e:
sys.exit(e)
except:
- logger.log_exc("database: sync failed to handle sliver",name=name)
+ logger.log_exc("database: sync failed to handle sliver", name=name)
# Wake up bwmom to update limits.
bwmon.lock.set()
# create symlink for runlevel 3
if not os.path.islink(enable_link):
try:
- logger.log("Initscript: %s: creating runlevel3 symlink %s" % (self.name,enable_link))
+ logger.log("Initscript: %s: creating runlevel3 symlink %s" % (self.name, enable_link))
os.symlink(enable_target, enable_link)
except:
logger.log_exc("Initscript failed to create runlevel3 symlink %s" % enable_link, name=self.name)
logger.log("Initscript: %s: creating enabling symlink %s" % (self.name, enable_link))
os.symlink(enable_target, enable_link)
except:
- logger.log_exc("Initscript failed to create enabling symlink %s" % enable_link,name=name)
+ logger.log_exc("Initscript failed to create enabling symlink %s" % enable_link, name=name)
# for some reason the various modules are still triggered even when the
# data from PLC cannot be reached
# we show this message instead of the exception stack instead in this case
-def log_missing_data(msg,key):
- log("%s: could not find the %s key in data (PLC connection down?) - IGNORED"%(msg,key))
+def log_missing_data(msg, key):
+ log("%s: could not find the %s key in data (PLC connection down?) - IGNORED"%(msg, key))
-def log_data_in_file(data, file, message="",level=LOG_NODE):
+def log_data_in_file(data, file, message="", level=LOG_NODE):
if level > LOG_LEVEL:
return
import pprint, time
try:
- f=open(file,'w')
- now=time.strftime("Last update: %Y.%m.%d at %H:%M:%S %Z", time.localtime())
- f.write(now+'\n')
- if message: f.write('Message:'+message+'\n')
- pp=pprint.PrettyPrinter(stream=f,indent=2)
- pp.pprint(data)
- f.close()
- verbose("logger:.log_data_in_file Owerwrote %s"%file)
+ with open(file, 'w') as f:
+ now=time.strftime("Last update: %Y.%m.%d at %H:%M:%S %Z", time.localtime())
+ f.write(now+'\n')
+ if message: f.write('Message:'+message+'\n')
+ pp=pprint.PrettyPrinter(stream=f, indent=2)
+        pp.pprint(data)
+        verbose("logger:.log_data_in_file Owerwrote %s"%file)
except:
- log_exc('logger.log_data_in_file failed - file=%s - message=%r'%(file,message))
+ log_exc('logger.log_data_in_file failed - file=%s - message=%r'%(file, message))
def log_slivers(data):
log_data_in_file(data, LOG_SLIVERS, "raw GetSlivers")
self.buffer = ''
self.message = message
- def add(self,c):
+ def add(self, c):
self.buffer += c
if c=='\n':
self.flush()
break
# child has failed
else:
- log("log_call:end command (%s) returned with code %d" %(message,returncode))
+ log("log_call:end command (%s) returned with code %d" %(message, returncode))
break
# no : still within timeout ?
if time.time() >= trigger:
buffer.flush()
child.terminate()
- log("log_call:end terminating command (%s) - exceeded timeout %d s"%(message,timeout))
+ log("log_call:end terminating command (%s) - exceeded timeout %d s"%(message, timeout))
break
except:
log_exc("failed to run command %s" % message)
if not 'interfaces' in data:
# added by caglar
# band-aid for short period as old API returns networks instead of interfaces
- # logger.log_missing_data('net.GetSlivers','interfaces')
+ # logger.log_missing_data('net.GetSlivers', 'interfaces')
# return
if not 'networks' in data:
- logger.log_missing_data('net.GetSlivers','interfaces')
+ logger.log_missing_data('net.GetSlivers', 'interfaces')
return
else:
KEY_NAME = "networks"
if os.path.exists(self.options.path):
sys.path.append(self.options.path)
plugins = [ os.path.split(os.path.splitext(x)[0])[1]
- for x in glob.glob( os.path.join(self.options.path,'*.py') )
+ for x in glob.glob( os.path.join(self.options.path, '*.py') )
if not x.endswith("/__init__.py")
]
self.modules += plugins
try:
callback = getattr(module, 'GetSlivers')
module_data = data
- if getattr(module,'persistent_data',False):
+ if getattr(module, 'persistent_data', False):
module_data = last_data
callback(data, config, plc)
except SystemExit as e:
if att['tagname'] == 'vref':
att['value'] = slicefamily
continue
- sliver['attributes'].append({ 'tagname':'vref','value':slicefamily})
+ sliver['attributes'].append({ 'tagname':'vref', 'value':slicefamily})
except:
logger.log_exc("nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",
name=sliver['name'])
# sort on priority (lower first)
def module_priority (m):
- return getattr(m,'priority',NodeManager.default_priority)
+ return getattr(m, 'priority', NodeManager.default_priority)
self.loaded_modules.sort(key=module_priority)
logger.log('ordered modules:')
try:
plc.update_session()
logger.log("nodemanager: Authentication Failure. Retrying")
- except Exception,e:
+ except Exception as e:
logger.log("nodemanager: Retry Failed. ({}); Waiting..".format(e))
time.sleep(iperiod)
logger.log("nodemanager: Authentication Succeeded!")
logger.log('nodemanager: mainloop - calling GetSlivers - period={} random={}'
.format(iperiod, irandom))
self.GetSlivers(config, plc)
- delay = iperiod + random.randrange(0,irandom)
+ delay = iperiod + random.randrange(0, irandom)
work_end = time.time()
work_duration = int(work_end-work_beg)
logger.log('nodemanager: mainloop has worked for {} s - sleeping for {} s'
- .format(work_duration,delay))
+ .format(work_duration, delay))
time.sleep(delay)
except SystemExit:
pass
_writeconf = True
# Add to dict of codemuxslices. Make list to support more than one
# codemuxed host per slice.
- codemuxslices.setdefault(sliver['name'],[])
+ codemuxslices.setdefault(sliver['name'], [])
codemuxslices[sliver['name']].append(params)
except:
logger.log("codemux: sliver %s not running yet. Deferring."\
if not os.path.exists("/etc/init.d/codemux"): return
logger.log("codemux: Restarting codemux service")
if isRunning():
- logger.log_call(["/etc/init.d/codemux","condrestart", ])
+ logger.log_call(["/etc/init.d/codemux", "condrestart", ])
else:
- logger.log_call(["/etc/init.d/codemux","restart", ])
+ logger.log_call(["/etc/init.d/codemux", "restart", ])
def startService():
if not os.path.exists("/etc/init.d/codemux"): return
node_id = tools.node_id()
if 'slivers' not in data:
- logger.log_missing_data("drl.GetSlivers",'slivers')
+ logger.log_missing_data("drl.GetSlivers", 'slivers')
return
for sliver in data['slivers']:
def GetSlivers(data, config=None, plc=None):
if 'slivers' not in data:
- logger.log_missing_data("hostmap.GetSlivers",'slivers')
+ logger.log_missing_data("hostmap.GetSlivers", 'slivers')
return
if 'hostname' not in data:
def GetSlivers(data, config=None, plc=None):
if 'slivers' not in data:
- logger.log_missing_data("interfaces.GetSlivers",'slivers')
+ logger.log_missing_data("interfaces.GetSlivers", 'slivers')
return
for sliver in data['slivers']:
if tag['tagname'] == 'interface':
interfaces = eval(tag['value'])
- if not isinstance(interfaces, (list,tuple)):
+ if not isinstance(interfaces, (list, tuple)):
# if interface is not a list, then make it into a singleton list
interfaces = [interfaces]
url = mydict['url']
try:
contents = curlwrapper.retrieve(url)
- except xmlrpclib.ProtocolError,e:
+ except xmlrpclib.ProtocolError as e:
logger.log('interfaces (%s): failed to retrieve %s' % (slicename, url))
continue
else:
};
""" % locals()
- with open(radvd_conf_file,'w') as f:
+ with open(radvd_conf_file, 'w') as f:
f.write(configRadvd)
kill_radvd()
start_radvd()
valid_prefix = False
else:
valid_prefix = False
- #logger.log("ipv6: '%s'=%s" % (sliversipv6prefixtag,ipv6addr) )
+ #logger.log("ipv6: '%s'=%s" % (sliversipv6prefixtag, ipv6addr) )
valid_ipv6 = tools.is_valid_ipv6(ipv6addr)
if not(valid_ipv6):
logger.log("ipv6: the 'sliversipv6prefix' tag presented a non-valid IPv6 address!")
tools.reboot_slivers()
else:
logger.log("ipv6: starting to redefine the virtual network...")
- #network_elem = buildLibvirtDefaultNetConfig(dom,ipv6addr,prefix)
+ #network_elem = buildLibvirtDefaultNetConfig(dom, ipv6addr, prefix)
network_elem = add_ipv6(dom, ipv6addr, prefix)
set_up(networkLibvirt, connLibvirt, network_elem, ipv6addr, prefix)
logger.log("ipv6: trying to reboot the slivers...")
# hopefully temporary: when trigger script is missing, fetch it at the url here
omf_rc_trigger_url="http://git.mytestbed.net/?p=omf.git;a=blob_plain;f=omf_rc/bin/plc_trigger_omf_rc;hb=HEAD"
def fetch_trigger_script_if_missing (slicename):
- full_path="/vservers/%s/%s"%(slicename,omf_rc_trigger_script)
+ full_path="/vservers/%s/%s"%(slicename, omf_rc_trigger_script)
if not os.path.isfile (full_path):
- retcod=subprocess.call (['curl','--silent','-o',full_path,omf_rc_trigger_url])
+ retcod=subprocess.call (['curl', '--silent', '-o', full_path, omf_rc_trigger_url])
if retcod!=0:
logger.log("Could not fetch %s"%omf_rc_trigger_url)
else:
- subprocess.call(['chmod','+x',full_path])
+ subprocess.call(['chmod', '+x', full_path])
logger.log("omf_resctl: fetched %s"%(full_path))
logger.log("omf_resctl: from %s"%(omf_rc_trigger_url))
def GetSlivers(data, conf = None, plc = None):
logger.log("omf_resctl.GetSlivers")
if 'accounts' not in data:
- logger.log_missing_data("omf_resctl.GetSlivers",'accounts')
+ logger.log_missing_data("omf_resctl.GetSlivers", 'accounts')
return
try:
expires=str(sliver['expires'])
yaml_template = config_ple_template
yaml_contents = yaml_template\
- .replace('_xmpp_server_',xmpp_server)\
- .replace('_slicename_',slicename)\
- .replace('_hostname_',hostname)\
- .replace('_expires_',expires)
- yaml_full_path="/vservers/%s/%s"%(slicename,yaml_slice_path)
+ .replace('_xmpp_server_', xmpp_server)\
+ .replace('_slicename_', slicename)\
+ .replace('_hostname_', hostname)\
+ .replace('_expires_', expires)
+ yaml_full_path="/vservers/%s/%s"%(slicename, yaml_slice_path)
yaml_full_dir=os.path.dirname(yaml_full_path)
if not os.path.isdir(yaml_full_dir):
try: os.makedirs(yaml_full_dir)
except OSError: pass
- config_changes=tools.replace_file_with_string(yaml_full_path,yaml_contents)
- logger.log("yaml_contents length=%d, config_changes=%r"%(len(yaml_contents),config_changes))
+ config_changes=tools.replace_file_with_string(yaml_full_path, yaml_contents)
+ logger.log("yaml_contents length=%d, config_changes=%r"%(len(yaml_contents), config_changes))
# would make sense to also check for changes to authorized_keys
# would require saving a copy of that some place for comparison
# xxx todo
# hence sudo -i
slice_command = [ "sudo", "-i", omf_rc_trigger_script ]
to_run = tools.command_in_slice (slicename, slice_command)
- log_filename = "/vservers/%s/%s"%(slicename,omf_rc_trigger_log)
+ log_filename = "/vservers/%s/%s"%(slicename, omf_rc_trigger_log)
logger.log("omf_resctl: starting %s"%to_run)
logger.log("redirected into %s"%log_filename)
logger.log("*not* waiting for completion..")
- with open(log_filename,"a") as log_file:
- subprocess.Popen(to_run, stdout=log_file,stderr=subprocess.STDOUT)
+ with open(log_filename, "a") as log_file:
+ subprocess.Popen(to_run, stdout=log_file, stderr=subprocess.STDOUT)
# a first version tried to 'communicate' on that subprocess instance
# but that tended to create deadlocks in some cases
# causing nodemanager to stall...
def start():
logger.log('private bridge plugin starting up...')
-def log_call_read(command,timeout=logger.default_timeout_minutes*60,poll=1):
+def log_call_read(command, timeout=logger.default_timeout_minutes*60, poll=1):
message=" ".join(command)
logger.log("log_call: running command %s" % message)
logger.verbose("log_call: timeout=%r s" % timeout)
stdout = ""
while True:
# see if anything can be read within the poll interval
- (r,w,x)=select.select([child.stdout],[],[],poll)
+ (r, w, x)=select.select([child.stdout], [], [], poll)
if r: stdout = stdout + child.stdout.read(1)
# is process over ?
returncode=child.poll()
return (returncode, stdout)
# child has failed
else:
- log("log_call:end command (%s) returned with code %d" %(message,returncode))
+ log("log_call:end command (%s) returned with code %d" %(message, returncode))
return (returncode, stdout)
# no : still within timeout ?
if time.time() >= trigger:
child.terminate()
- logger.log("log_call:end terminating command (%s) - exceeded timeout %d s"%(message,timeout))
+ logger.log("log_call:end terminating command (%s) - exceeded timeout %d s"%(message, timeout))
return (-2, None)
break
except Exception as e:
- logger.log_exc("failed to run command %s -> %s" % (message,e))
+ logger.log_exc("failed to run command %s -> %s" % (message, e))
return (-1, None)
return stdout.split()
def ovs_addbridge(name):
- (returncode, stdout) = ovs_vsctl(["add-br",name])
+ (returncode, stdout) = ovs_vsctl(["add-br", name])
if (returncode != 0): raise OvsException("add-br")
def ovs_listports(name):
return stdout.split()
def ovs_delbridge(name):
- (returncode, stdout) = ovs_vsctl(["del-br",name])
+ (returncode, stdout) = ovs_vsctl(["del-br", name])
if (returncode != 0): raise OvsException("del-br")
def ovs_addport(name, portname, type, remoteip, key):
if (returncode != 0): raise OvsException("add-port")
def ovs_delport(name, portname):
- (returncode, stdout) = ovs_vsctl(["del-port",name,portname])
+ (returncode, stdout) = ovs_vsctl(["del-port", name, portname])
if (returncode != 0): raise OvsException("del-port")
def ensure_slicebridge_created(name, addr):
node_id = tools.node_id()
if 'slivers' not in data:
- logger.log_missing_data("privatebridge.GetSlivers",'slivers')
+ logger.log_missing_data("privatebridge.GetSlivers", 'slivers')
return
valid_bridges = []
for attribute in sliver['attributes']:
attributes[attribute['tagname']] = attribute['value']
- bridge_name = attributes.get('slice_bridge_name',None)
+ bridge_name = attributes.get('slice_bridge_name', None)
if bridge_name:
configure_slicebridge(sliver, attributes)
valid_bridges.append(bridge_name)
def GetSlivers(data, config=None, plc=None):
if 'slivers' not in data:
- logger.log_missing_data("rawdisk.GetSlivers",'slivers')
+ logger.log_missing_data("rawdisk.GetSlivers", 'slivers')
return
devices = get_unused_devices()
for sliver in data['slivers']:
for attribute in sliver['attributes']:
- name = attribute.get('tagname',attribute.get('name',''))
+ name = attribute.get('tagname', attribute.get('name', ''))
if name == 'rawdisk':
for i in devices:
st = os.stat(i)
##############################
# rough implementation for a singleton class
-def Singleton (klass,*args,**kwds):
- if not hasattr(klass,'_instance'):
- klass._instance=klass(*args,**kwds)
+def Singleton (klass, *args, **kwds):
+ if not hasattr(klass, '_instance'):
+ klass._instance=klass(*args, **kwds)
return klass._instance
def start():
# check we're using a compliant GetSlivers
if 'reservation_policy' not in data:
- logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
+ logger.log_missing_data("reservation.GetSlivers", 'reservation_policy')
return
self.reservation_policy=data['reservation_policy']
if 'leases' not in data:
- logger.log_missing_data("reservation.GetSlivers",'leases')
+ logger.log_missing_data("reservation.GetSlivers", 'leases')
return
# store data locally
# regular nodes are not affected
if self.reservation_policy == 'none':
return
- elif self.reservation_policy not in ['lease_or_idle','lease_or_shared']:
+ elif self.reservation_policy not in ['lease_or_idle', 'lease_or_shared']:
logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%self.reservation_policy)
return
- # at this point we have reservation_policy in ['lease_or_idle','lease_or_shared']
+ # at this point we have reservation_policy in ['lease_or_idle', 'lease_or_shared']
# we make no difference for now
logger.log("reservation.GetSlivers: reservable node -- policy=%s"%self.reservation_policy)
self.sync_timers_from_leases()
def sync_timers_from_leases (self):
self.clear_timers()
for lease in self.data['leases']:
- self.ensure_timer_from_until(lease['t_from'],lease['t_until'])
+ self.ensure_timer_from_until(lease['t_from'], lease['t_until'])
# assuming t1<t2
- def ensure_timer_from_until (self, t1,t2):
+ def ensure_timer_from_until (self, t1, t2):
now=int(time.time())
# both times are in the past: forget about it
if t2 < now : return
# we're in the middle of the lease: make sure to arm a callback in the near future for checking
# this mostly is for starting the slice if nodemanager gets started in the middle of a lease
if t1 < now :
- self.ensure_timer (now,now+10)
+ self.ensure_timer (now, now+10)
# both are in the future : arm them
else :
- self.ensure_timer (now,self.round_time(t1))
- self.ensure_timer (now,self.round_time(t2))
+ self.ensure_timer (now, self.round_time(t1))
+ self.ensure_timer (now, self.round_time(t2))
def ensure_timer(self, now, timestamp):
if timestamp in self.timers: return
reservation.time_printable(now),
reservation.time_printable(timestamp)))
self.granularity_callback (now)
- timer=threading.Timer(timestamp-now,this_closure)
+ timer=threading.Timer(timestamp-now, this_closure)
self.timers[timestamp]=timer
timer.start()
@staticmethod
def time_printable (timestamp):
- return time.strftime ("%Y-%m-%d %H:%M UTC",time.gmtime(timestamp))
+ return time.strftime ("%Y-%m-%d %H:%M UTC", time.gmtime(timestamp))
@staticmethod
def lease_printable (lease):
leases=self.data['leases']
###
if reservation.debug:
- logger.log('reservation.granularity_callback now=%f round_now=%d arg=%d...'%(now,round_now,time_arg))
+ logger.log('reservation.granularity_callback now=%f round_now=%d arg=%d...'%(now, round_now, time_arg))
if leases and reservation.debug:
logger.log('reservation: Listing leases beg')
for lease in leases:
ending_lease=None
for lease in leases:
if lease['t_until']==round_now:
- logger.log('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
+ logger.log('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'], reservation.lease_printable(lease)))
ending_lease=lease
starting_lease=None
for lease in leases:
if lease['t_from']==round_now:
- logger.log('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
+ logger.log('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'], reservation.lease_printable(lease)))
starting_lease=lease
########## nothing is starting nor ending
logger.log("reservation.granularity_callback: suspending all slices")
self.suspend_all_slices()
- def debug_box(self,message,slicename=None):
+ def debug_box(self, message, slicename=None):
if reservation.debug:
logger.log ('reservation: '+message)
logger.log_call( ['/usr/sbin/vserver-stat', ] )
if slicename:
- logger.log_call ( ['/usr/sbin/vserver',slicename,'status', ])
+ logger.log_call ( ['/usr/sbin/vserver', slicename, 'status', ])
def is_running (self, slicename):
try:
# quick an d dirty - this does not obey the account/sliver_vs/controller hierarchy
def suspend_slice(self, slicename):
logger.log('reservation: Suspending slice %s'%(slicename))
- self.debug_box('before suspending',slicename)
+ self.debug_box('before suspending', slicename)
worker=account.get(slicename)
try:
logger.log("reservation: Located worker object %r"%worker)
# when the underlying worker is not entirely initialized yet
pass
except:
- logger.log_exc("reservation.suspend_slice: Could not stop slice through its worker",name=slicename)
+ logger.log_exc("reservation.suspend_slice: Could not stop slice through its worker", name=slicename)
# we hope the status line won't return anything
- self.debug_box('after suspending',slicename)
+ self.debug_box('after suspending', slicename)
# exclude can be a slicename or a list
# this probably should run in parallel
def suspend_all_slices (self, exclude=[]):
- if isinstance(exclude,str): exclude=[exclude,]
+ if isinstance(exclude, str): exclude=[exclude, ]
for sliver in self.data['slivers']:
# skip excluded
if sliver['name'] in exclude: continue
def restart_slice(self, slicename):
logger.log('reservation: Restarting slice %s'%(slicename))
- self.debug_box('before restarting',slicename)
+ self.debug_box('before restarting', slicename)
worker=account.get(slicename)
try:
# dig in self.data to retrieve corresponding rec
logger.log("reservation: Located record at the db %r"%record)
worker.start(record)
except:
- logger.log_exc("reservation.restart_slice: Could not start slice through its worker",name=slicename)
+ logger.log_exc("reservation.restart_slice: Could not start slice through its worker", name=slicename)
# we hope the status line won't return anything
- self.debug_box('after restarting',slicename)
+ self.debug_box('after restarting', slicename)
path = '/vservers/%s' % sliver['name']
if not os.path.exists(path):
# ignore all non-plc-instantiated slivers
- instantiation = sliver.get('instantiation','')
+ instantiation = sliver.get('instantiation', '')
if instantiation == 'plc-instantiated':
logger.log("sliverauth: plc-instantiated slice %s does not yet exist. IGNORING!" % sliver['name'])
continue
def SetSliverTag(plc, slice, tagname, value):
node_id = tools.node_id()
- slivertags=plc.GetSliceTags({"name":slice,"node_id":node_id,"tagname":tagname})
+ slivertags=plc.GetSliceTags({"name":slice, "node_id":node_id, "tagname":tagname})
if len(slivertags)==0:
# looks like GetSlivers reports about delegated/nm-controller slices that do *not* belong to this node
# and this is something that AddSliceTag does not like
try:
- slivertag_id=plc.AddSliceTag(slice,tagname,value,node_id)
+ slivertag_id=plc.AddSliceTag(slice, tagname, value, node_id)
except:
logger.log_exc ("sliverauth.SetSliverTag (probably delegated) slice=%(slice)s tag=%(tagname)s node_id=%(node_id)d"%locals())
pass
else:
slivertag_id=slivertags[0]['slice_tag_id']
- plc.UpdateSliceTag(slivertag_id,value)
+ plc.UpdateSliceTag(slivertag_id, value)
def find_tag (sliver, tagname):
for attribute in sliver['attributes']:
# for legacy, try the old-fashioned 'name' as well
- name = attribute.get('tagname',attribute.get('name',''))
+ name = attribute.get('tagname', attribute.get('name', ''))
if name == tagname:
return attribute['value']
return None
random.seed()
d = [random.choice(string.letters) for x in xrange(32)]
hmac = "".join(d)
- SetSliverTag(plc,sliver['name'],'hmac',hmac)
+ SetSliverTag(plc, sliver['name'], 'hmac', hmac)
logger.log("sliverauth: %s: setting hmac" % sliver['name'])
path = '/vservers/%s/etc/planetlab' % sliver['name']
if os.path.exists(path):
keyfile = '%s/key' % path
- if (tools.replace_file_with_string(keyfile,hmac,chmod=0400)):
+ if (tools.replace_file_with_string(keyfile, hmac, chmod=0400)):
logger.log ("sliverauth: (over)wrote hmac into %s " % keyfile)
# create the key if needed and returns the key contents
def generate_sshkey (sliver):
# initial version was storing stuff in the sliver directly
-# keyfile="/vservers/%s/home/%s/.ssh/id_rsa"%(sliver['name'],sliver['name'])
+# keyfile="/vservers/%s/home/%s/.ssh/id_rsa"%(sliver['name'], sliver['name'])
# we're now storing this in the same place as the authorized_keys, which in turn
# gets mounted to the user's home directory in the sliver
keyfile="/home/%s/.ssh/id_rsa"%(sliver['name'])
os.mkdir (dotssh, 0700)
logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), dotssh ] )
if not os.path.isfile(pubfile):
- comment="%s@%s"%(sliver['name'],socket.gethostname())
+ comment="%s@%s"%(sliver['name'], socket.gethostname())
logger.log_call( [ 'ssh-keygen', '-t', 'rsa', '-N', '', '-f', keyfile , '-C', comment] )
os.chmod (keyfile, 0400)
logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), keyfile, pubfile ] )
def GetSlivers(data, conf = None, plc = None):
if 'accounts' not in data:
- logger.log_missing_data("specialaccounts.GetSlivers",'accounts')
+ logger.log_missing_data("specialaccounts.GetSlivers", 'accounts')
return
for account in data['accounts']:
pw_dir = pw_info[5]
# populate account's .ssh/authorized_keys file
- dot_ssh = os.path.join(pw_dir,'.ssh')
+ dot_ssh = os.path.join(pw_dir, '.ssh')
if not os.access(dot_ssh, os.F_OK): os.mkdir(dot_ssh)
- auth_keys = os.path.join(dot_ssh,'authorized_keys')
+ auth_keys = os.path.join(dot_ssh, 'authorized_keys')
# catenate all keys in string, add newlines just in case (looks like keys already have this, but)
auth_keys_contents = '\n'.join(new_keys)+'\n'
- changes = tools.replace_file_with_string(auth_keys,auth_keys_contents)
+ changes = tools.replace_file_with_string(auth_keys, auth_keys_contents)
if changes:
logger.log("specialaccounts: keys file changed: %s" % auth_keys)
# always set permissions properly
os.chmod(dot_ssh, 0700)
- os.chown(dot_ssh, uid,gid)
+ os.chown(dot_ssh, uid, gid)
os.chmod(auth_keys, 0600)
- os.chown(auth_keys, uid,gid)
+ os.chown(auth_keys, uid, gid)
logger.log('specialaccounts: installed ssh keys for %s' % name)
node_id = tools.node_id()
if 'slivers' not in data:
- logger.log_missing_data("syndicate.GetSlivers",'slivers')
+ logger.log_missing_data("syndicate.GetSlivers", 'slivers')
return
syndicate_sliver = None
# TODO: what about the prefixlen? Should we also inform the prefixlen?
# here, I'm just taking the ipv6addr (value)
- value,prefixlen = tools.get_sliver_ipv6(slice['name'])
+ value, prefixlen = tools.get_sliver_ipv6(slice['name'])
node_id = tools.node_id()
- slivertags = plc.GetSliceTags({"name":slice['name'],"node_id":node_id,"tagname":tagname})
+ slivertags = plc.GetSliceTags({"name":slice['name'], "node_id":node_id, "tagname":tagname})
#logger.log(repr(str(slivertags)))
#for tag in slivertags:
# logger.log(repr(str(tag)))
try:
- slivertag_id,ipv6addr = get_sliver_tag_id_value(slivertags)
+ slivertag_id, ipv6addr = get_sliver_tag_id_value(slivertags)
except:
- slivertag_id,ipv6addr = None,None
+ slivertag_id, ipv6addr = None, None
logger.log("update_ipv6addr_slivertag: slice=%s getSliceIPv6Address=%s" % \
- (slice['name'],ipv6addr) )
+ (slice['name'], ipv6addr) )
# if the value to set is null...
if value is None:
if ipv6addr is not None:
if (ipv6addr is None) and len(value)>0:
try:
logger.log("update_ipv6addr_slivertag: slice name=%s" % (slice['name']) )
- slivertag_id=plc.AddSliceTag(slice['name'],tagname,value,node_id)
+ slivertag_id=plc.AddSliceTag(slice['name'], tagname, value, node_id)
logger.log("update_ipv6addr_slivertag: slice tag added to slice %s" % \
(slice['name']) )
except:
logger.log("update_ipv6addr_slivertag: could not set ipv6 addr tag to sliver. "+
- "slice=%s tag=%s node_id=%d" % (slice['name'],tagname,node_id) )
+ "slice=%s tag=%s node_id=%d" % (slice['name'], tagname, node_id) )
    # if the ipv6 addr set on the slice differs from the value provided, update it
if (ipv6addr is not None) and (len(value)>0) and (ipv6addr!=value):
- plc.UpdateSliceTag(slivertag_id,value)
+ plc.UpdateSliceTag(slivertag_id, value)
# ipv6 entry on /etc/hosts of each slice
result = tools.search_ipv6addr_hosts(slice['name'], value)
if not result:
"""
if 'slivers' not in data:
- logger.log_missing_data("vsys.GetSlivers",'slivers')
+ logger.log_missing_data("vsys.GetSlivers", 'slivers')
return
# Touch ACLs and create dict of available
_restart = False
# Parse attributes and update dict of scripts
if 'slivers' not in data:
- logger.log_missing_data("vsys.GetSlivers",'slivers')
+ logger.log_missing_data("vsys.GetSlivers", 'slivers')
return
for sliver in data['slivers']:
for attribute in sliver['attributes']:
for (root, dirs, files) in os.walk(VSYSBKEND):
for file in files:
if file.endswith(".acl") and not file.startswith("local_"):
- f = open(root+"/"+file,"r+")
- scriptname = file.replace(".acl", "")
- scriptacls[scriptname] = []
- for slice in f.readlines():
- scriptacls[scriptname].append(slice.rstrip())
- f.close()
+ with open(root+"/"+file, "r+") as f:
+ scriptname = file.replace(".acl", "")
+ scriptacls[scriptname] = []
+ for slice in f.readlines():
+ scriptacls[scriptname].append(slice.rstrip())
# return what scripts are configured for which slices.
return scriptacls
if (len(slivers) != len(oldslivers)) or \
(len(set(oldslivers) - set(slivers)) != 0):
logger.log("vsys: Updating %s" % VSYSCONF)
- f = open(VSYSCONF,"w")
+ f = open(VSYSCONF, "w")
for sliver in slivers:
f.write("/vservers/%(name)s/vsys %(name)s\n" % {"name": sliver})
f.truncate()
logger.log("vsys.trashVsysHandleInSliver: no action needed, %s not found"%slice_vsys_area)
return
retcod=subprocess.call([ 'rm', '-rf' , slice_vsys_area])
- logger.log ("vsys.trashVsysHandleInSliver: Removed %s (retcod=%s)"%(slice_vsys_area,retcod))
+ logger.log ("vsys.trashVsysHandleInSliver: Removed %s (retcod=%s)"%(slice_vsys_area, retcod))
def GetSlivers(data, config=None, plc=None):
if 'slivers' not in data:
- logger.log_missing_data("vsys_privs.GetSlivers",'slivers')
+ logger.log_missing_data("vsys_privs.GetSlivers", 'slivers')
return
# Parse attributes and update dict of scripts
if 'slivers' not in data:
- logger.log_missing_data("vsys_privs.GetSlivers",'slivers')
+ logger.log_missing_data("vsys_privs.GetSlivers", 'slivers')
return
for sliver in data['slivers']:
slice = sliver['name']
cur_privs={}
priv_finder = os.walk(VSYS_PRIV_DIR)
priv_find = [i for i in priv_finder]
- (rootdir,slices,foo) = priv_find[0]
+ (rootdir, slices, foo) = priv_find[0]
for slice in slices:
cur_privs[slice]={}
if (len(priv_find)>1):
- for (slicedir,bar,tagnames) in priv_find[1:]:
+ for (slicedir, bar, tagnames) in priv_find[1:]:
if (bar != []):
                # the vsys-privileges directory is expected to be exactly one level deep
pass
for tagname in tagnames:
- tagfilename = os.path.join(slicedir,tagname)
+ tagfilename = os.path.join(slicedir, tagname)
with open(tagfilename) as tagfile:
values_n = tagfile.readlines()
values = [ v.rstrip() for v in values_n ]
return cur_privs
-def write_privs(cur_privs,privs):
+def write_privs(cur_privs, privs):
for slice in privs.keys():
variables = privs[slice]
- slice_dir = os.path.join(VSYS_PRIV_DIR,slice)
+ slice_dir = os.path.join(VSYS_PRIV_DIR, slice)
if (not os.path.exists(slice_dir)):
os.mkdir(slice_dir)
pass
else:
v_file = os.path.join(slice_dir, k)
- f = open(v_file,'w')
+ f = open(v_file, 'w')
data = '\n'.join(v)
f.write(data)
f.close()
- logger.log("vsys_privs: added vsys attribute %s for %s"%(k,slice))
+ logger.log("vsys_privs: added vsys attribute %s for %s"%(k, slice))
# Remove files and directories
# that are invalid
for slice in cur_privs.keys():
variables = cur_privs[slice]
- slice_dir = os.path.join(VSYS_PRIV_DIR,slice)
+ slice_dir = os.path.join(VSYS_PRIV_DIR, slice)
        # Remove values that no longer exist (spare tags still present in privs)
for k in variables.keys():
if (privs.has_key(slice)
and cur_privs[slice].has_key(k)):
# ok, spare this tag
- print "Sparing %s, %s "%(slice,k)
+ print "Sparing %s, %s "%(slice, k)
else:
v_file = os.path.join(slice_dir, k)
os.remove(v_file)
if __name__ == "__main__":
test_slivers = {'slivers':[
- {'name':'foo','attributes':[
- {'tagname':'vsys_m','value':'2'},
- {'tagname':'vsys_m','value':'3'},
- {'tagname':'vsys_m','value':'4'}
+ {'name':'foo', 'attributes':[
+ {'tagname':'vsys_m', 'value':'2'},
+ {'tagname':'vsys_m', 'value':'3'},
+ {'tagname':'vsys_m', 'value':'4'}
]},
- {'name':'bar','attributes':[
- #{'tagname':'vsys_x','value':'z'}
+ {'name':'bar', 'attributes':[
+ #{'tagname':'vsys_x', 'value':'z'}
]}
]}
- start(None,None)
+ start(None, None)
GetSlivers(test_slivers)
"""For each sliver with the vsys attribute, set the script ACL, create the vsys directory in the slice, and restart vsys."""
if 'slivers' not in data:
- logger.log_missing_data("vsys.GetSlivers",'slivers')
+ logger.log_missing_data("vsys.GetSlivers", 'slivers')
return
slices = []
except:
logger.log("vsys_sysctl: failed to create dir %s" % dir)
- (junk, key) = attribute['tagname'].split(".",1)
+ (junk, key) = attribute['tagname'].split(".", 1)
value = str(attribute['value'])
fn = os.path.join(dir, key)
# slice. This lets us know that we've done the sysctl.
try:
logger.log("vsys_sysctl: create file %s value %s" % (fn, value))
- with open(fn,"w") as f:
+ with open(fn, "w") as f:
f.write(value+"\n")
except:
logger.log("vsys_sysctl: failed to create file %s" % fn)
def test_value(fn, value):
try:
- slice_value = file(fn,"r").readline().strip()
+ slice_value = file(fn, "r").readline().strip()
except:
slice_value = None
responsible for handling delegation accounts.
"""
-import string,re
+import string
+import re
import time
import logger
# Take initscripts (global) returned by API, build a hash scriptname->code
iscripts_hash = {}
if 'initscripts' not in data:
- logger.log_missing_data("slivermanager.GetSlivers",'initscripts')
+ logger.log_missing_data("slivermanager.GetSlivers", 'initscripts')
return
for initscript_rec in data['initscripts']:
logger.verbose("slivermanager: initscript: %s" % initscript_rec['name'])
### set initscripts; set empty rec['initscript'] if not
# if tag 'initscript_code' is set, that's what we use
- iscode = attributes.get('initscript_code','')
+ iscode = attributes.get('initscript_code', '')
if iscode:
rec['initscript']=iscode
else: