From d3a3b2d3ea98e72183d1cb5497c38badaa0c5863 Mon Sep 17 00:00:00 2001 From: =?utf8?q?S=2E=C3=87a=C4=9Flar=20Onur?= Date: Tue, 22 Jun 2010 21:26:44 +0000 Subject: [PATCH] - Change .py files to use 4-space indents and no hard tab characters. - Trim excess spaces and tabs from ends of lines, and remove empty lines at the end of files. - Ensure the last line ends with a newline. --- accounts.py | 12 +-- api.py | 8 +- api_calls.py | 80 ++++++++++---------- bootauth.py | 2 +- bwmon.py | 102 +++++++++++++------------- conf_files.py | 4 +- controller.py | 4 +- curlwrapper.py | 14 ++-- database.py | 40 +++++----- logger.py | 12 +-- net.py | 10 +-- nodemanager.py | 74 +++++++++---------- plcapi.py | 8 +- plugins/codemux.py | 20 ++--- plugins/drl.py | 146 ++++++++++++++++++------------------- plugins/omf_resctl.py | 2 +- plugins/rawdisk.py | 6 +- plugins/reservation.py | 27 ++++--- plugins/sliverauth.py | 5 +- plugins/specialaccounts.py | 4 +- plugins/vsys.py | 14 ++-- plugins/vsys_privs.py | 12 +-- sliver_vs.py | 36 ++++----- slivermanager.py | 4 +- ticket.py | 5 +- tools.py | 4 +- 26 files changed, 326 insertions(+), 329 deletions(-) diff --git a/accounts.py b/accounts.py index ffb9d52..e9191d5 100644 --- a/accounts.py +++ b/accounts.py @@ -3,9 +3,9 @@ """Functionality common to all account classes. -Each subclass of Account must provide five methods: - (*) create() and destroy(), which are static; - (*) configure(), start(), and stop(), which are not. +Each subclass of Account must provide five methods: + (*) create() and destroy(), which are static; + (*) configure(), start(), and stop(), which are not. configure(), which takes a record as its only argument, does things like set up ssh keys. In addition, an Account subclass must @@ -132,7 +132,7 @@ class Worker: self._acct = None # the account object currently associated with this worker def ensure_created(self, rec, startingup = Startingup): - """Check account type is still valid. If not, recreate sliver. + """Check account type is still valid. If not, recreate sliver. If still valid, check if running and configure/start if not.""" logger.log_data_in_file(rec,"/var/lib/nodemanager/%s.rec.txt"%rec['name'], 'raw rec captured in ensure_created',logger.LOG_VERBOSE) @@ -153,13 +153,13 @@ If still valid, check if running and configure/start if not.""" def ensure_destroyed(self): self._destroy(self._get_class()) - def start(self, rec, d = 0): + def start(self, rec, d = 0): self._acct.configure(rec) self._acct.start(delay=d) def stop(self): self._acct.stop() - def is_running(self): + def is_running(self): if (self._acct != None) and self._acct.is_running(): status = True else: diff --git a/api.py b/api.py index b9ef8d1..f920699 100644 --- a/api.py +++ b/api.py @@ -57,7 +57,7 @@ class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): raise xmlrpclib.Fault(100, 'Invalid API method %s. Valid choices are %s' % \ (method_name, ', '.join(api_method_list))) expected_nargs = nargs_dict[method_name] - if len(args) != expected_nargs: + if len(args) != expected_nargs: raise xmlrpclib.Fault(101, 'Invalid argument count: got %d, expecting %d.' % \ (len(args), expected_nargs)) else: @@ -76,7 +76,7 @@ class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): elif method_name in ('Help', 'Ticket', 'GetXIDs', 'GetSSHKeys'): try: result = method(*args) except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err) - else: # Execute anonymous call. + else: # Execute anonymous call. 
# Authenticate the caller if not in the above fncts. if method_name == "GetRecord": target_name = caller_name @@ -87,11 +87,11 @@ class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): target_rec = database.db.get(target_name) # only work on slivers or self. Sanity check. - if not (target_rec and target_rec['type'].startswith('sliver.')): + if not (target_rec and target_rec['type'].startswith('sliver.')): raise xmlrpclib.Fault(102, \ 'Invalid argument: the first argument must be a sliver name.') - # only manipulate slivers who delegate you authority + # only manipulate slivers who delegate you authority if caller_name in (target_name, target_rec['delegations']): try: result = method(target_rec, *args[1:]) except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err) diff --git a/api_calls.py b/api_calls.py index a175391..fa6ce22 100644 --- a/api_calls.py +++ b/api_calls.py @@ -31,9 +31,9 @@ except: import accounts import logger -# TODO: These try/excepts are a hack to allow doc/DocBookLocal.py to -# import this file in order to extract the documentation from each -# exported function. +# TODO: These try/excepts are a hack to allow doc/DocBookLocal.py to +# import this file in order to extract the documentation from each +# exported function. # A better approach will involve more extensive code splitting, I think. try: import database except: import logger as database @@ -94,27 +94,27 @@ def export_to_docbook(**kwargs): # accepts, # returns -@export_to_docbook(roles=['self'], - accepts=[], +@export_to_docbook(roles=['self'], + accepts=[], returns=Parameter([], 'A list of supported functions')) @export_to_api(0) def Help(): """Get a list of functions currently supported by the Node Manager API""" names=api_method_dict.keys() names.sort() - return ''.join(['**** ' + api_method_dict[name].__name__ + '\n' + api_method_dict[name].__doc__ + '\n' + return ''.join(['**** ' + api_method_dict[name].__name__ + '\n' + api_method_dict[name].__doc__ + '\n' for name in names]) -@export_to_docbook(roles=['self'], - accepts=[Parameter(str, 'A ticket returned from GetSliceTicket()')], +@export_to_docbook(roles=['self'], + accepts=[Parameter(str, 'A ticket returned from GetSliceTicket()')], returns=Parameter(int, '1 if successful')) @export_to_api(1) def Ticket(ticket): """The Node Manager periodically polls the PLC API for a list of all - slices that are allowed to exist on the given node. Before + slices that are allowed to exist on the given node. Before actions are performed on a delegated slice (such as creation), - a controller slice must deliver a valid slice ticket to NM. - + a controller slice must deliver a valid slice ticket to NM. 
+ This ticket is the value retured by PLC's GetSliceTicket() API call.""" try: data = ticket_module.verify(ticket) @@ -126,8 +126,8 @@ def Ticket(ticket): except Exception, err: raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err)) -@export_to_docbook(roles=['self'], - accepts=[Parameter(str, 'A ticket returned from GetSlivers()')], +@export_to_docbook(roles=['self'], + accepts=[Parameter(str, 'A ticket returned from GetSlivers()')], returns=Parameter(int, '1 if successful')) @export_to_api(1) def AdminTicket(ticket): @@ -144,7 +144,7 @@ def AdminTicket(ticket): @export_to_docbook(roles=['self'], - accepts=[], + accepts=[], returns={'sliver_name' : Parameter(int, 'the associated xid')}) @export_to_api(0) def GetXIDs(): @@ -152,7 +152,7 @@ def GetXIDs(): return dict([(pwent[0], pwent[2]) for pwent in pwd.getpwall() if pwent[6] == sliver_vs.Sliver_VS.SHELL]) @export_to_docbook(roles=['self'], - accepts=[], + accepts=[], returns={ 'sliver_name' : Parameter(str, 'the associated SSHKey')}) @export_to_api(0) def GetSSHKeys(): @@ -164,36 +164,36 @@ def GetSSHKeys(): return keydict -@export_to_docbook(roles=['nm-controller', 'self'], - accepts=[Parameter(str, 'A sliver/slice name.')], +@export_to_docbook(roles=['nm-controller', 'self'], + accepts=[Parameter(str, 'A sliver/slice name.')], returns=Parameter(int, '1 if successful')) @export_to_api(1) def Create(sliver_name): """Create a non-PLC-instantiated sliver""" rec = sliver_name - if rec['instantiation'] == 'delegated': + if rec['instantiation'] == 'delegated': accounts.get(rec['name']).ensure_created(rec) logger.log("api_calls: Create %s"%rec['name']) - else: + else: raise Exception, "Only PLC can create non delegated slivers." -@export_to_docbook(roles=['nm-controller', 'self'], - accepts=[Parameter(str, 'A sliver/slice name.')], +@export_to_docbook(roles=['nm-controller', 'self'], + accepts=[Parameter(str, 'A sliver/slice name.')], returns=Parameter(int, '1 if successful')) @export_to_api(1) def Destroy(sliver_name): """Destroy a non-PLC-instantiated sliver""" - rec = sliver_name - if rec['instantiation'] == 'delegated': + rec = sliver_name + if rec['instantiation'] == 'delegated': accounts.get(rec['name']).ensure_destroyed() logger.log("api_calls: Destroy %s"%rec['name']) - else: + else: raise Exception, "Only PLC can destroy non delegated slivers." 
-@export_to_docbook(roles=['nm-controller', 'self'], - accepts=[Parameter(str, 'A sliver/slice name.')], +@export_to_docbook(roles=['nm-controller', 'self'], + accepts=[Parameter(str, 'A sliver/slice name.')], returns=Parameter(int, '1 if successful')) @export_to_api(1) def Start(sliver_name): @@ -203,8 +203,8 @@ def Start(sliver_name): logger.log("api_calls: Start %s"%rec['name']) -@export_to_docbook(roles=['nm-controller', 'self'], - accepts=[Parameter(str, 'A sliver/slice name.')], +@export_to_docbook(roles=['nm-controller', 'self'], + accepts=[Parameter(str, 'A sliver/slice name.')], returns=Parameter(int, '1 if successful')) @export_to_api(1) def Stop(sliver_name): @@ -214,8 +214,8 @@ def Stop(sliver_name): logger.log("api_calls: Stop %s"%rec['name']) -@export_to_docbook(roles=['nm-controller', 'self'], - accepts=[Parameter(str, 'A sliver/slice name.')], +@export_to_docbook(roles=['nm-controller', 'self'], + accepts=[Parameter(str, 'A sliver/slice name.')], returns=Parameter(int, '1 if successful')) @export_to_api(1) def ReCreate(sliver_name): @@ -226,8 +226,8 @@ def ReCreate(sliver_name): accounts.get(rec['name']).start(rec) logger.log("api_calls: ReCreate %s"%rec['name']) -@export_to_docbook(roles=['nm-controller', 'self'], - accepts=[Parameter(str, 'A sliver/slice name.')], +@export_to_docbook(roles=['nm-controller', 'self'], + accepts=[Parameter(str, 'A sliver/slice name.')], returns=Parameter(dict, "A resource specification")) @export_to_api(1) def GetEffectiveRSpec(sliver_name): @@ -236,8 +236,8 @@ def GetEffectiveRSpec(sliver_name): return rec.get('_rspec', {}).copy() -@export_to_docbook(roles=['nm-controller', 'self'], - accepts=[Parameter(str, 'A sliver/slice name.')], +@export_to_docbook(roles=['nm-controller', 'self'], + accepts=[Parameter(str, 'A sliver/slice name.')], returns={"resource name" : Parameter(int, "amount")}) @export_to_api(1) def GetRSpec(sliver_name): @@ -246,8 +246,8 @@ def GetRSpec(sliver_name): return rec.get('rspec', {}).copy() -@export_to_docbook(roles=['nm-controller', 'self'], - accepts=[Parameter(str, 'A sliver/slice name.')], +@export_to_docbook(roles=['nm-controller', 'self'], + accepts=[Parameter(str, 'A sliver/slice name.')], returns=[Mixed(Parameter(str, 'recipient slice name'), Parameter(str, 'resource name'), Parameter(int, 'resource amount'))]) @@ -260,13 +260,13 @@ def GetLoans(sliver_name): def validate_loans(loans): """Check that is a list of valid loan specifications.""" - def validate_loan(loan): + def validate_loan(loan): return (type(loan)==list or type(loan)==tuple) and len(loan)==3 \ and type(loan[0])==str and type(loan[1])==str and loan[1] in database.LOANABLE_RESOURCES and type(loan[2])==int and loan[2]>=0 return type(loans)==list and False not in [validate_loan(load) for loan in loans] -@export_to_docbook(roles=['nm-controller', 'self'], +@export_to_docbook(roles=['nm-controller', 'self'], accepts=[ Parameter(str, 'A sliver/slice name.'), [Mixed(Parameter(str, 'recipient slice name'), Parameter(str, 'resource name'), @@ -282,12 +282,12 @@ def SetLoans(sliver_name, loans): in the future. 
As well, there is currently no asynchronous notification of loss of resources.""" rec = sliver_name - if not validate_loans(loans): + if not validate_loans(loans): raise xmlrpclib.Fault(102, 'Invalid argument: the second argument must be a well-formed loan specification') rec['_loans'] = loans database.db.sync() -@export_to_docbook(roles=['nm-controller', 'self'], +@export_to_docbook(roles=['nm-controller', 'self'], returns=Parameter(dict, 'Record dictionary')) @export_to_api(0) def GetRecord(sliver_name): diff --git a/bootauth.py b/bootauth.py index db3be47..445891e 100755 --- a/bootauth.py +++ b/bootauth.py @@ -4,7 +4,7 @@ # $URL$ # # Test script for obtaining a node session key. Usually, the Boot -# Manager obtains it, then writes it to /etc/planetlab/session. +# Manager obtains it, then writes it to /etc/planetlab/session. # # Mark Huang # Copyright (C) 2006 The Trustees of Princeton University diff --git a/bwmon.py b/bwmon.py index 855767e..801e02e 100644 --- a/bwmon.py +++ b/bwmon.py @@ -57,14 +57,14 @@ dev_default = tools.get_default_if() # Burst to line rate (or node cap). Set by NM. in KBit/s default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000) default_Maxi2Rate = int(bwlimit.bwmax / 1000) -# 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G +# 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G # 5.4 Gbyte per day max allowed transfered per recording period # 5.4 Gbytes per day is aprox 512k/s for 24hrs (approx because original math was wrong # but its better to keep a higher byte total and keep people happy than correct # the problem and piss people off. # default_MaxKByte = 5662310 -# -- 6/1/09 +# -- 6/1/09 # llp wants to double these, so we use the following # 1mbit * 24hrs * 60mins * 60secs = bits/day # 1000000 * 24 * 60 * 60 / (1024 * 8) @@ -187,9 +187,9 @@ class Slice: i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F) MaxKByte - total volume of data allowed ThreshKbyte - After thresh, cap node to (maxkbyte - bytes)/(time left in period) - Maxi2KByte - same as MaxKByte, but for i2 - Threshi2Kbyte - same as Threshi2KByte, but for i2 - MaxRate - max_rate slice attribute. + Maxi2KByte - same as MaxKByte, but for i2 + Threshi2Kbyte - same as Threshi2KByte, but for i2 + MaxRate - max_rate slice attribute. Maxi2Rate - max_exempt_rate slice attribute. 
Share - Used by Sirius to loan min rates Sharei2 - Used by Sirius to loan min rates for i2 @@ -217,9 +217,9 @@ class Slice: self.capped = False self.updateSliceTags(rspec) - bwlimit.set(xid = self.xid, - minrate = self.MinRate * 1000, - maxrate = self.MaxRate * 1000, + bwlimit.set(xid = self.xid, + minrate = self.MinRate * 1000, + maxrate = self.MaxRate * 1000, maxexemptrate = self.Maxi2Rate * 1000, minexemptrate = self.Mini2Rate * 1000, share = self.Share) @@ -249,34 +249,34 @@ class Slice: Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin / 1000)) if Mini2Rate != self.Mini2Rate: - self.Mini2Rate = Mini2Rate + self.Mini2Rate = Mini2Rate logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate)) Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate)) if Maxi2Rate != self.Maxi2Rate: self.Maxi2Rate = Maxi2Rate logger.log("bwmon: Updating %s: Max i2 Rate = %s" %(self.name, self.Maxi2Rate)) - + MaxKByte = int(rspec.get('net_max_kbyte', default_MaxKByte)) if MaxKByte != self.MaxKByte: self.MaxKByte = MaxKByte logger.log("bwmon: Updating %s: Max KByte lim = %s" %(self.name, self.MaxKByte)) - + Maxi2KByte = int(rspec.get('net_i2_max_kbyte', default_Maxi2KByte)) if Maxi2KByte != self.Maxi2KByte: self.Maxi2KByte = Maxi2KByte logger.log("bwmon: Updating %s: Max i2 KByte = %s" %(self.name, self.Maxi2KByte)) - + ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8))) if ThreshKByte != self.ThreshKByte: self.ThreshKByte = ThreshKByte logger.log("bwmon: Updating %s: Thresh KByte = %s" %(self.name, self.ThreshKByte)) - + Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8))) - if Threshi2KByte != self.Threshi2KByte: + if Threshi2KByte != self.Threshi2KByte: self.Threshi2KByte = Threshi2KByte logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" %(self.name, self.Threshi2KByte)) - + Share = int(rspec.get('net_share', default_Share)) if Share != self.Share: self.Share = Share @@ -284,7 +284,7 @@ class Slice: Sharei2 = int(rspec.get('net_i2_share', default_Share)) if Sharei2 != self.Sharei2: - self.Sharei2 = Sharei2 + self.Sharei2 = Sharei2 logger.log("bwmon: Updating %s: Net i2 Share = %s" %(self.name, self.i2Share)) @@ -297,7 +297,7 @@ class Slice: self.Share = runningrates.get('share', 1) # Query Node Manager for max rate overrides - self.updateSliceTags(rspec) + self.updateSliceTags(rspec) # Reset baseline time self.time = time.time() @@ -306,13 +306,13 @@ class Slice: self.bytes = runningrates.get('usedbytes', 0) self.i2bytes = runningrates.get('usedi2bytes', 0) - # Reset email + # Reset email self.emailed = False # Reset flag self.capped = False # Reset rates. 
- maxrate = self.MaxRate * 1000 - minrate = self.MinRate * 1000 + maxrate = self.MaxRate * 1000 + minrate = self.MinRate * 1000 maxi2rate = self.Maxi2Rate * 1000 mini2rate = self.Mini2Rate * 1000 @@ -326,8 +326,8 @@ class Slice: bwlimit.format_tc_rate(maxrate), bwlimit.format_tc_rate(maxi2rate))) bwlimit.set(xid = self.xid, dev = dev_default, - minrate = self.MinRate * 1000, - maxrate = self.MaxRate * 1000, + minrate = self.MinRate * 1000, + maxrate = self.MaxRate * 1000, maxexemptrate = self.Maxi2Rate * 1000, minexemptrate = self.Mini2Rate * 1000, share = self.Share) @@ -361,10 +361,10 @@ class Slice: params['bytes'] = format_bytes(usedi2bytes - self.i2bytes) params['limit'] = format_bytes(self.Maxi2KByte * 1024) params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate) - + message += template % params logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params) - + # Notify slice if self.emailed == False: subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params @@ -380,14 +380,14 @@ class Slice: def update(self, runningrates, rspec): """ Update byte counts and check if byte thresholds have been - exceeded. If exceeded, cap to remaining bytes in limit over remaining time in period. + exceeded. If exceeded, cap to remaining bytes in limit over remaining time in period. Recalculate every time module runs. """ # cache share for later comparison runningrates['share'] = self.Share # Query Node Manager for max rate overrides - self.updateSliceTags(rspec) + self.updateSliceTags(rspec) usedbytes = runningrates['usedbytes'] usedi2bytes = runningrates['usedi2bytes'] @@ -409,7 +409,7 @@ class Slice: # Sanity Check new_maxrate = self.MaxRate * 1000 self.capped += False - + if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)): maxi2byte = self.Maxi2KByte * 1024 i2bytesused = usedi2bytes - self.i2bytes @@ -434,8 +434,8 @@ class Slice: (runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \ (runningrates['share'] != self.Share): # Apply parameters - bwlimit.set(xid = self.xid, - minrate = self.MinRate * 1000, + bwlimit.set(xid = self.xid, + minrate = self.MinRate * 1000, maxrate = new_maxrate, minexemptrate = self.Mini2Rate * 1000, maxexemptrate = new_maxi2rate, @@ -457,7 +457,7 @@ def gethtbs(root_xid, default_xid): minrate, maxrate, minexemptrate, maxexemptrate, usedbytes, usedi2bytes) = params - + name = bwlimit.get_slice(xid) if (name is None) \ @@ -474,15 +474,15 @@ def gethtbs(root_xid, default_xid): 'maxexemptrate': maxexemptrate, 'minexemptrate': minexemptrate, 'usedbytes': usedbytes, - 'name': name, + 'name': name, 'usedi2bytes': usedi2bytes} return livehtbs def sync(nmdbcopy): """ - Syncs tc, db, and bwmon.pickle. - Then, starts new slices, kills old ones, and updates byte accounts for each running slice. + Syncs tc, db, and bwmon.pickle. + Then, starts new slices, kills old ones, and updates byte accounts for each running slice. Sends emails and caps those that went over their limit. """ # Defaults @@ -496,7 +496,7 @@ def sync(nmdbcopy): # All slices names = [] - # In case the limits have changed. + # In case the limits have changed. default_MaxRate = int(bwlimit.get_bwcap() / 1000) default_Maxi2Rate = int(bwlimit.bwmax / 1000) @@ -522,12 +522,12 @@ def sync(nmdbcopy): root_xid = bwlimit.get_xid("root") default_xid = bwlimit.get_xid("default") - # Since root is required for sanity, its not in the API/plc database, so pass {} + # Since root is required for sanity, its not in the API/plc database, so pass {} # to use defaults. 
if root_xid not in slices.keys(): slices[root_xid] = Slice(root_xid, "root", {}) slices[root_xid].reset({}, {}) - + # Used by bwlimit. pass {} since there is no rspec (like above). if default_xid not in slices.keys(): slices[default_xid] = Slice(default_xid, "default", {}) @@ -552,7 +552,7 @@ def sync(nmdbcopy): logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__()) # Reset tc counts. for nohtbslice in nohtbslices: - if live.has_key(nohtbslice): + if live.has_key(nohtbslice): slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] ) else: logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice) @@ -562,19 +562,19 @@ def sync(nmdbcopy): slicesnodat = set(kernelhtbs.keys()) - set(slices.keys()) logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__()) for slicenodat in slicesnodat: - # But slice is running - if live.has_key(slicenodat): + # But slice is running + if live.has_key(slicenodat): # init the slice. which means start accounting over since kernel # htb was already there. - slices[slicenodat] = Slice(slicenodat, - live[slicenodat]['name'], + slices[slicenodat] = Slice(slicenodat, + live[slicenodat]['name'], live[slicenodat]['_rspec']) # Get new slices. # Slices in GetSlivers but not running HTBs newslicesxids = set(live.keys()) - set(kernelhtbs.keys()) logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__()) - + # Setup new slices for newslice in newslicesxids: # Delegated slices dont have xids (which are uids) since they haven't been @@ -599,7 +599,7 @@ def sync(nmdbcopy): "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000, "usedbytes": deadslice['htb']['usedbytes'] * 1000, "usedi2bytes": deadslice['htb']['usedi2bytes'], - "share":deadslice['htb']['share']} + "share":deadslice['htb']['share']} slices[newslice].reset(newvals, live[newslice]['_rspec']) # Bring up to date slices[newslice].update(newvals, live[newslice]['_rspec']) @@ -624,10 +624,10 @@ def sync(nmdbcopy): logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name) deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]} del slices[deadxid] - if kernelhtbs.has_key(deadxid): + if kernelhtbs.has_key(deadxid): logger.verbose("bwmon: Removing HTB for %s." % deadxid) bwlimit.off(deadxid) - + # Clean up deaddb for deadslice in deaddb.keys(): if (time.time() >= (deaddb[deadslice]['slice'].time + period)): @@ -646,7 +646,7 @@ def sync(nmdbcopy): if xid == root_xid or xid == default_xid: continue if names and name not in names: continue - + if (time.time() >= (slice.time + period)) or \ (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \ (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes): @@ -674,7 +674,7 @@ def getDefaults(nmdbcopy): status = True # default slice dfltslice = nmdbcopy.get(PLC_SLICE_PREFIX+"_default") - if dfltslice: + if dfltslice: if dfltslice['rspec']['net_max_rate'] == -1: allOff() status = False @@ -691,13 +691,13 @@ def allOff(): kernelhtbs = gethtbs(root_xid, default_xid) if len(kernelhtbs): logger.log("bwmon: Disabling all running HTBs.") - for htb in kernelhtbs.keys(): bwlimit.off(htb) + for htb in kernelhtbs.keys(): bwlimit.off(htb) lock = threading.Event() def run(): """ - When run as a thread, wait for event, lock db, deep copy it, release it, + When run as a thread, wait for event, lock db, deep copy it, release it, run bwmon.GetSlivers(), then go back to waiting. 
""" logger.verbose("bwmon: Thread started") @@ -707,7 +707,7 @@ def run(): database.db_lock.acquire() nmdbcopy = copy.deepcopy(database.db) database.db_lock.release() - try: + try: if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0: # class show to check if net:InitNodeLimit:bwlimit.init has run. sync(nmdbcopy) @@ -719,5 +719,5 @@ def start(*args): tools.as_daemon_thread(run) def GetSlivers(*args): - logger.verbose ("bwmon: triggering dummy GetSlivers") + logger.verbose ("bwmon: triggering dummy GetSlivers") pass diff --git a/conf_files.py b/conf_files.py index 4801979..a858a03 100644 --- a/conf_files.py +++ b/conf_files.py @@ -16,7 +16,7 @@ import curlwrapper import logger import tools import xmlrpclib -from config import Config +from config import Config # right after net priority = 2 @@ -85,7 +85,7 @@ class conf_files: for f in data['conf_files']: try: self.update_conf_file(f) except: logger.log_exc("conf_files: failed to update conf_file") - else: + else: logger.log_missing_data("conf_files.run_once",'conf_files') diff --git a/controller.py b/controller.py index feca5e9..826ca49 100644 --- a/controller.py +++ b/controller.py @@ -1,7 +1,7 @@ # $Id$ # $URL$ -"""Delegate accounts are used to provide secure access to the XMLRPC API. +"""Delegate accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server.""" from pwd import getpwnam @@ -27,7 +27,7 @@ class Controller(accounts.Account): def is_running(self): logger.verbose("controller: is_running: %s" % self.name) return getpwnam(self.name)[6] == self.SHELL - + def add_shell(shell): """Add to /etc/shells if it's not already there.""" diff --git a/curlwrapper.py b/curlwrapper.py index da5810c..cf132b7 100644 --- a/curlwrapper.py +++ b/curlwrapper.py @@ -20,17 +20,17 @@ def retrieve(url, cacert=None, postdata=None, timeout=90): # do not follow location when attempting to download a file # curl.setopt(pycurl.FOLLOWLOCATION, 0) - # store result on the fly + # store result on the fly buffer=StringIO() curl.setopt(pycurl.WRITEFUNCTION,buffer.write) - + # set timeout - if timeout: + if timeout: curl.setopt(pycurl.CONNECTTIMEOUT, timeout) curl.setopt(pycurl.TIMEOUT, timeout) # set cacert - if cacert: + if cacert: curl.setopt(pycurl.CAINFO, cacert) curl.setopt(pycurl.SSL_VERIFYPEER, 2) else: @@ -41,7 +41,7 @@ def retrieve(url, cacert=None, postdata=None, timeout=90): if isinstance(postdata,dict): postfields = urllib.urlencode(postdata) else: - postfields=postdata + postfields=postdata curl.setopt(pycurl.POSTFIELDS, postfields) # go @@ -52,8 +52,8 @@ def retrieve(url, cacert=None, postdata=None, timeout=90): curl.close() # check the code, return 1 if successfull - if errcode == 60: - raise xmlrpclib.ProtocolError (url,errcode, "SSL certificate validation failed", postdata) + if errcode == 60: + raise xmlrpclib.ProtocolError (url,errcode, "SSL certificate validation failed", postdata) elif errcode != 200: raise xmlrpclib.ProtocolError (url,errcode, "http error %d"%errcode, postdata) diff --git a/database.py b/database.py index 1985b58..c990364 100644 --- a/database.py +++ b/database.py @@ -26,12 +26,12 @@ import bwmon # We enforce minimum allocations to keep the clueless from hosing their slivers. # Disallow disk loans because there's currently no way to punish slivers over quota. 
-MINIMUM_ALLOCATION = {'cpu_pct': 0, - 'cpu_share': 1, - 'net_min_rate': 0, - 'net_max_rate': 8, - 'net_i2_min_rate': 0, - 'net_i2_max_rate': 8, +MINIMUM_ALLOCATION = {'cpu_pct': 0, + 'cpu_share': 1, + 'net_min_rate': 0, + 'net_max_rate': 8, + 'net_i2_min_rate': 0, + 'net_i2_max_rate': 8, 'net_share': 1, } LOANABLE_RESOURCES = MINIMUM_ALLOCATION.keys() @@ -64,12 +64,12 @@ class Database(dict): self._min_timestamp = 0 def _compute_effective_rspecs(self): - """Calculate the effects of loans and store the result in field _rspec. + """Calculate the effects of loans and store the result in field _rspec. At the moment, we allow slivers to loan only those resources that they have received directly from PLC. -In order to do the accounting, we store three different rspecs: - * field 'rspec', which is the resources given by PLC; - * field '_rspec', which is the actual amount of resources the sliver has after all loans; - * and variable resid_rspec, which is the amount of resources the sliver +In order to do the accounting, we store three different rspecs: + * field 'rspec', which is the resources given by PLC; + * field '_rspec', which is the actual amount of resources the sliver has after all loans; + * and variable resid_rspec, which is the amount of resources the sliver has after giving out loans but not receiving any.""" slivers = {} for name, rec in self.iteritems(): @@ -100,8 +100,8 @@ keys.""" old_rec.update(rec) def set_min_timestamp(self, ts): - """The ._min_timestamp member is the timestamp on the last comprehensive update. -We use it to determine if a record is stale. + """The ._min_timestamp member is the timestamp on the last comprehensive update. +We use it to determine if a record is stale. This method should be called whenever new GetSlivers() data comes in.""" self._min_timestamp = ts for name, rec in self.items(): @@ -124,27 +124,27 @@ It may be necessary in the future to do something smarter.""" logger.verbose("database: sync : fetching accounts") existing_acct_names = accounts.all() for name in existing_acct_names: - if name not in self: + if name not in self: logger.verbose("database: sync : ensure_destroy'ing %s"%name) accounts.get(name).ensure_destroyed() for name, rec in self.iteritems(): - # protect this; if anything fails for a given sliver + # protect this; if anything fails for a given sliver # we still need the other ones to be handled try: sliver = accounts.get(name) logger.verbose("database: sync : looping on %s (shell account class from pwd %s)" %(name,sliver._get_class())) # Make sure we refresh accounts that are running - if rec['instantiation'] == 'plc-instantiated': + if rec['instantiation'] == 'plc-instantiated': logger.verbose ("database: sync : ensure_create'ing 'instantiation' sliver %s"%name) sliver.ensure_created(rec) - elif rec['instantiation'] == 'nm-controller': + elif rec['instantiation'] == 'nm-controller': logger.verbose ("database: sync : ensure_create'ing 'nm-controller' sliver %s"%name) sliver.ensure_created(rec) # Back door to ensure PLC overrides Ticket in delegation. elif rec['instantiation'] == 'delegated' and sliver._get_class() != None: # if the ticket has been delivered and the nm-controller started the slice # update rspecs and keep them up to date. 
- if sliver.is_running(): + if sliver.is_running(): logger.verbose ("database: sync : ensure_create'ing 'delegated' sliver %s"%name) sliver.ensure_created(rec) except: @@ -169,10 +169,10 @@ It proceeds to handle dump requests forever.""" db_pickle = cPickle.dumps(db, cPickle.HIGHEST_PROTOCOL) dump_requested = False db_lock.release() - try: + try: tools.write_file(DB_FILE, lambda f: f.write(db_pickle)) logger.log_database(db) - except: + except: logger.log_exc("database.start: failed to pickle/dump") global db try: diff --git a/logger.py b/logger.py index a252ef3..b44abe2 100644 --- a/logger.py +++ b/logger.py @@ -43,7 +43,7 @@ def log(msg,level=LOG_NODE): def log_exc(msg="",name=None): """Log the traceback resulting from an exception.""" - if name: + if name: log("%s: EXCEPTION caught <%s> \n %s" %(name, msg, traceback.format_exc())) else: log("EXCEPTION caught <%s> \n %s" %(msg, traceback.format_exc())) @@ -77,14 +77,14 @@ def log_database (db): log_data_in_file (db, LOG_DATABASE, "raw database") #################### child processes -# avoid waiting until the process returns; +# avoid waiting until the process returns; # that makes debugging of hanging children hard class Buffer: def __init__ (self,message='log_call: '): self.buffer='' self.message=message - + def add (self,c): self.buffer += c if c=='\n': self.flush() @@ -105,8 +105,8 @@ def log_call(command,timeout=default_timeout_minutes*60,poll=1): verbose("log_call: poll=%r s" % poll) trigger=time.time()+timeout result = False - try: - child = subprocess.Popen(command, bufsize=1, + try: + child = subprocess.Popen(command, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) buffer = Buffer() while True: @@ -119,7 +119,7 @@ def log_call(command,timeout=default_timeout_minutes*60,poll=1): if returncode != None: buffer.flush() # child is done and return 0 - if returncode == 0: + if returncode == 0: log("log_call:end command (%s) completed" % message) result=True break diff --git a/net.py b/net.py index 760c1be..b67acf4 100644 --- a/net.py +++ b/net.py @@ -22,14 +22,14 @@ def start(options, conf): def GetSlivers(data, config, plc): logger.verbose("net: GetSlivers called.") - if not 'interfaces' in data: + if not 'interfaces' in data: logger.log_missing_data('net.GetSlivers','interfaces') return plnet.InitInterfaces(logger, plc, data) - if 'OVERRIDES' in dir(config): + if 'OVERRIDES' in dir(config): if config.OVERRIDES.get('net_max_rate') == '-1': logger.log("net: Slice and node BW Limits disabled.") - if len(bwlimit.tc("class show dev %s" % dev_default)): + if len(bwlimit.tc("class show dev %s" % dev_default)): logger.verbose("net: *** DISABLING NODE BW LIMITS ***") bwlimit.stop() else: @@ -97,7 +97,7 @@ def InitI2(plc, data): # and add IPs that don't exist in the set rather than # just recreateing the set. bwlimit.exempt_init('Internet2', i2nodes) - + # set the iptables classification rule if it doesnt exist. 
cmd = '-A POSTROUTING -m set --set Internet2 dst -j CLASSIFY --set-class 0001:2000 --add-mark' rules = [] @@ -109,7 +109,7 @@ def InitI2(plc, data): os.popen("/sbin/iptables -t mangle " + cmd) def InitNAT(plc, data): - + # query running network interfaces devs = sioc.gifconf() ips = dict(zip(devs.values(), devs.keys())) diff --git a/nodemanager.py b/nodemanager.py index 8e5c2ec..4e82165 100755 --- a/nodemanager.py +++ b/nodemanager.py @@ -26,7 +26,7 @@ import logger import tools from config import Config -from plcapi import PLCAPI +from plcapi import PLCAPI import random @@ -38,7 +38,7 @@ class NodeManager: # the modules in this directory that need to be run # NOTE: modules listed here will also be loaded in this order - # once loaded, they get re-ordered after their priority (lower comes first) + # once loaded, they get re-ordered after their priority (lower comes first) # for determining the runtime order core_modules=['net', 'conf_files', 'slivermanager', 'bwmon'] @@ -49,21 +49,21 @@ class NodeManager: def __init__ (self): parser = optparse.OptionParser() - parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False, + parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False, help='run daemonized') parser.add_option('-s', '--startup', action='store_true', dest='startup', default=False, help='run all sliver startup scripts') - parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config', + parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config', help='PLC configuration file') - parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session', + parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session', help='API session key (or file)') - parser.add_option('-p', '--period', action='store', dest='period', default=NodeManager.default_period, + parser.add_option('-p', '--period', action='store', dest='period', default=NodeManager.default_period, help='Polling interval (sec) - default %d'%NodeManager.default_period) - parser.add_option('-r', '--random', action='store', dest='random', default=NodeManager.default_random, + parser.add_option('-r', '--random', action='store', dest='random', default=NodeManager.default_random, help='Range for additional random polling interval (sec) -- default %d'%NodeManager.default_random) - parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='more verbose log') - parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH, + parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH, help='Path to plugins directory') # NOTE: BUG the 'help' for this parser.add_option() wont list plugins from the --path argument @@ -89,7 +89,7 @@ class NodeManager: def GetSlivers(self, config, plc): """Retrieves GetSlivers at PLC and triggers callbacks defined in modules/plugins""" - try: + try: logger.log("nodemanager: Syncing w/ PLC") # retrieve GetSlivers from PLC data = plc.GetSlivers() @@ -103,7 +103,7 @@ class NodeManager: logger.log_slivers(data) logger.verbose("nodemanager: Sync w/ PLC done") last_data=data - except: + except: logger.log_exc("nodemanager: failed in GetSlivers") # XXX So some modules can at least boostrap. 
logger.log("nodemanager: Can't contact PLC to GetSlivers(). Continuing.") @@ -113,13 +113,13 @@ class NodeManager: # Invoke GetSlivers() functions from the callback modules for module in self.loaded_modules: logger.verbose('nodemanager: triggering %s.GetSlivers'%module.__name__) - try: + try: callback = getattr(module, 'GetSlivers') module_data=data if getattr(module,'persistent_data',False): module_data=last_data callback(data, config, plc) - except: + except: logger.log_exc("nodemanager: GetSlivers failed to run callback for module %r"%module) @@ -127,17 +127,17 @@ class NodeManager: """ Get PLC wide defaults from _default system slice. Adds them to config class. """ - for slice in data.get('slivers'): + for slice in data.get('slivers'): if slice['name'] == config.PLC_SLICE_PREFIX+"_default": attr_dict = {} - for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value'] + for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value'] if len(attr_dict): logger.verbose("nodemanager: Found default slice overrides.\n %s" % attr_dict) config.OVERRIDES = attr_dict return # NOTE: if an _default slice existed, it would have been found above and - # the routine would return. Thus, if we've gotten here, then no default - # slice is bound to this node. + # the routine would return. Thus, if we've gotten here, then no default + # slice is bound to this node. if 'OVERRIDES' in dir(config): del config.OVERRIDES @@ -147,23 +147,23 @@ class NodeManager: """ # GetSlivers exposes the result of GetSliceFamily() as an separate key in data # It is safe to override the attributes with this, as this method has the right logic - for sliver in data.get('slivers'): + for sliver in data.get('slivers'): try: slicefamily=sliver.get('GetSliceFamily') for att in sliver['attributes']: - if att['tagname']=='vref': + if att['tagname']=='vref': att['value']=slicefamily continue sliver['attributes'].append({ 'tagname':'vref','value':slicefamily}) except: logger.log_exc("nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",name=sliver['name']) - + def dumpSlivers (self, slivers): f = open(NodeManager.DB_FILE, "w") logger.log ("nodemanager: saving successfully fetched GetSlivers in %s" % NodeManager.DB_FILE) pickle.dump(slivers, f) f.close() - + def loadSlivers (self): try: f = open(NodeManager.DB_FILE, "r+") @@ -174,18 +174,18 @@ class NodeManager: except: logger.log("Could not restore GetSlivers from %s" % NodeManager.DB_FILE) return {} - + def run(self): try: if self.options.daemon: tools.daemon() - + # set log level if (self.options.verbose): logger.set_level(logger.LOG_VERBOSE) - + # Load /etc/planetlab/plc_config config = Config(self.options.config) - + try: other_pid = tools.pid_file() if other_pid != None: @@ -194,7 +194,7 @@ If this is not the case, please remove the pid file %s. -- exiting""" % (other_p return except OSError, err: print "Warning while writing PID file:", err - + # load modules self.loaded_modules = [] for module in self.modules: @@ -205,30 +205,30 @@ If this is not the case, please remove the pid file %s. 
-- exiting""" % (other_p self.loaded_modules.append(m) except ImportError, err: print "Warning while loading module %s:" % module, err - + # sort on priority (lower first) def sort_module_priority (m1,m2): return getattr(m1,'priority',NodeManager.default_priority) - getattr(m2,'priority',NodeManager.default_priority) self.loaded_modules.sort(sort_module_priority) - + logger.log('ordered modules:') - for module in self.loaded_modules: + for module in self.loaded_modules: logger.log ('%s: %s'%(getattr(module,'priority',NodeManager.default_priority),module.__name__)) - + # Load /etc/planetlab/session if os.path.exists(self.options.session): session = file(self.options.session).read().strip() else: session = None - - + + # get random periods iperiod=int(self.options.period) irandom=int(self.options.random) - + # Initialize XML-RPC client plc = PLCAPI(config.plc_api_uri, config.cacert, session, timeout=iperiod/2) - + #check auth logger.log("nodemanager: Checking Auth.") while plc.check_authentication() != True: @@ -239,8 +239,8 @@ If this is not the case, please remove the pid file %s. -- exiting""" % (other_p logger.log("nodemanager: Retry Failed. (%r); Waiting.."%e) time.sleep(iperiod) logger.log("nodemanager: Authentication Succeeded!") - - + + while True: # Main nodemanager Loop logger.log('nodemanager: mainloop - calling GetSlivers - period=%d random=%d'%(iperiod,irandom)) @@ -253,7 +253,7 @@ If this is not the case, please remove the pid file %s. -- exiting""" % (other_p def run(): logger.log("======================================== Entering nodemanager.py") NodeManager().run() - + if __name__ == '__main__': run() else: diff --git a/plcapi.py b/plcapi.py index ca411df..a6a74a4 100644 --- a/plcapi.py +++ b/plcapi.py @@ -41,7 +41,7 @@ class PLCAPI: def update_session(self, f="/usr/boot/plnode.txt"): - # try authenticatipopulate /etc.planetlab/session + # try authenticatipopulate /etc.planetlab/session def plnode(key): try: return [i[:-1].split('=') for i in open(f).readlines() if i.startswith(key)][0][1].strip('"') @@ -53,13 +53,13 @@ class PLCAPI: open("/etc/planetlab/session", 'w').write(plc.GetSession().strip()) self.session = open("/etc/planetlab/session").read().strip() - + def check_authentication(self): authstatus = False if self.key or self.session: - try: + try: authstatus = self.AuthCheck() - except: + except: logger.log_exc("plcapi: failed in plcapi.check_authentication") return authstatus diff --git a/plugins/codemux.py b/plugins/codemux.py index ef8634f..2c532b8 100644 --- a/plugins/codemux.py +++ b/plugins/codemux.py @@ -15,7 +15,7 @@ def start(options, conf): def GetSlivers(data, config, plc = None): """ - For each sliver with the codemux attribute, parse out "host,port" + For each sliver with the codemux attribute, parse out "host,port" and make entry in conf. Restart service after. 
""" if 'OVERRIDES' in dir(config): @@ -29,7 +29,7 @@ def GetSlivers(data, config, plc = None): slicesinconf = parseConf() # slices that need to be written to the conf codemuxslices = {} - + # XXX Hack for planetflow if slicesinconf.has_key("root"): _writeconf = False else: _writeconf = True @@ -74,11 +74,11 @@ def GetSlivers(data, config, plc = None): # Remove slices from conf that no longer have the attribute for deadslice in set(slicesinconf.keys()) - set(codemuxslices.keys()): # XXX Hack for root slice - if deadslice != "root": + if deadslice != "root": logger.log("codemux: Removing %s" % deadslice) - _writeconf = True + _writeconf = True - if _writeconf: writeConf(sortDomains(codemuxslices)) + if _writeconf: writeConf(sortDomains(codemuxslices)) # ensure the service is running startService() @@ -87,9 +87,9 @@ def writeConf(slivers, conf = CODEMUXCONF): '''Write conf with default entry up top. Elements in [] should have lower order domain names first. Restart service.''' f = open(conf, "w") # This needs to be the first entry... - try: + try: f.write("* root 1080 %s\n" % Config().PLC_PLANETFLOW_HOST) - except AttributeError: + except AttributeError: logger.log("codemux: Can't find PLC_CONFIG_HOST in config. Using PLC_API_HOST") f.write("* root 1080 %s\n" % Config().PLC_API_HOST) # Sort items for like domains @@ -117,14 +117,14 @@ def sortDomains(slivers): # make list of slivers sortedslices = [] for host in hosts: sortedslices.append({host: dnames[host]}) - + return sortedslices - + def parseConf(conf = CODEMUXCONF): '''Parse the CODEMUXCONF and return dict of slices in conf. {slice: (host,port)}''' slicesinconf = {} # default - try: + try: f = open(conf) for line in f.readlines(): if line.startswith("#") \ diff --git a/plugins/drl.py b/plugins/drl.py index a82ca78..e66a575 100644 --- a/plugins/drl.py +++ b/plugins/drl.py @@ -19,88 +19,88 @@ drl = """ """ def start(options, conf): - logger.log('drl plugin starting up...') + logger.log('drl plugin starting up...') def DRLSetup(site_name, slice_name, site_id, bw_limit, peer): - DRL_file = '/vservers/%s/etc/drl.xml' % slice_name - DRL_config = drl % (site_name, site_id, bw_limit, peer) - - # Check config changes - if os.path.exists(DRL_file): - import md5 - new_digest = md5.new(DRL_config).digest() - old_digest = md5.new(open(DRL_file).read()).digest() - if old_digest == new_digest: - logger.log('drl: %s already exists...' % DRL_file) - DRLInstall(slice_name) - return - DRLConfig(DRL_file, DRL_config) - DRLInstall(slice_name) + DRL_file = '/vservers/%s/etc/drl.xml' % slice_name + DRL_config = drl % (site_name, site_id, bw_limit, peer) + + # Check config changes + if os.path.exists(DRL_file): + import md5 + new_digest = md5.new(DRL_config).digest() + old_digest = md5.new(open(DRL_file).read()).digest() + if old_digest == new_digest: + logger.log('drl: %s already exists...' % DRL_file) + DRLInstall(slice_name) + return + DRLConfig(DRL_file, DRL_config) + DRLInstall(slice_name) def DRLConfig(DRL_file, DRL_config): - logger.log('drl: %s is out-dated...' % DRL_file) - logger.log('drl: generating %s' % DRL_file) - f = open( DRL_file, 'w') - f.write(DRL_config) - f.close() + logger.log('drl: %s is out-dated...' 
% DRL_file) + logger.log('drl: generating %s' % DRL_file) + f = open( DRL_file, 'w') + f.write(DRL_config) + f.close() def DRLInstall(slice_name): - if not os.path.exists('/vservers/%s/etc/yum.repos.d/myplc.repo' % slice_name): - shutil.copyfile('/etc/yum.myplc.d/myplc.repo', '/vservers/%s/etc/yum.repos.d/myplc.repo' % slice_name) - logger.log('drl: installing DistributedRateLimiting into %s slice' % slice_name) - logger.log_call(['vserver', '%s' % slice_name, 'suexec', '0', 'yum', 'install', '-y', '-q', 'DistributedRateLimiting']) - logger.log_call(['vserver', '%s' % slice_name, 'suexec', '0', 'chkconfig', '--add', 'ulogd']) - else: - logger.log('drl: updating DistributedRateLimiting on %s slice' % slice_name) - logger.log_call(['vserver', '%s' % slice_name, 'suexec', '0', 'yum', 'update', '-y', '-q', 'DistributedRateLimiting']) - - logger.log('drl: (re)starting DistributedRateLimiting service') - logger.log_call(['vserver', '%s' % slice_name, 'suexec', '0', 'service', 'ulogd', 'restart']) + if not os.path.exists('/vservers/%s/etc/yum.repos.d/myplc.repo' % slice_name): + shutil.copyfile('/etc/yum.myplc.d/myplc.repo', '/vservers/%s/etc/yum.repos.d/myplc.repo' % slice_name) + logger.log('drl: installing DistributedRateLimiting into %s slice' % slice_name) + logger.log_call(['vserver', '%s' % slice_name, 'suexec', '0', 'yum', 'install', '-y', '-q', 'DistributedRateLimiting']) + logger.log_call(['vserver', '%s' % slice_name, 'suexec', '0', 'chkconfig', '--add', 'ulogd']) + else: + logger.log('drl: updating DistributedRateLimiting on %s slice' % slice_name) + logger.log_call(['vserver', '%s' % slice_name, 'suexec', '0', 'yum', 'update', '-y', '-q', 'DistributedRateLimiting']) + + logger.log('drl: (re)starting DistributedRateLimiting service') + logger.log_call(['vserver', '%s' % slice_name, 'suexec', '0', 'service', 'ulogd', 'restart']) def GetSlivers(data, conf = None, plc = None): - DRL_SLICE_NAME = '' - HAVE_DRL = 0 - node_id = tools.node_id() - - if 'slivers' not in data: - logger.log_missing_data("drl.GetSlivers",'slivers') - return - - for sliver in data['slivers']: - for attribute in sliver['attributes']: - tag = attribute['tagname'] - value = attribute['value'] - if tag == 'drl' and value == '1': - HAVE_DRL = 1 - DRL_SLICE_NAME = sliver['name'] - - if HAVE_DRL: - site_id = plc.GetNodes({'node_id': int(node_id) }, ['site_id']) - site_id = site_id[0]['site_id'] - - q = plc.GetSites({'site_id': site_id, 'enabled': True, 'peer_site_id': None}, ['name', 'node_ids']) - for i in q: - if i['node_ids'] != [] and len(i['node_ids']) > 1: - z = plc.GetInterfaces({'node_id': i['node_ids'], 'is_primary': True, '~bwlimit': None}, ['node_id', 'ip', 'bwlimit']) - total_bwlimit = 0 - peer = '' - node_has_bwlimit = 0 - for j in range(len(z)): - total_bwlimit += z[j]['bwlimit'] - if z[j]['node_id'] != int(node_id): - peer += '\t%s\n' % z[j]['ip'] - else: - node_has_bwlimit = 1 - if node_has_bwlimit: - DRLSetup(i['name'], DRL_SLICE_NAME, site_id, total_bwlimit/1000, peer) - else: - logger.log('drl: This node has no bwlimit') - - else: - logger.log('drl: This site has only %s node' % len(i['node_ids'])) - else: - logger.log('drl: This node has no drl slice!...') + DRL_SLICE_NAME = '' + HAVE_DRL = 0 + node_id = tools.node_id() + + if 'slivers' not in data: + logger.log_missing_data("drl.GetSlivers",'slivers') + return + + for sliver in data['slivers']: + for attribute in sliver['attributes']: + tag = attribute['tagname'] + value = attribute['value'] + if tag == 'drl' and value == '1': + HAVE_DRL = 1 + 
DRL_SLICE_NAME = sliver['name'] + + if HAVE_DRL: + site_id = plc.GetNodes({'node_id': int(node_id) }, ['site_id']) + site_id = site_id[0]['site_id'] + + q = plc.GetSites({'site_id': site_id, 'enabled': True, 'peer_site_id': None}, ['name', 'node_ids']) + for i in q: + if i['node_ids'] != [] and len(i['node_ids']) > 1: + z = plc.GetInterfaces({'node_id': i['node_ids'], 'is_primary': True, '~bwlimit': None}, ['node_id', 'ip', 'bwlimit']) + total_bwlimit = 0 + peer = '' + node_has_bwlimit = 0 + for j in range(len(z)): + total_bwlimit += z[j]['bwlimit'] + if z[j]['node_id'] != int(node_id): + peer += '\t%s\n' % z[j]['ip'] + else: + node_has_bwlimit = 1 + if node_has_bwlimit: + DRLSetup(i['name'], DRL_SLICE_NAME, site_id, total_bwlimit/1000, peer) + else: + logger.log('drl: This node has no bwlimit') + + else: + logger.log('drl: This site has only %s node' % len(i['node_ids'])) + else: + logger.log('drl: This node has no drl slice!...') diff --git a/plugins/omf_resctl.py b/plugins/omf_resctl.py index f1a6d2a..87629aa 100644 --- a/plugins/omf_resctl.py +++ b/plugins/omf_resctl.py @@ -21,7 +21,7 @@ def start(options, conf): logger.log("omf_resctl: plugin starting up...") def GetSlivers(data, conf = None, plc = None): - if 'accounts' not in data: + if 'accounts' not in data: logger.log_missing_data("omf_resctl.GetSlivers",'accounts') return diff --git a/plugins/rawdisk.py b/plugins/rawdisk.py index e7b1006..34dce8b 100644 --- a/plugins/rawdisk.py +++ b/plugins/rawdisk.py @@ -50,14 +50,14 @@ def get_unused_devices(): return devices def GetSlivers(data, config=None, plc=None): - if 'slivers' not in data: + if 'slivers' not in data: logger.log_missing_data("rawdisk.GetSlivers",'slivers') return devices = get_unused_devices() for sliver in data['slivers']: for attribute in sliver['attributes']: - name = attribute.get('tagname',attribute.get('name','')) + name = attribute.get('tagname',attribute.get('name','')) if name == 'rawdisk': for i in devices: st = os.stat(i) @@ -65,7 +65,7 @@ def GetSlivers(data, config=None, plc=None): if os.path.exists(path): # should check whether its the proper type of device continue - + logger.log("rawdisk: Copying %s to %s" % (i, path)) try: if os.path.exists(path): diff --git a/plugins/reservation.py b/plugins/reservation.py index a087259..6b19691 100644 --- a/plugins/reservation.py +++ b/plugins/reservation.py @@ -20,7 +20,7 @@ priority = 45 # this instructs nodemanager that we want to use the latest known data in case the plc link is down persistent_data = True -# of course things would be simpler if node manager was to create one instance of the plugins +# of course things would be simpler if node manager was to create one instance of the plugins # instead of blindly caling functions in the module... ############################## @@ -47,21 +47,21 @@ class reservation: self.data = None # this is a dict mapping a raounded timestamp to the corr. 
Timer object self.timers = {} - + #################### def start(self,options,conf): logger.log("reservation: plugin performing dummy start...") - # this method is entirely about making sure that we have events scheduled + # this method is entirely about making sure that we have events scheduled # at the intervals where there is a lease that starts or ends def GetSlivers (self, data, conf=None, plc=None): - + # check we're using a compliant GetSlivers - if 'reservation_policy' not in data: + if 'reservation_policy' not in data: logger.log_missing_data("reservation.GetSlivers",'reservation_policy') return reservation_policy=data['reservation_policy'] - if 'leases' not in data: + if 'leases' not in data: logger.log_missing_data("reservation.GetSlivers",'leases') return @@ -78,7 +78,7 @@ class reservation: # at this point we have reservation_policy in ['lease_or_idle','lease_or_shared'] # we make no difference for now logger.verbose('reservation.GetSlivers : reservable node -- listing timers ') - + self.sync_timers_from_leases() if reservation.debug: self.list_timers() @@ -129,7 +129,7 @@ class reservation: self.timers[round]=timer timer.start() - + @staticmethod def time_printable (timestamp): return time.strftime ("%Y-%m-%d %H:%M UTC",time.gmtime(timestamp)) @@ -188,7 +188,7 @@ class reservation: return # otherwise things are simple - if ending_lease: + if ending_lease: self.suspend_slice (ending_lease['name']) if not starting_lease: logger.log("'lease_or_shared' is xxx todo - would restart to shared mode") @@ -210,14 +210,14 @@ class reservation: logger.log_call( ['/usr/sbin/vserver-stat', ] ) if slicename: logger.log_call ( ['/usr/sbin/vserver',slicename,'status', ]) - + def is_running (self, slicename): try: return accounts.get(slicename).is_running() except: return False - # quick an d dirty - this does not obey the accounts/sliver_vs/controller hierarchy + # quick an d dirty - this does not obey the accounts/sliver_vs/controller hierarchy def suspend_slice(self, slicename): logger.log('reservation: Suspending slice %s'%(slicename)) self.debug_box('before suspending',slicename) @@ -229,7 +229,7 @@ class reservation: logger.log_exc("reservation.suspend_slice: Could not stop slice %s through its worker"%slicename) # we hope the status line won't return anything self.debug_box('after suspending',slicename) - + def suspend_all_slices (self): for sliver in self.data['slivers']: # is this a system sliver ? @@ -249,7 +249,7 @@ class reservation: sliver=slivers[0] record=database.db.get(slicename) record['enabled']=True - # + # logger.verbose("reservation: Located worker object %r"%worker) logger.verbose("reservation: Located record at the db %r"%record) worker.start(record) @@ -257,4 +257,3 @@ class reservation: logger.log_exc("reservation.restart_slice: Could not start slice %s through its worker"%slicename) # we hope the status line won't return anything self.debug_box('after restarting',slicename) - diff --git a/plugins/sliverauth.py b/plugins/sliverauth.py index ce17b0c..e840a4d 100644 --- a/plugins/sliverauth.py +++ b/plugins/sliverauth.py @@ -57,7 +57,7 @@ def GetSlivers(data, config, plc): if instantiation == 'plc-instantiated': logger.log("sliverauth: plc-instantiated slice %s does not yet exist. IGNORING!" 
% sliver['name']) continue - + found_hmac = False for attribute in sliver['attributes']: name = attribute.get('tagname',attribute.get('name','')) @@ -76,7 +76,7 @@ def GetSlivers(data, config, plc): path = '/vservers/%s/etc/planetlab' % sliver['name'] if os.path.exists(path): - keyfile = '%s/key' % path + keyfile = '%s/key' % path oldhmac = '' if os.path.exists(keyfile): f = open(keyfile,'r') @@ -94,4 +94,3 @@ def GetSlivers(data, config, plc): logger.log("sliverauth: writing hmac to %s " % keyfile) os.chmod(keyfile,0400) - diff --git a/plugins/specialaccounts.py b/plugins/specialaccounts.py index 207eb55..14a9cc9 100644 --- a/plugins/specialaccounts.py +++ b/plugins/specialaccounts.py @@ -29,7 +29,7 @@ def start(options, conf): logger.log("specialaccounts: plugin starting up...") def GetSlivers(data, conf = None, plc = None): - if 'accounts' not in data: + if 'accounts' not in data: logger.log_missing_data("specialaccounts.GetSlivers",'accounts') return @@ -56,7 +56,7 @@ def GetSlivers(data, conf = None, plc = None): changes = tools.replace_file_with_string(auth_keys,auth_keys_contents) if changes: logger.log("specialaccounts: keys file changed: %s" % auth_keys) - + # always set permissions properly os.chmod(dot_ssh, 0700) os.chown(dot_ssh, uid,gid) diff --git a/plugins/vsys.py b/plugins/vsys.py index 1315371..e6c4b12 100644 --- a/plugins/vsys.py +++ b/plugins/vsys.py @@ -38,7 +38,7 @@ def GetSlivers(data, config=None, plc=None): _restart = createVsysDir(sliver['name']) or _restart if attribute['value'] in scripts.keys(): scripts[attribute['value']].append(sliver['name']) - + # Write the conf _restart = writeConf(slices, parseConf()) or _restart # Write out the ACLs @@ -49,15 +49,15 @@ def GetSlivers(data, config=None, plc=None): def createVsysDir(sliver): '''Create /vsys directory in slice. Update vsys conf file.''' - try: + try: os.mkdir("/vservers/%s/vsys" % sliver) return True - except OSError: + except OSError: return False def touchAcls(): - '''Creates empty acl files for scripts. + '''Creates empty acl files for scripts. To be ran in case of new scripts that appear in the backend. Returns list of available scripts.''' acls = [] @@ -75,7 +75,7 @@ def touchAcls(): f = open("%s/%s.acl" %(VSYSBKEND, new), "w") f.write("\n") f.close() - + return scripts @@ -109,7 +109,7 @@ def parseAcls(): f = open(root+"/"+file,"r+") scriptname = file.replace(".acl", "") scriptacls[scriptname] = [] - for slice in f.readlines(): + for slice in f.readlines(): scriptacls[scriptname].append(slice.rstrip()) f.close() # return what scripts are configured for which slices. 
@@ -138,7 +138,7 @@ def parseConf(): '''Parse the vsys conf and return list of slices in conf.''' scriptacls = {} slicesinconf = [] - try: + try: f = open(VSYSCONF) for line in f.readlines(): (path, slice) = line.split() diff --git a/plugins/vsys_privs.py b/plugins/vsys_privs.py index c3b632b..dea3b44 100755 --- a/plugins/vsys_privs.py +++ b/plugins/vsys_privs.py @@ -85,7 +85,7 @@ def write_privs(cur_privs,privs): # Add values that do not exist for k in variables.keys(): v = variables[k] - if (cur_privs.has_key(slice) + if (cur_privs.has_key(slice) and cur_privs[slice].has_key(k) and cur_privs[slice][k] == v): # The binding has not changed @@ -98,7 +98,7 @@ def write_privs(cur_privs,privs): f.close() logger.log("vsys_privs: added vsys attribute %s for %s"%(k,slice)) - # Remove files and directories + # Remove files and directories # that are invalid for slice in cur_privs.keys(): variables = cur_privs[slice] @@ -106,19 +106,19 @@ def write_privs(cur_privs,privs): # Add values that do not exist for k in variables.keys(): - if (privs.has_key(slice) + if (privs.has_key(slice) and cur_privs[slice].has_key(k)): # ok, spare this tag - print "Sparing %s, %s "%(slice,k) + print "Sparing %s, %s "%(slice,k) else: v_file = os.path.join(slice_dir, k) - os.remove(v_file) + os.remove(v_file) if (not privs.has_key(slice)): os.rmdir(slice_dir) -if __name__ == "__main__": +if __name__ == "__main__": test_slivers = {'slivers':[ {'name':'foo','attributes':[ {'tagname':'vsys_m','value':'2'}, diff --git a/sliver_vs.py b/sliver_vs.py index d269b81..43b3cf2 100644 --- a/sliver_vs.py +++ b/sliver_vs.py @@ -82,10 +82,10 @@ class Sliver_VS(accounts.Account, vserver.VServer): if vref is None: logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name)) return - # used to look in /etc/planetlab/family, + # used to look in /etc/planetlab/family, # now relies on the 'GetSliceFamily' extra attribute in GetSlivers() # which for legacy is still exposed here as the 'vref' key - + # check the template exists -- there's probably a better way.. 
if not os.path.isdir ("/vservers/.vref/%s"%vref): logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref)) @@ -98,7 +98,7 @@ class Sliver_VS(accounts.Account, vserver.VServer): # and that's not quite right except: arch='i386' - + def personality (arch): personality="linux32" if arch.find("64")>=0: @@ -116,7 +116,7 @@ class Sliver_VS(accounts.Account, vserver.VServer): logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch))) @staticmethod - def destroy(name): + def destroy(name): # logger.log_call(['/usr/sbin/vuserdel', name, ]) logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ]) @@ -152,20 +152,20 @@ class Sliver_VS(accounts.Account, vserver.VServer): logger.log_exc("vsliver_vs: %s: could not install generic vinit script"%self.name) # create symlink for runlevel 3 if not os.path.islink(rc3_link): - try: - logger.log("vsliver_vs: %s: installing generic vinit rc script"%self.name) - os.symlink(rc3_target,rc3_link) - except: - logger.log_exc("vsliver_vs: %s: failed to install runlevel3 link") - + try: + logger.log("vsliver_vs: %s: installing generic vinit rc script"%self.name) + os.symlink(rc3_target,rc3_link) + except: + logger.log_exc("vsliver_vs: %s: failed to install runlevel3 link") + def start(self, delay=0): - if self.rspec['enabled'] <= 0: + if self.rspec['enabled'] <= 0: logger.log('sliver_vs: not starting %s, is not enabled'%self.name) else: logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay)) time.sleep(delay) - # VServer.start calls fork() internally, + # VServer.start calls fork() internally, # so just close the nonstandard fds and fork once to avoid creating zombies child_pid = os.fork() if child_pid == 0: @@ -190,7 +190,7 @@ class Sliver_VS(accounts.Account, vserver.VServer): tools.close_nonstandard_fds() vserver.VServer.start(self) os._exit(0) - else: + else: os.waitpid(child_pid, 0) self.initscriptchanged = False @@ -198,7 +198,7 @@ class Sliver_VS(accounts.Account, vserver.VServer): logger.log('sliver_vs: %s: stopping' % self.name) vserver.VServer.stop(self) - def is_running(self): + def is_running(self): return vserver.VServer.is_running(self) def set_resources(self,setup=False): @@ -273,8 +273,8 @@ class Sliver_VS(accounts.Account, vserver.VServer): (self.name, self.rspec['ip_addresses'])) self.set_ipaddresses_config(self.rspec['ip_addresses']) - #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id)) - #self.setname(self.slice_id) + #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id)) + #self.setname(self.slice_id) #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id)) try: vserver_config_path = '/etc/vservers/%s'%self.name @@ -286,12 +286,12 @@ class Sliver_VS(accounts.Account, vserver.VServer): logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e))) except Exception,e: logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name) - + if self.enabled == False: self.enabled = True self.start() - + if False: # Does not work properly yet. 
if self.have_limits_changed(): logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name) diff --git a/slivermanager.py b/slivermanager.py index 2122865..d2d6131 100644 --- a/slivermanager.py +++ b/slivermanager.py @@ -147,13 +147,13 @@ def GetSlivers(data, config = None, plc=None, fullupdate=True): database.db.sync() accounts.Startingup = False -def deliver_ticket(data): +def deliver_ticket(data): return GetSlivers(data, fullupdate=False) def start(options, config): for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.iteritems(): DEFAULT_ALLOCATION[resname]=default_amount - + accounts.register_class(sliver_vs.Sliver_VS) accounts.register_class(controller.Controller) accounts.Startingup = options.startup diff --git a/ticket.py b/ticket.py index aa24b88..0012908 100644 --- a/ticket.py +++ b/ticket.py @@ -14,7 +14,7 @@ GPG = '/usr/bin/gpg' def _popen_gpg(*args): """Return a Popen object to GPG.""" - return Popen((GPG, '--batch', '--no-tty') + args, + return Popen((GPG, '--batch', '--no-tty') + args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) def sign(data): @@ -37,9 +37,8 @@ def verify(signed_msg): msg = p.stdout.read() p.stdout.close() p.stderr.close() - if p.wait(): + if p.wait(): return None # verification failed else: data, = loads(msg)[0] return data - diff --git a/tools.py b/tools.py index 98a5b7e..c5d3e40 100644 --- a/tools.py +++ b/tools.py @@ -91,8 +91,8 @@ def fork_as(su, function, *args): #################### # manage files def pid_file(): - """We use a pid file to ensure that only one copy of NM is running at a given time. -If successful, this function will write a pid file containing the pid of the current process. + """We use a pid file to ensure that only one copy of NM is running at a given time. +If successful, this function will write a pid file containing the pid of the current process. The return value is the pid of the other running process, or None otherwise.""" other_pid = None if os.access(PID_FILE, os.F_OK): # check for a pid file -- 2.43.0
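
For reference, a minimal illustrative sketch (not part of the patch; the script and its names are hypothetical) of roughly the per-file cleanup the hunks above reflect -- trailing spaces and tabs trimmed from line ends, blank lines dropped from the end of the file, and a single final newline ensured:

#!/usr/bin/env python
# Hypothetical helper, for illustration only -- not shipped with nodemanager.
import sys

def normalize(path):
    # Read the file and strip trailing whitespace from every line.
    with open(path) as f:
        lines = [line.rstrip() for line in f]
    # Drop empty lines left at the end of the file.
    while lines and not lines[-1]:
        lines.pop()
    if not lines:
        return
    # Rewrite with a single newline terminating the last line.
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')

if __name__ == '__main__':
    for path in sys.argv[1:]:
        normalize(path)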