X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=database.py;h=0680b6cb118df596c15a8b05182d39960de9dab7;hb=ac6786141feadccdc48b819ab21a825a54881ded;hp=ab4082448981cde9416dcfa7136841290f200106;hpb=74a8cfb9d2eac39ae02e5323e1fb5b1d33297981;p=nodemanager.git

diff --git a/database.py b/database.py
index ab40824..0680b6c 100644
--- a/database.py
+++ b/database.py
@@ -1,6 +1,4 @@
-# $Id$
-# $URL$
-
+#
 """The database houses information on slivers. This information reaches the
 sliver manager in two different ways: one, through the GetSlivers() call made
 periodically; two, by users delivering tickets.
@@ -15,21 +13,35 @@ In order to maintain service when the node reboots during a network partition,
 the database is constantly being dumped to disk.
 """
 
+import sys
+
 import cPickle
 import threading
 import time
 
-import accounts
+import account
 import logger
 import tools
 import bwmon
 
+# hopefully temporary
+# is there a good reason to have this done here and not in a plugin ?
+try: from coresched_lxc import CoreSched
+except: from coresched_vs import CoreSched
+
 # We enforce minimum allocations to keep the clueless from hosing their slivers.
 # Disallow disk loans because there's currently no way to punish slivers over quota.
-MINIMUM_ALLOCATION = {'cpu_pct': 0, 'cpu_share': 1, 'net_min_rate': 0, 'net_max_rate': 8, 'net_i2_min_rate': 0, 'net_i2_max_rate': 8, 'net_share': 1}
+MINIMUM_ALLOCATION = {'cpu_pct': 0,
+                      'cpu_share': 1,
+                      'net_min_rate': 0,
+                      'net_max_rate': 8,
+                      'net_i2_min_rate': 0,
+                      'net_i2_max_rate': 8,
+                      'net_share': 1,
+                      }
 LOANABLE_RESOURCES = MINIMUM_ALLOCATION.keys()
 
-DB_FILE = '/root/sliver_mgr_db.pickle'
+DB_FILE = '/var/lib/nodemanager/database.pickle'
 
 
 # database object and associated lock
@@ -57,7 +69,13 @@ class Database(dict):
         self._min_timestamp = 0
 
     def _compute_effective_rspecs(self):
-        """Calculate the effects of loans and store the result in field _rspec. At the moment, we allow slivers to loan only those resources that they have received directly from PLC. In order to do the accounting, we store three different rspecs: field 'rspec', which is the resources given by PLC; field '_rspec', which is the actual amount of resources the sliver has after all loans; and variable resid_rspec, which is the amount of resources the sliver has after giving out loans but not receiving any."""
+        """Calculate the effects of loans and store the result in field _rspec.
+At the moment, we allow slivers to loan only those resources that they have received directly from PLC.
+In order to do the accounting, we store three different rspecs:
+ * field 'rspec', which is the resources given by PLC;
+ * field '_rspec', which is the actual amount of resources the sliver has after all loans;
+ * and variable resid_rspec, which is the amount of resources the sliver
+   has after giving out loans but not receiving any."""
         slivers = {}
         for name, rec in self.iteritems():
             if 'rspec' in rec:
@@ -66,14 +84,17 @@ class Database(dict):
         for rec in slivers.itervalues():
             eff_rspec = rec['_rspec']
             resid_rspec = rec['rspec'].copy()
-            for target, resname, amt in rec.get('_loans', []):
-                if target in slivers and amt <= resid_rspec[resname] - MINIMUM_ALLOCATION[resname]:
-                    eff_rspec[resname] -= amt
-                    resid_rspec[resname] -= amt
-                    slivers[target]['_rspec'][resname] += amt
+            for target, resource_name, amount in rec.get('_loans', []):
+                if target in slivers and amount <= resid_rspec[resource_name] - MINIMUM_ALLOCATION[resource_name]:
+                    eff_rspec[resource_name] -= amount
+                    resid_rspec[resource_name] -= amount
+                    slivers[target]['_rspec'][resource_name] += amount
 
     def deliver_record(self, rec):
-        """A record is simply a dictionary with 'name' and 'timestamp' keys. We keep some persistent private data in the records under keys that start with '_'; thus record updates should not displace such keys."""
+        """A record is simply a dictionary with 'name' and 'timestamp'
+keys. We keep some persistent private data in the records under keys
+that start with '_'; thus record updates should not displace such
+keys."""
         if rec['timestamp'] < self._min_timestamp: return
         name = rec['name']
         old_rec = self.get(name)
@@ -84,13 +105,18 @@ class Database(dict):
             old_rec.update(rec)
 
     def set_min_timestamp(self, ts):
-        """The ._min_timestamp member is the timestamp on the last comprehensive update. We use it to determine if a record is stale. This method should be called whenever new GetSlivers() data comes in."""
+        """The ._min_timestamp member is the timestamp on the last comprehensive update.
+We use it to determine if a record is stale.
+This method should be called whenever new GetSlivers() data comes in."""
         self._min_timestamp = ts
         for name, rec in self.items():
            if rec['timestamp'] < ts: del self[name]
 
     def sync(self):
-        """Synchronize reality with the database contents. This method does a lot of things, and it's currently called after every single batch of database changes (a GetSlivers(), a loan, a record). It may be necessary in the future to do something smarter."""
+        """Synchronize reality with the database contents. This
+method does a lot of things, and it's currently called after every
+single batch of database changes (a GetSlivers(), a loan, a record).
+It may be necessary in the future to do something smarter."""
 
         # delete expired records
         now = time.time()
@@ -99,35 +125,43 @@ class Database(dict):
 
         self._compute_effective_rspecs()
 
+        try:
+            coresched = CoreSched()
+            coresched.adjustCores(self)
+        except:
+            logger.log_exc("database: exception while doing core sched")
+
         # create and destroy accounts as needed
         logger.verbose("database: sync : fetching accounts")
-        existing_acct_names = accounts.all()
+        existing_acct_names = account.all()
         for name in existing_acct_names:
-            if name not in self: 
+            if name not in self:
                 logger.verbose("database: sync : ensure_destroy'ing %s"%name)
-                accounts.get(name).ensure_destroyed()
+                account.get(name).ensure_destroyed()
         for name, rec in self.iteritems():
-            # protect this; if anything fails for a given sliver 
+            # protect this; if anything fails for a given sliver
            # we still need the other ones to be handled
            try:
-                sliver = accounts.get(name)
-                logger.verbose("database: sync : looping on %s (shell account class from pwd %s)" %(name,sliver._get_class()))
+                sliver = account.get(name)
+                logger.verbose("database: sync : looping on %s (shell account class from pwd %s)" %(name, sliver._get_class()))
                 # Make sure we refresh accounts that are running
-                if rec['instantiation'] == 'plc-instantiated': 
+                if rec['instantiation'] == 'plc-instantiated':
                     logger.verbose ("database: sync : ensure_create'ing 'instantiation' sliver %s"%name)
                     sliver.ensure_created(rec)
-                elif rec['instantiation'] == 'nm-controller': 
+                elif rec['instantiation'] == 'nm-controller':
                     logger.verbose ("database: sync : ensure_create'ing 'nm-controller' sliver %s"%name)
                     sliver.ensure_created(rec)
                 # Back door to ensure PLC overrides Ticket in delegation.
                 elif rec['instantiation'] == 'delegated' and sliver._get_class() != None:
-                    # if the ticket has been delivered and the nm-contoroller started the slice
+                    # if the ticket has been delivered and the nm-controller started the slice
                     # update rspecs and keep them up to date.
-                    if sliver.is_running(): 
+                    if sliver.is_running():
                         logger.verbose ("database: sync : ensure_create'ing 'delegated' sliver %s"%name)
                         sliver.ensure_created(rec)
+            except SystemExit as e:
+                sys.exit(e)
             except:
-                logger.log_exc("database: sync failed to handle sliver",name=name)
+                logger.log_exc("database: sync failed to handle sliver", name=name)
 
         # Wake up bwmom to update limits.
         bwmon.lock.set()
@@ -137,7 +171,9 @@
 
 
 def start():
-    """The database dumper daemon. When it starts up, it populates the database with the last dumped database. It proceeds to handle dump requests forever."""
+    """The database dumper daemon.
+When it starts up, it populates the database with the last dumped database.
+It proceeds to handle dump requests forever."""
     def run():
         global dump_requested
         while True:
@@ -146,8 +182,11 @@ def start():
             db_pickle = cPickle.dumps(db, cPickle.HIGHEST_PROTOCOL)
             dump_requested = False
             db_lock.release()
-            try: tools.write_file(DB_FILE, lambda f: f.write(db_pickle))
-            except: logger.log_exc("database: failed in database.start.run")
+            try:
+                tools.write_file(DB_FILE, lambda f: f.write(db_pickle))
+                logger.log_database(db)
+            except:
+                logger.log_exc("database.start: failed to pickle/dump")
     global db
     try:
         f = open(DB_FILE)
@@ -159,4 +198,5 @@
     except:
         logger.log_exc("database: failed in start")
         db = Database()
+    logger.log('database.start')
     tools.as_daemon_thread(run)
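
For readers of this patch, the loan accounting that _compute_effective_rspecs() describes above can be restated as a minimal standalone sketch: each record keeps the PLC-granted 'rspec', the effective '_rspec' after loans, and a residual copy that caps how much can still be lent without dropping below MINIMUM_ALLOCATION. The function name, the trimmed-down MINIMUM_ALLOCATION, and the sample records below are illustrative assumptions only, not part of nodemanager.

    # Illustrative sketch only, not part of this patch.
    # Records follow the layout documented above: 'rspec' is what PLC granted,
    # '_rspec' is the effective allocation after loans, and '_loans' is a list
    # of (target_sliver, resource_name, amount) tuples.

    MINIMUM_ALLOCATION = {'cpu_share': 1, 'net_share': 1}   # trimmed-down copy

    def compute_effective_rspecs(records):
        """Recompute '_rspec' for every record that carries an 'rspec'."""
        slivers = {}
        for name, rec in records.items():
            if 'rspec' in rec:
                rec['_rspec'] = rec['rspec'].copy()
                slivers[name] = rec
        for rec in slivers.values():
            eff_rspec = rec['_rspec']
            resid_rspec = rec['rspec'].copy()      # what is still available to lend
            for target, resource_name, amount in rec.get('_loans', []):
                # honour a loan only if the target exists and the lender keeps
                # at least the minimum allocation for that resource
                if target in slivers and \
                   amount <= resid_rspec[resource_name] - MINIMUM_ALLOCATION[resource_name]:
                    eff_rspec[resource_name] -= amount
                    resid_rspec[resource_name] -= amount
                    slivers[target]['_rspec'][resource_name] += amount

    if __name__ == '__main__':
        records = {
            'lender':   {'rspec': {'cpu_share': 4, 'net_share': 2},
                         '_loans': [('borrower', 'cpu_share', 2)]},
            'borrower': {'rspec': {'cpu_share': 1, 'net_share': 1}},
        }
        compute_effective_rspecs(records)
        # the lender ends up with 2 cpu_share, the borrower with 3
        assert records['lender']['_rspec']['cpu_share'] == 2
        assert records['borrower']['_rspec']['cpu_share'] == 3

Running the file directly exercises the sketch; note that a loan is skipped rather than clipped when honouring it would take the lender below its minimum, which mirrors the behaviour of the loop in the patch.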