X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=plugins%2Freservation.py;h=dd422b80893079aa57f9929d85ec87a6cdb74b2b;hb=48a73b18fd7daed13c645c1adeddb57b560e7a2d;hp=ddb974b75d7893d3ddf80cf37aa4409b6a7d6b26;hpb=164e7fc96baccd6ae5caa57b794fb9966167eca2;p=nodemanager.git

diff --git a/plugins/reservation.py b/plugins/reservation.py
index ddb974b..dd422b8 100644
--- a/plugins/reservation.py
+++ b/plugins/reservation.py
@@ -1,7 +1,7 @@
-# $Id$
-# $URL$
 #
 # NodeManager plugin - first step of handling reservable nodes
+# Thierry Parmentelat
+#
 """
 Manages running slices when reservation_policy is 'lease_or_idle' or 'lease_or_shared'
 
@@ -11,24 +11,27 @@
 import time
 import threading
 
 import logger
+import account
+import database
 
+# there is an implicit assumption that this triggers after slicemanager
 priority = 45
 # this instructs nodemanager that we want to use the latest known data in case the plc link is down
 persistent_data = True
 
-# of course things would be simpler if node manager was to create one instance of the plugins
+# of course things would be simpler if node manager was to create one instance of the plugins
 # instead of blindly calling functions in the module...
 
 ##############################
 # rough implementation for a singleton class
-def Singleton (klass,*args,**kwds):
-    if not hasattr(klass,'_instance'):
-        klass._instance=klass(*args,**kwds)
+def Singleton (klass, *args, **kwds):
+    if not hasattr(klass, '_instance'):
+        klass._instance=klass(*args, **kwds)
     return klass._instance
 
-def start(options, conf):
-    return Singleton(reservation).start(options,conf)
+def start():
+    return Singleton(reservation).start()
 
 def GetSlivers(data, conf = None, plc = None):
     return Singleton(reservation).GetSlivers(data, conf, plc)
@@ -36,12 +39,51 @@ def GetSlivers(data, conf = None, plc = None):
 ##############################
 class reservation:
 
+    debug=False
+    debug=True
+
     def __init__ (self):
         # the last snapshot of data exposed by GetSlivers
         self.data = None
         # this is a dict mapping a rounded timestamp to the corr. Timer object
         self.timers = {}
-
+
+    ####################
+    def start(self):
+        logger.log("reservation: plugin performing dummy start...")
+
+    # this method is entirely about making sure that we have events scheduled
+    # at the intervals where there is a lease that starts or ends
+    def GetSlivers (self, data, conf=None, plc=None):
+
+        # check we're using a compliant GetSlivers
+        if 'reservation_policy' not in data:
+            logger.log_missing_data("reservation.GetSlivers", 'reservation_policy')
+            return
+        self.reservation_policy=data['reservation_policy']
+        if 'leases' not in data:
+            logger.log_missing_data("reservation.GetSlivers", 'leases')
+            return
+
+        # store data locally
+        # since we've asked for persistent_data, we should not get an empty data here
+        if data: self.data = data
+
+        # regular nodes are not affected
+        if self.reservation_policy == 'none':
+            return
+        elif self.reservation_policy not in ['lease_or_idle', 'lease_or_shared']:
+            logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%self.reservation_policy)
+            return
+        # at this point we have reservation_policy in ['lease_or_idle', 'lease_or_shared']
+        # we make no difference for now
+        logger.log("reservation.GetSlivers: reservable node -- policy=%s"%self.reservation_policy)
+        self.sync_timers_from_leases()
+        logger.log("reservation.GetSlivers: listing timers")
+        if reservation.debug:
+            self.list_timers()
+
+    ####################
     # the granularity is set in the API (initial value is 15 minutes)
     # and it used to round all leases start/until times
     # changing this dynamically can have some weird effects of course..
@@ -58,80 +100,182 @@ class reservation:
         return ((int(time)+granularity/2)/granularity)*granularity
 
     def clear_timers (self):
-        for timer in self.timers.values():
+        for timer in list(self.timers.values()):
             timer.cancel()
         self.timers={}
 
-    def clear_timer (self,timestamp):
-        round=self.round_time(timestamp)
-        if self.timers.has_key(round):
-            timer=self.timers[round]
-            timer.cancel()
-            del self.timers[round]
-
     def sync_timers_from_leases (self):
         self.clear_timers()
         for lease in self.data['leases']:
-            self.ensure_timer(lease['t_from'])
-            self.ensure_timer(lease['t_until'])
-
-    def ensure_timer(self, timestamp):
-        now=time.time()
-        # forget about past events
-        if timestamp < now: return
-        round=self.round_time(timestamp)
-        if self.timers.has_key(round): return
+            self.ensure_timer_from_until(lease['t_from'], lease['t_until'])
+
+    # assuming t1<t2
-    # at the intervals where there is a lease that starts or ends
-    def GetSlivers (self, data, conf=None, plc=None):
-
-        # check we're using a compliant GetSlivers
-        if 'reservation_policy' not in data:
-            logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
-            return
-        reservation_policy=data['reservation_policy']
-        if 'leases' not in data:
-            logger.log_missing_data("reservation.GetSlivers",'leases')
-            return
-
+    def debug_box(self, message, slicename=None):
+        if reservation.debug:
+            logger.log ('reservation: '+message)
+            logger.log_call( ['/usr/sbin/vserver-stat', ] )
+            if slicename:
+                logger.log_call ( ['/usr/sbin/vserver', slicename, 'status', ])
 
-        # store data locally
-        # since we've asked for persistent_data, we should not get an empty data here
-        if data: self.data = data
+    def is_running (self, slicename):
+        try:
+            return account.get(slicename).is_running()
+        except:
+            return False
 
-        # regular nodes are not affected
-        if reservation_policy == 'none':
-            return
-        elif reservation_policy not in ['lease_or_idle','lease_or_shared']:
-            logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%reservation_policy)
-            return
-        # at this point we have reservation_policy in ['lease_or_idle','lease_or_shared']
-        # we make no difference for now
-        logger.verbose('reservation.GetSlivers : reservable node -- listing timers ')
-
-        self.sync_timers_from_leases()
-        for timestamp in self.timers.keys():
-            logger.verbose('TIMER armed for %s'%self.show_time(timestamp))
-
-        logger.verbose('reservation.GetSlivers : end listing timers')
-
+    # quick and dirty - this does not obey the account/sliver_vs/controller hierarchy
+    def suspend_slice(self, slicename):
+        logger.log('reservation: Suspending slice %s'%(slicename))
+        self.debug_box('before suspending', slicename)
+        worker=account.get(slicename)
+        try:
+            logger.log("reservation: Located worker object %r"%worker)
+            worker.stop()
+        except AttributeError:
+            # when the underlying worker is not entirely initialized yet
+            pass
+        except:
+            logger.log_exc("reservation.suspend_slice: Could not stop slice through its worker", name=slicename)
+        # we hope the status line won't return anything
+        self.debug_box('after suspending', slicename)
+
+    # exclude can be a slicename or a list
+    # this probably should run in parallel
+    def suspend_all_slices (self, exclude=[]):
+        if isinstance(exclude, str): exclude=[exclude,]
+        for sliver in self.data['slivers']:
+            # skip excluded
+            if sliver['name'] in exclude: continue
+            # is this a system sliver ?
+            system_slice=False
+            for d in sliver['attributes']:
+                if d['tagname']=='system' and d['value'] : system_slice=True
+            if system_slice: continue
+            self.suspend_slice(sliver['name'])
+
+    def restart_slice(self, slicename):
+        logger.log('reservation: Restarting slice %s'%(slicename))
+        self.debug_box('before restarting', slicename)
+        worker=account.get(slicename)
+        try:
+            # dig in self.data to retrieve corresponding rec
+            slivers = [ sliver for sliver in self.data['slivers'] if sliver['name']==slicename ]
+            sliver=slivers[0]
+            record=database.db.get(slicename)
+            record['enabled']=True
+            #
+            logger.log("reservation: Located worker object %r"%worker)
+            logger.log("reservation: Located record at the db %r"%record)
+            worker.start(record)
+        except:
+            logger.log_exc("reservation.restart_slice: Could not start slice through its worker", name=slicename)
+        # we hope the status line won't return anything
+        self.debug_box('after restarting', slicename)
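
The hunks above describe, in comments, the plugin's core mechanism: each lease boundary (t_from / t_until) is rounded to the API's lease granularity (15 minutes by default), and at most one threading.Timer is armed per distinct boundary that is still in the future. The standalone sketch below is not code from nodemanager; GRANULARITY, TimerPool and on_boundary are illustrative names, and the real plugin drives slices through its account, database and logger modules (and vserver commands) rather than printing. It only illustrates the rounding and timer bookkeeping that round_time and sync_timers_from_leases implement.

import time
import threading

GRANULARITY = 15 * 60   # seconds; mirrors the API's default lease granularity

def round_time(timestamp, granularity=GRANULARITY):
    # round to the nearest multiple of granularity, like reservation.round_time
    return ((int(timestamp) + granularity // 2) // granularity) * granularity

class TimerPool:
    """Keeps at most one threading.Timer per rounded lease boundary."""
    def __init__(self):
        self.timers = {}    # rounded timestamp -> threading.Timer

    def ensure_timer(self, timestamp, callback):
        now = time.time()
        if timestamp < now:             # forget about past events
            return
        rounded = round_time(timestamp)
        if rounded in self.timers:      # one timer per boundary is enough
            return
        timer = threading.Timer(max(0, rounded - now), callback, args=(rounded,))
        self.timers[rounded] = timer
        timer.start()

    def sync_from_leases(self, leases, callback):
        # cancel everything, then re-arm one timer per lease start and end,
        # in the spirit of sync_timers_from_leases / ensure_timer_from_until
        for timer in list(self.timers.values()):
            timer.cancel()
        self.timers = {}
        for lease in leases:
            self.ensure_timer(lease['t_from'], callback)
            self.ensure_timer(lease['t_until'], callback)

def on_boundary(rounded):
    # stand-in for the plugin's real decision: restart the slice whose lease
    # starts now and suspend the others (or suspend everything when idle)
    print("lease boundary reached at", time.strftime("%H:%M", time.localtime(rounded)))

if __name__ == '__main__':
    pool = TimerPool()
    now = time.time()
    pool.sync_from_leases([{'t_from': now + 120, 't_until': now + 1020}], on_boundary)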