-# $Id$
-# $URL$
#
# NodeManager plugin - first step of handling reservable nodes
+# Thierry Parmentelat <thierry.parmentelat@inria.fr>
+#
"""
Manages running slices when reservation_policy is 'lease_or_idle' or 'lease_or_shared'
import threading
import logger
-import accounts
+import account
+import database
# there is an implicit assumption that this triggers after slicemanager
priority = 45
# this instructs nodemanager that we want to use the latest known data in case the plc link is down
persistent_data = True
-# of course things would be simpler if node manager was to create one instance of the plugins
+# of course things would be simpler if node manager were to create one instance of the plugins
# instead of blindly calling functions in the module...
##############################
klass._instance=klass(*args,**kwds)
return klass._instance
-def start(options, conf):
- return Singleton(reservation).start(options,conf)
+def start():
+ return Singleton(reservation).start()
def GetSlivers(data, conf = None, plc = None):
return Singleton(reservation).GetSlivers(data, conf, plc)
self.data = None
        # this is a dict mapping a rounded timestamp to the corresponding Timer object
self.timers = {}
-
+
####################
- def start(self,options,conf):
+ def start(self):
logger.log("reservation: plugin performing dummy start...")
- # this method is entirely about making sure that we have events scheduled
+ # this method is entirely about making sure that we have events scheduled
# at the <granularity> intervals where there is a lease that starts or ends
def GetSlivers (self, data, conf=None, plc=None):
-
+
# check we're using a compliant GetSlivers
- if 'reservation_policy' not in data:
+ if 'reservation_policy' not in data:
logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
return
- reservation_policy=data['reservation_policy']
- if 'leases' not in data:
+ self.reservation_policy=data['reservation_policy']
+ if 'leases' not in data:
logger.log_missing_data("reservation.GetSlivers",'leases')
return
if data: self.data = data
# regular nodes are not affected
- if reservation_policy == 'none':
+ if self.reservation_policy == 'none':
return
- elif reservation_policy not in ['lease_or_idle','lease_or_shared']:
- logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%reservation_policy)
+ elif self.reservation_policy not in ['lease_or_idle','lease_or_shared']:
+ logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%self.reservation_policy)
return
# at this point we have reservation_policy in ['lease_or_idle','lease_or_shared']
# we make no difference for now
- logger.verbose('reservation.GetSlivers : reservable node -- listing timers ')
-
+ logger.log("reservation.GetSlivers: reservable node -- policy=%s"%self.reservation_policy)
self.sync_timers_from_leases()
+ logger.log("reservation.GetSlivers: listing timers")
if reservation.debug:
self.list_timers()
def sync_timers_from_leases (self):
self.clear_timers()
for lease in self.data['leases']:
- self.ensure_timer(lease['t_from'])
- self.ensure_timer(lease['t_until'])
+ self.ensure_timer_from_until(lease['t_from'],lease['t_until'])
+
+ # assuming t1<t2
+ def ensure_timer_from_until (self, t1,t2):
+ now=int(time.time())
+ # both times are in the past: forget about it
+ if t2 < now : return
+        # we're in the middle of the lease: arm a callback in the near future to check on things
+        # this is mostly for starting the slice if nodemanager gets started in the middle of a lease
+ if t1 < now :
+ self.ensure_timer (now,now+10)
+ # both are in the future : arm them
+ else :
+ self.ensure_timer (now,self.round_time(t1))
+ self.ensure_timer (now,self.round_time(t2))
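+    # worked example, in raw epoch seconds: with now=1000, a lease
+    # (t_from=900, t_until=2000) is already in progress, so we only arm the
+    # near-future check at now+10; a lease (t_from=1500, t_until=2000) gets
+    # one timer per (rounded) boundary instead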
+
+ def ensure_timer(self, now, timestamp):
+ if timestamp in self.timers: return
+ def this_closure ():
+            logger.log("TIMER triggering at %s (was armed at %s, expected to trigger at %s)"%\
+ (reservation.time_printable(time.time()),
+ reservation.time_printable(now),
+ reservation.time_printable(timestamp)))
+ self.granularity_callback (now)
+ timer=threading.Timer(timestamp-now,this_closure)
+ self.timers[timestamp]=timer
+ timer.start()
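+    # note: this_closure captures now and timestamp, so when it fires it can
+    # log the armed, expected and actual trigger times side by side;
+    # threading.Timer runs the closure exactly once, timestamp-now seconds
+    # after start() is called
+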
def list_timers(self):
timestamps=self.timers.keys()
timestamps.sort()
for timestamp in timestamps:
- logger.verbose('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
- logger.verbose('reservation.GetSlivers : end listing timers')
-
- def ensure_timer(self, timestamp):
- now=time.time()
- # forget about past events
- if timestamp < now: return
- round=self.round_time(timestamp)
- if round in self.timers: return
- def this_closure ():
- self.granularity_callback (round)
- timer=threading.Timer(timestamp-now,this_closure)
- self.timers[round]=timer
- timer.start()
+ logger.log('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
+        logger.log('reservation.GetSlivers: end listing timers')
+
-
@staticmethod
def time_printable (timestamp):
return time.strftime ("%Y-%m-%d %H:%M UTC",time.gmtime(timestamp))
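+        # e.g. time_printable(0) -> "1970-01-01 00:00 UTC"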
    # this is invoked at the granularity boundaries where something happens (a lease ends and/or a lease starts)
def granularity_callback (self, time_arg):
- now=time.time()
+ now=int(time.time())
round_now=self.round_time(now)
leases=self.data['leases']
###
if reservation.debug:
- logger.verbose('reservation.granularity_callback now=%f round_now=%d arg=%d...'%(now,round_now,time_arg))
+            logger.log('reservation.granularity_callback now=%d round_now=%d arg=%d...'%(now,round_now,time_arg))
if leases and reservation.debug:
- logger.verbose('reservation: Listing leases beg')
+ logger.log('reservation: Listing leases beg')
for lease in leases:
- logger.verbose("reservation: lease="+reservation.lease_printable(lease))
- logger.verbose('reservation: Listing leases end')
+ logger.log("reservation: lease="+reservation.lease_printable(lease))
+ logger.log('reservation: Listing leases end')
### what do we have to do at this point in time?
ending_lease=None
for lease in leases:
if lease['t_until']==round_now:
- logger.verbose('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
+ logger.log('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
ending_lease=lease
starting_lease=None
for lease in leases:
if lease['t_from']==round_now:
- logger.verbose('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
+ logger.log('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
starting_lease=lease
- ## sanity check
+        ########## nothing is starting or ending
if not ending_lease and not starting_lease:
- logger.log("reservation.granularity_callback: unexpected void event")
- return
-
- ## leases end and restart, about the same sliver
- if ending_lease and starting_lease and ending_lease['name']==starting_lease['name']:
- slicename=ending_lease['name']
- if self.is_running(slicename):
- logger.log("reservation.granularity_callback: end/start of same sliver %s -- ignored"%ending_lease['name'])
- return
+ ### this might be useful for robustness - not sure what to do now though
+ logger.log("reservation.granularity_callback: xxx todo - should make sure to start the running lease if relevant")
+ ########## something to start - something to end
+ elif ending_lease and starting_lease:
+ ## yes, but that's the same sliver
+ if ending_lease['name']==starting_lease['name']:
+ slicename=ending_lease['name']
+ if self.is_running(slicename):
+ logger.log("reservation.granularity_callback: end/start of same sliver %s -- ignored"%ending_lease['name'])
+ else:
+ logger.log("reservation.granularity_callback: mmh, the sliver is unexpectedly not running, starting it...")
+ self.restart_slice(slicename)
+ ## two different slivers
else:
- logger.log("reservation.granularity_callback: mmh, the sliver is unexpectedly not running, starting it...")
- self.restart_slice(slicename)
-
- # otherwise things are simple
- if ending_lease: self.suspend_slice (ending_lease['name'])
- if starting_lease: self.restart_slice (starting_lease['name'])
+ self.restart_slice(starting_lease['name'])
+ self.suspend_slice(ending_lease['name'])
+ ########## something to start, nothing to end
+ elif starting_lease and not ending_lease:
+ self.restart_slice(starting_lease['name'])
+            # with the lease_or_shared policy this is mandatory;
+            # with lease_or_idle it is merely a safety measure
+ self.suspend_all_slices(exclude=starting_lease['name'])
+ ########## so now, something to end, nothing to start
+ else:
+ self.suspend_slice (ending_lease['name'])
+ if self.reservation_policy=='lease_or_shared':
+ logger.log("reservation.granularity_callback: 'lease_or_shared' not implemented - using 'lease_or_idle'")
+ # only lease_or_idle policy available for now: we freeze the box
+ logger.log("reservation.granularity_callback: suspending all slices")
+ self.suspend_all_slices()
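+        # recap of the cases handled above:
+        #   nothing starts or ends      : the mid-lease near-future check lands here
+        #   same sliver ends and starts : keep it running (restart it if needed)
+        #   two different slivers       : start the new one, then suspend the old one
+        #   only a lease starts         : start it and suspend everything else
+        #   only a lease ends           : suspend it, then freeze the whole box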
def debug_box(self,message,slicename=None):
if reservation.debug:
- logger.verbose ('reservation: '+message)
+ logger.log ('reservation: '+message)
logger.log_call( ['/usr/sbin/vserver-stat', ] )
if slicename:
logger.log_call ( ['/usr/sbin/vserver',slicename,'status', ])
-
+
def is_running (self, slicename):
try:
- return accounts.get(slicename).is_running()
+ return account.get(slicename).is_running()
except:
return False
- # quick an d dirty - this does not obey the accounts/sliver_vs/controller hierarchy
+    # quick and dirty - this does not obey the account/sliver_vs/controller hierarchy
def suspend_slice(self, slicename):
logger.log('reservation: Suspending slice %s'%(slicename))
self.debug_box('before suspending',slicename)
- worker=accounts.get(slicename)
+ worker=account.get(slicename)
try:
- logger.verbose("reservation: Located worker object %r"%worker)
+ logger.log("reservation: Located worker object %r"%worker)
worker.stop()
+ except AttributeError:
+ # when the underlying worker is not entirely initialized yet
+ pass
except:
logger.log_exc("reservation.suspend_slice: Could not stop slice %s through its worker"%slicename)
# we hope the status line won't return anything
self.debug_box('after suspending',slicename)
-
+
+ # exclude can be a slicename or a list
+ # this probably should run in parallel
+ def suspend_all_slices (self, exclude=[]):
+ if isinstance(exclude,str): exclude=[exclude,]
+ for sliver in self.data['slivers']:
+ # skip excluded
+ if sliver['name'] in exclude: continue
+ # is this a system sliver ?
+ system_slice=False
+ for d in sliver['attributes']:
+ if d['tagname']=='system' and d['value'] : system_slice=True
+ if system_slice: continue
+ self.suspend_slice(sliver['name'])
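+    # e.g. suspend_all_slices(exclude=starting_lease['name']) suspends every
+    # non-system sliver except the one whose lease just started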
+
def restart_slice(self, slicename):
logger.log('reservation: Restarting slice %s'%(slicename))
self.debug_box('before restarting',slicename)
- worker=accounts.get(slicename)
+ worker=account.get(slicename)
try:
# dig in self.data to retrieve corresponding rec
- slivers = [ sliver for sliver in self.data.slivers if sliver['name']==slicename ]
+ slivers = [ sliver for sliver in self.data['slivers'] if sliver['name']==slicename ]
sliver=slivers[0]
-
- #
- logger.verbose("reservation: Located worker object %r"%worker)
- worker.start(rec)
+            record=database.db.get(slicename)
+            # make sure the record is enabled before we hand it over to the worker
+            record['enabled']=True
+            #
+ logger.log("reservation: Located worker object %r"%worker)
+ logger.log("reservation: Located record at the db %r"%record)
+ worker.start(record)
except:
logger.log_exc("reservation.restart_slice: Could not start slice %s through its worker"%slicename)
# we hope the status line won't return anything
self.debug_box('after restarting',slicename)
-