checkpoint
authorThierry Parmentelat <thierry.parmentelat@sophia.inria.fr>
Fri, 11 Jun 2010 15:07:43 +0000 (15:07 +0000)
committerThierry Parmentelat <thierry.parmentelat@sophia.inria.fr>
Fri, 11 Jun 2010 15:07:43 +0000 (15:07 +0000)
plugins/reservation.py

index d5fdde6..5d3a520 100644 (file)
@@ -11,7 +11,9 @@ import time
 import threading
 
 import logger
+import accounts
 
+# there is an implicit assumption that this triggers after slicemanager
 priority = 45
 
 # this instructs nodemanager that we want to use the latest known data in case the plc link is down
@@ -36,12 +38,51 @@ def GetSlivers(data, conf = None, plc = None):
 ##############################
 class reservation:
 
+    debug=False
+    debug=True
+
     def __init__ (self):
         # the last snapshot of data exposed by GetSlivers
         self.data = None
         # this is a dict mapping a raounded timestamp to the corr. Timer object
         self.timers = {}
  
+    ####################
+    def start(self,options,conf):
+        logger.log("reservation: plugin performing dummy start...")
+
+    # this method is entirely about making sure that we have events scheduled 
+    # at the <granularity> intervals where there is a lease that starts or ends
+    def GetSlivers (self, data, conf=None, plc=None):
+    
+        # check we're using a compliant GetSlivers
+        if 'reservation_policy' not in data: 
+            logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
+            return
+        reservation_policy=data['reservation_policy']
+        if 'leases' not in data: 
+            logger.log_missing_data("reservation.GetSlivers",'leases')
+            return
+
+        # store data locally
+        # since we've asked for persistent_data, we should not get an empty data here
+        if data: self.data = data
+
+        # regular nodes are not affected
+        if reservation_policy == 'none':
+            return
+        elif reservation_policy not in ['lease_or_idle','lease_or_shared']:
+            logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%reservation_policy)
+            return
+        # at this point we have reservation_policy in ['lease_or_idle','lease_or_shared']
+        # we make no difference for now
+        logger.verbose('reservation.GetSlivers : reservable node -- listing timers ')
+        
+        self.sync_timers_from_leases()
+        if reservation.debug:
+            self.list_timers()
+
+    ####################
     # the granularity is set in the API (initial value is 15 minutes)
     # and it used to round all leases start/until times
     # changing this dynamically can have some weird effects of course..
@@ -62,19 +103,19 @@ class reservation:
             timer.cancel()
         self.timers={}
 
-    def clear_timer (self,timestamp):
-        round=self.round_time(timestamp)
-        if round in self.timers:
-            timer=self.timers[round]
-            timer.cancel()
-            del self.timers[round]
-
     def sync_timers_from_leases (self):
         self.clear_timers()
         for lease in self.data['leases']:
             self.ensure_timer(lease['t_from'])
             self.ensure_timer(lease['t_until'])
 
+    def list_timers(self):
+        timestamps=self.timers.keys()
+        timestamps.sort()
+        for timestamp in timestamps:
+            logger.verbose('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
+        logger.verbose('reservation.GetSlivers : end listing timers')
+
     def ensure_timer(self, timestamp):
         now=time.time()
         # forget about past events
@@ -82,75 +123,112 @@ class reservation:
         round=self.round_time(timestamp)
         if round in self.timers: return
         def this_closure ():
-            self.round_time_callback (round)
+            self.granularity_callback (round)
         timer=threading.Timer(timestamp-now,this_closure)
         self.timers[round]=timer
         timer.start()
 
-    def round_time_callback (self, time_arg):
+    
+    @staticmethod
+    def time_printable (timestamp):
+        return time.strftime ("%Y-%m-%d %H:%M UTC",time.gmtime(timestamp))
+
+    @staticmethod
+    def lease_printable (lease):
+        d=dict ( lease.iteritems())
+        d['from']=reservation.time_printable(lease['t_from'])
+        d['until']=reservation.time_printable(lease['t_until'])
+        s=[]
+        s.append("slice=%(name)s (%(slice_id)d)"%d)
+        s.append("from %(from)s"%d)
+        s.append("until %(until)s"%d)
+        return " ".join(s)
+
+    # this is invoked at the granularity boundaries where something happens (a lease ends or/and a lease starts)
+    def granularity_callback (self, time_arg):
         now=time.time()
         round_now=self.round_time(now)
-        logger.log('reservation.round_time_callback now=%f round_now=%d arg=%d...'%(now,round_now,time_arg))
-        leases=self.data.leases
-        if leases:
-            logger.verbose('Listing leases beg')
+        leases=self.data['leases']
+        ###
+        if reservation.debug:
+            logger.verbose('reservation.granularity_callback now=%f round_now=%d arg=%d...'%(now,round_now,time_arg))
+        if leases and reservation.debug:
+            logger.verbose('reservation: Listing leases beg')
             for lease in leases:
-                logger.verbose("lease=%r"%lease)
-            logger.verbose('Listing leases end')
+                logger.verbose("reservation: lease="+reservation.lease_printable(lease))
+            logger.verbose('reservation: Listing leases end')
+
+        ### what do we have to do at this point in time?
+        ending_lease=None
         for lease in leases:
             if lease['t_until']==round_now:
-                logger.log('Suspending slice %s - ending lease %d'%(lease['name'],lease['lease_id']))
-                self.suspend_slice (lease['name'])
+                logger.verbose('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
+                ending_lease=lease
+        starting_lease=None
         for lease in leases:
             if lease['t_from']==round_now:
-                logger.log('Starting slice %s - starting lease %d'%(lease['name'],lease['lease_id']))
-                self.restart_slice (lease['name'])
+                logger.verbose('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
+                starting_lease=lease
 
+        ## sanity check
+        if not ending_lease and not starting_lease:
+            logger.log("reservation.granularity_callback: unexpected void event")
+            return
 
+        ## leases end and restart, about the same sliver
+        if ending_lease and starting_lease and ending_lease['name']==starting_lease['name']:
+            slicename=ending_lease['name']
+            if self.is_running(slicename):
+                logger.log("reservation.granularity_callback: end/start of same sliver %s -- ignored"%ending_lease['name'])
+            else:
+                logger.log("reservation.granularity_callback: mmh, the sliver is unexpectedly not running, starting it...")
+                self.restart_slice(slicename)
+            return
+
+        # otherwise things are simple
+        if ending_lease: self.suspend_slice (ending_lease['name'])
+        if starting_lease: self.restart_slice (starting_lease['name'])
+
+    def debug_box(self,message,slicename=None):
+        if reservation.debug:
+            logger.verbose ('reservation: '+message)
+            logger.log_call( ['/usr/sbin/vserver-stat', ] )
+            if slicename:
+                logger.log_call ( ['/usr/sbin/vserver',slicename,'status', ])
+        
+    def is_running (self, slicename):
+        try:
+            return accounts.get(slicename).is_running()
+        except:
+            return False
+
+    # quick and dirty - this does not obey the accounts/sliver_vs/controller hierarchy 
     def suspend_slice(self, slicename):
-        logger.log('reservation.suspend_slice, slice %s, to be written'%slicename)
+        logger.log('reservation: Suspending slice %s'%(slicename))
+        self.debug_box('before suspending',slicename)
+        worker=accounts.get(slicename)
+        try:
+            logger.verbose("reservation: Located worker object %r"%worker)
+            worker.stop()
+        except:
+            logger.log_exc("reservation.suspend_slice: Could not stop slice %s through its worker"%slicename)
+        # we hope the status line won't return anything
+        self.debug_box('after suspending',slicename)
                 
     def restart_slice(self, slicename):
-        logger.log('reservation.restart_slice, slice %s, to be written'%slicename)
-
-    def show_time (self, timestamp):
-        return time.strftime ("%Y-%m-%d %H:%M %Z",time.gmtime(timestamp))
-
-    ####################
-    def start(self,options,conf):
-        logger.log("reservation: plugin performing dummy start...")
-
-    # this method is entirely about making sure that we have events scheduled 
-    # at the <granularity> intervals where there is a lease that starts or ends
-    def GetSlivers (self, data, conf=None, plc=None):
-    
-        # check we're using a compliant GetSlivers
-        if 'reservation_policy' not in data: 
-            logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
-            return
-        reservation_policy=data['reservation_policy']
-        if 'leases' not in data: 
-            logger.log_missing_data("reservation.GetSlivers",'leases')
-            return
-    
-
-        # store data locally
-        # since we've asked for persistent_data, we should not get an empty data here
-        if data: self.data = data
-
-        # regular nodes are not affected
-        if reservation_policy == 'none':
-            return
-        elif reservation_policy not in ['lease_or_idle','lease_or_shared']:
-            logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%reservation_policy)
-            return
-        # at this point we have reservation_policy in ['lease_or_idle','lease_or_shared']
-        # we make no difference for now
-        logger.verbose('reservation.GetSlivers : reservable node -- listing timers ')
-        
-        self.sync_timers_from_leases()
-        for timestamp in self.timers.keys():
-            logger.verbose('TIMER armed for %s'%self.show_time(timestamp))
-           
-        logger.verbose('reservation.GetSlivers : end listing timers')
+        logger.log('reservation: Restarting slice %s'%(slicename))
+        self.debug_box('before restarting',slicename)
+        worker=accounts.get(slicename)
+        try:
+            # dig in self.data to retrieve corresponding rec
+            slivers = [ sliver for sliver in self.data['slivers'] if sliver['name']==slicename ]
+            sliver=slivers[0]
+            
+            # pass the sliver record so the worker can restore its settings
+            logger.verbose("reservation: Located worker object %r"%worker)
+            worker.start(sliver)
+        except:
+            logger.log_exc("reservation.restart_slice: Could not start slice %s through its worker"%slicename)
+        # we hope the status line won't return anything
+        self.debug_box('after restarting',slicename)