# $Id$
# $URL$
#
# NodeManager plugin - first step of handling reservable nodes

"""
Manages running slices when reservation_policy is 'lease_or_idle' or 'lease_or_shared'
"""

import time
import threading

import logger
import accounts

# there is an implicit assumption that this triggers after slicemanager
priority = 45

# this instructs nodemanager that we want to use the latest known data in case the plc link is down
persistent_data = True

# of course things would be simpler if node manager were to create one instance of the plugins
# instead of blindly calling functions in the module...

##############################
# rough implementation for a singleton class
def Singleton (klass,*args,**kwds):
    if not hasattr(klass,'_instance'):
        klass._instance=klass(*args,**kwds)
    return klass._instance

def start(options, conf):
    return Singleton(reservation).start(options,conf)

def GetSlivers(data, conf = None, plc = None):
    return Singleton(reservation).GetSlivers(data, conf, plc)

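# a minimal sketch of how these hooks are expected to be driven by nodemanager
# (the surrounding calls are illustrative, not part of this module):
#
#   import reservation
#   reservation.start(options, config)        # once, at plugin startup
#   reservation.GetSlivers(getslivers_data)   # on every GetSlivers round
#
# both calls end up on the same 'reservation' instance thanks to Singleton
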
##############################
class reservation:

    debug=False
    debug=True

    def __init__ (self):
        # the last snapshot of data exposed by GetSlivers
        self.data = None
        # a dict mapping each rounded timestamp to the corresponding Timer object
        self.timers = {}

    ####################
    def start(self,options,conf):
        logger.log("reservation: plugin performing dummy start...")

    # this method is entirely about making sure that we have events scheduled
    # at the <granularity> intervals where there is a lease that starts or ends
    def GetSlivers (self, data, conf=None, plc=None):

        # check we're using a compliant GetSlivers
        if 'reservation_policy' not in data:
            logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
            return
        reservation_policy=data['reservation_policy']
        if 'leases' not in data:
            logger.log_missing_data("reservation.GetSlivers",'leases')
            return

        # store data locally
        # since we've asked for persistent_data, we should not get an empty data here
        if data: self.data = data

        # regular nodes are not affected
        if reservation_policy == 'none':
            return
        elif reservation_policy not in ['lease_or_idle','lease_or_shared']:
            logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%reservation_policy)
            return
        # at this point we have reservation_policy in ['lease_or_idle','lease_or_shared']
        # for now we make no distinction between the two
        logger.verbose('reservation.GetSlivers : reservable node -- listing timers')

        self.sync_timers_from_leases()
        if reservation.debug:
            self.list_timers()

    ####################
    # the granularity is set in the API (initial value is 15 minutes)
    # and it is used to round all lease start/until times
    # changing this dynamically can have some weird effects of course..
    def granularity (self):
        try:
            return self.data['lease_granularity']
        # in case we'd try to access this before it's populated..
        except:
            return 60*60

    # round to granularity
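    # a worked example, assuming the one-hour fallback granularity and
    # Python 2 integer division (granularity/2 == 1800):
    #   round_time(1799) -> ((1799+1800)/3600)*3600 -> 0
    #   round_time(1800) -> ((1800+1800)/3600)*3600 -> 3600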
    def round_time (self, time):
        granularity=self.granularity()
        return ((int(time)+granularity/2)/granularity)*granularity

    def clear_timers (self):
        for timer in self.timers.values():
            timer.cancel()
        self.timers={}

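    # rearm everything from scratch: each lease contributes (at most) two
    # timers, one at its start and one at its end -- boundaries already in
    # the past are silently dropped by ensure_timer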
    def sync_timers_from_leases (self):
        self.clear_timers()
        for lease in self.data['leases']:
            self.ensure_timer(lease['t_from'])
            self.ensure_timer(lease['t_until'])

    def list_timers(self):
        timestamps=self.timers.keys()
        timestamps.sort()
        for timestamp in timestamps:
            logger.verbose('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
        logger.verbose('reservation.GetSlivers : end listing timers')

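    # timers are keyed on the *rounded* timestamp, so several leases sharing
    # the same boundary end up arming a single threading.Timer; the callback
    # is handed that rounded value so it can be matched against the leases'
    # (also rounded) t_from / t_until fields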
    def ensure_timer(self, timestamp):
        now=time.time()
        # forget about past events
        if timestamp < now: return
        round=self.round_time(timestamp)
        if round in self.timers: return
        def this_closure ():
            self.granularity_callback (round)
        timer=threading.Timer(timestamp-now,this_closure)
        self.timers[round]=timer
        timer.start()

    @staticmethod
    def time_printable (timestamp):
        return time.strftime ("%Y-%m-%d %H:%M UTC",time.gmtime(timestamp))

    @staticmethod
    def lease_printable (lease):
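        # illustrative output, for a hypothetical lease
        # {'name':'foo_slice', 'slice_id':12, 't_from':..., 't_until':...}:
        #   "slice=foo_slice (12) from 2010-01-01 10:00 UTC until 2010-01-01 11:00 UTC"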
        d=dict ( lease.iteritems())
        d['from']=reservation.time_printable(lease['t_from'])
        d['until']=reservation.time_printable(lease['t_until'])
        s=[]
        s.append("slice=%(name)s (%(slice_id)d)"%d)
        s.append("from %(from)s"%d)
        s.append("until %(until)s"%d)
        return " ".join(s)

    # this is invoked at the granularity boundaries where something happens (a lease ends and/or a lease starts)
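    # summing up, for a given boundary:
    #   - only a lease ending                   -> suspend that sliver
    #   - only a lease starting                 -> restart that sliver
    #   - same sliver both ending and starting  -> leave it alone (restart it if it is unexpectedly down)
    #   - different slivers                     -> suspend the ending one, restart the starting one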
    def granularity_callback (self, time_arg):
        now=time.time()
        round_now=self.round_time(now)
        leases=self.data['leases']
        ###
        if reservation.debug:
            logger.verbose('reservation.granularity_callback now=%f round_now=%d arg=%d...'%(now,round_now,time_arg))
        if leases and reservation.debug:
            logger.verbose('reservation: Listing leases beg')
            for lease in leases:
                logger.verbose("reservation: lease="+reservation.lease_printable(lease))
            logger.verbose('reservation: Listing leases end')

        ### what do we have to do at this point in time?
        ending_lease=None
        for lease in leases:
            if lease['t_until']==round_now:
                logger.verbose('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
                ending_lease=lease
        starting_lease=None
        for lease in leases:
            if lease['t_from']==round_now:
                logger.verbose('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
                starting_lease=lease

        ## sanity check
        if not ending_lease and not starting_lease:
            logger.log("reservation.granularity_callback: unexpected void event")
            return

        ## back-to-back leases on the same sliver: one ends and the next one starts
        if ending_lease and starting_lease and ending_lease['name']==starting_lease['name']:
            slicename=ending_lease['name']
            if self.is_running(slicename):
                logger.log("reservation.granularity_callback: end/start of same sliver %s -- ignored"%ending_lease['name'])
                return
            else:
                logger.log("reservation.granularity_callback: mmh, the sliver is unexpectedly not running, starting it...")
                self.restart_slice(slicename)

        # otherwise things are simple
        if ending_lease: self.suspend_slice (ending_lease['name'])
        if starting_lease: self.restart_slice (starting_lease['name'])

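    # debugging helper -- dumps the current vserver state through the
    # util-vserver command line tools, so this output is only meaningful
    # on a vserver-based node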
    def debug_box(self,message,slicename=None):
        if reservation.debug:
            logger.verbose ('reservation: '+message)
            logger.log_call( ['/usr/sbin/vserver-stat', ] )
            if slicename:
                logger.log_call ( ['/usr/sbin/vserver',slicename,'status', ])

    def is_running (self, slicename):
        try:
            return accounts.get(slicename).is_running()
        except:
            return False

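    # both suspend_slice and restart_slice act directly on the worker object
    # returned by accounts.get(); all they assume about it is a stop() and a
    # start(rec) method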
    # quick and dirty - this does not obey the accounts/sliver_vs/controller hierarchy
    def suspend_slice(self, slicename):
        logger.log('reservation: Suspending slice %s'%(slicename))
        self.debug_box('before suspending',slicename)
        worker=accounts.get(slicename)
        try:
            logger.verbose("reservation: Located worker object %r"%worker)
            worker.stop()
        except:
            logger.log_exc("reservation.suspend_slice: Could not stop slice %s through its worker"%slicename)
        # we hope the status line won't return anything
        self.debug_box('after suspending',slicename)

    def restart_slice(self, slicename):
        logger.log('reservation: Restarting slice %s'%(slicename))
        self.debug_box('before restarting',slicename)
        worker=accounts.get(slicename)
        try:
            # dig in self.data to retrieve the corresponding rec
            slivers = [ sliver for sliver in self.data['slivers'] if sliver['name']==slicename ]
            sliver=slivers[0]

            logger.verbose("reservation: Located worker object %r"%worker)
            worker.start(sliver)
        except:
            logger.log_exc("reservation.restart_slice: Could not start slice %s through its worker"%slicename)
        # we hope the status line won't return anything
        self.debug_box('after restarting',slicename)