# NodeManager plugin - first step of handling reservable nodes
# Thierry Parmentelat <thierry.parmentelat@inria.fr>

"""
Manages running slices when reservation_policy is 'lease_or_idle' or 'lease_or_shared'
"""

import time
import threading

# nodemanager's own modules
import logger
import account
import database
# there is an implicit assumption that this triggers after slicemanager

# this instructs nodemanager that we want to use the latest known data in case the plc link is down
persistent_data = True
# of course things would be simpler if node manager created one instance of each plugin
# instead of blindly calling functions in the module...
##############################
# rough implementation for a singleton class
def Singleton (klass,*args,**kwds):
    if not hasattr(klass,'_instance'):
        klass._instance=klass(*args,**kwds)
    return klass._instance
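
# example, with a hypothetical class Foo: repeated calls hand back the
# very same object, and constructor arguments only matter on the first call
#   class Foo: pass
#   assert Singleton(Foo) is Singleton(Foo)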
def start():
    return Singleton(reservation).start()
def GetSlivers(data, conf = None, plc = None):
    return Singleton(reservation).GetSlivers(data, conf, plc)
##############################
class reservation:

    # set to True to get verbose logs
    debug=False

    def __init__ (self):
        # the last snapshot of data exposed by GetSlivers
        self.data = None
        # this is a dict mapping a rounded timestamp to the corresponding Timer object
        self.timers = {}
    def start (self):
        logger.log("reservation: plugin performing dummy start...")
    # this method is entirely about making sure that we have events scheduled
    # at the <granularity> boundaries where a lease starts or ends
    def GetSlivers (self, data, conf=None, plc=None):
        # check that we're using a compliant GetSlivers
        if 'reservation_policy' not in data:
            logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
            return
        self.reservation_policy=data['reservation_policy']
        if 'leases' not in data:
            logger.log_missing_data("reservation.GetSlivers",'leases')
            return

        # since we've asked for persistent_data, we should not get empty data here
        if data: self.data = data
        # regular nodes are not affected
        if self.reservation_policy == 'none':
            return
        elif self.reservation_policy not in ['lease_or_idle','lease_or_shared']:
            logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%self.reservation_policy)
            return
        # at this point reservation_policy is in ['lease_or_idle','lease_or_shared']
        # and we make no difference between the two for now
        logger.log("reservation.GetSlivers: reservable node -- policy=%s"%self.reservation_policy)
        self.sync_timers_from_leases()
        logger.log("reservation.GetSlivers: listing timers")
        self.list_timers()
    # the granularity is set in the API (initial value is 15 minutes)
    # and is used to round all lease start/until times
    # changing this dynamically can have some weird effects of course..
    def granularity (self):
        try:
            return self.data['lease_granularity']
        # in case we'd try to access this before it's populated..
        except:
            # fall back to one hour
            return 60*60
    # round to granularity
    def round_time (self, timestamp):
        granularity=self.granularity()
        return ((int(timestamp)+granularity//2)//granularity)*granularity
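
    # worked example (hypothetical values): with the default 15-minute
    # granularity (900 seconds),
    #   round_time(1000) == 900     since (1000+450)//900 == 1
    #   round_time(1351) == 1800    since (1351+450)//900 == 2
    # i.e. timestamps get rounded to the nearest multiple of <granularity>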
    def clear_timers (self):
        for timer in self.timers.values():
            timer.cancel()
        self.timers={}
    def sync_timers_from_leases (self):
        # start from a clean slate
        self.clear_timers()
        for lease in self.data['leases']:
            self.ensure_timer_from_until(lease['t_from'],lease['t_until'])
    def ensure_timer_from_until (self, t1,t2):
        now=time.time()
        # both times are in the past: forget about it
        if t2 < now: return
        # we're in the middle of the lease: make sure to arm a callback in the near future for checking
        # this mostly is for starting the slice if nodemanager gets started in the middle of a lease
        if t1 < now:
            self.ensure_timer (now,now+10)
        # both are in the future: arm them
        else:
            self.ensure_timer (now,self.round_time(t1))
            self.ensure_timer (now,self.round_time(t2))
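
    # example (hypothetical timestamps): with now=1000 and a lease spanning
    # [t1=900, t2=2000] we are mid-lease, so a single check is armed at
    # now+10=1010; with [t1=1800, t2=2700] both boundaries are in the
    # future and each gets its own timer at the rounded timestamp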
    def ensure_timer(self, now, timestamp):
        if timestamp in self.timers: return
        def this_closure ():
            logger.log("TIMER triggering at %s (was armed at %s, expected to trigger at %s)"%
                       (reservation.time_printable(time.time()),
                        reservation.time_printable(now),
                        reservation.time_printable(timestamp)))
            self.granularity_callback (now)
        timer=threading.Timer(timestamp-now,this_closure)
        self.timers[timestamp]=timer
        timer.start()
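
    # note: threading.Timer expects a delay in seconds relative to now,
    # hence the timestamp-now argument above; a pending timer that gets
    # cancel()ed in clear_timers simply never fires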
    def list_timers(self):
        # list armed timers in chronological order
        for timestamp in sorted(self.timers.keys()):
            logger.log('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
        logger.log('reservation.GetSlivers: end listing timers')
    @staticmethod
    def time_printable (timestamp):
        return time.strftime ("%Y-%m-%d %H:%M UTC",time.gmtime(timestamp))
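
    # for the record: time_printable(0) == '1970-01-01 00:00 UTC'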
    @staticmethod
    def lease_printable (lease):
        d=dict ( lease.iteritems())
        d['from']=reservation.time_printable(lease['t_from'])
        d['until']=reservation.time_printable(lease['t_until'])
        s=[]
        s.append("slice=%(name)s (%(slice_id)d)"%d)
        s.append("from %(from)s"%d)
        s.append("until %(until)s"%d)
        return " ".join(s)
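
    # example (hypothetical lease):
    #   lease_printable({'name':'inria_foo','slice_id':12,'t_from':0,'t_until':900})
    # returns
    #   'slice=inria_foo (12) from 1970-01-01 00:00 UTC until 1970-01-01 00:15 UTC'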
    # this is invoked at the granularity boundaries where something happens (a lease ends and/or a lease starts)
    def granularity_callback (self, time_arg):
        now=time.time()
        round_now=self.round_time(now)
        leases=self.data['leases']
        if reservation.debug:
            logger.log('reservation.granularity_callback now=%f round_now=%d arg=%d...'%(now,round_now,time_arg))
        if leases and reservation.debug:
            logger.log('reservation: Listing leases beg')
            for lease in leases:
                logger.log("reservation: lease="+reservation.lease_printable(lease))
            logger.log('reservation: Listing leases end')
        ### what do we have to do at this point in time?
        ending_lease=None
        for lease in leases:
            if lease['t_until']==round_now:
                logger.log('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
                ending_lease=lease
        starting_lease=None
        for lease in leases:
            if lease['t_from']==round_now:
                logger.log('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
                starting_lease=lease
        ########## nothing is starting nor ending
        if not ending_lease and not starting_lease:
            ### this might be useful for robustness - not sure what to do now though
            logger.log("reservation.granularity_callback: xxx todo - should make sure to start the running lease if relevant")
        ########## something to start - something to end
        elif ending_lease and starting_lease:
            ## yes, but that's the same sliver
            if ending_lease['name']==starting_lease['name']:
                slicename=ending_lease['name']
                if self.is_running(slicename):
                    logger.log("reservation.granularity_callback: end/start of same sliver %s -- ignored"%ending_lease['name'])
                else:
                    logger.log("reservation.granularity_callback: mmh, the sliver is unexpectedly not running, starting it...")
                    self.restart_slice(slicename)
            ## two different slivers
            else:
                self.restart_slice(starting_lease['name'])
                self.suspend_slice(ending_lease['name'])
        ########## something to start, nothing to end
        elif starting_lease and not ending_lease:
            self.restart_slice(starting_lease['name'])
            # with the lease_or_shared policy, this is mandatory
            # otherwise it's just the safe thing to do
            self.suspend_all_slices(exclude=starting_lease['name'])
        ########## so now, something to end, nothing to start
        else:
            self.suspend_slice (ending_lease['name'])
            if self.reservation_policy=='lease_or_shared':
                logger.log("reservation.granularity_callback: 'lease_or_shared' not implemented - using 'lease_or_idle'")
            # only the lease_or_idle policy is available for now: we freeze the box
            logger.log("reservation.granularity_callback: suspending all slices")
            self.suspend_all_slices()
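
    # to summarize the decision tree above:
    #   nothing ending, nothing starting -> no-op (robustness todo)
    #   same sliver ending and starting  -> leave it running
    #   two different slivers            -> restart one, suspend the other
    #   only a start                     -> restart it, suspend all others
    #   only an end                      -> freeze the box (lease_or_idle)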
    def debug_box(self,message,slicename=None):
        if reservation.debug:
            logger.log ('reservation: '+message)
            logger.log_call( ['/usr/sbin/vserver-stat', ] )
            if slicename:
                logger.log_call ( ['/usr/sbin/vserver',slicename,'status', ])
    def is_running (self, slicename):
        try:
            return account.get(slicename).is_running()
        except:
            return False
    # quick and dirty - this does not obey the account/sliver_vs/controller hierarchy
    def suspend_slice(self, slicename):
        logger.log('reservation: Suspending slice %s'%(slicename))
        self.debug_box('before suspending',slicename)
        worker=account.get(slicename)
        try:
            logger.log("reservation: Located worker object %r"%worker)
            worker.stop()
        except AttributeError:
            # when the underlying worker is not entirely initialized yet
            pass
        except:
            logger.log_exc("reservation.suspend_slice: Could not stop slice through its worker",name=slicename)
        # we hope the status line won't return anything
        self.debug_box('after suspending',slicename)
    # exclude can be a slicename or a list of slicenames
    # this probably should run in parallel
    def suspend_all_slices (self, exclude=[]):
        if isinstance(exclude,str): exclude=[exclude,]
        for sliver in self.data['slivers']:
            # skip excluded slivers
            if sliver['name'] in exclude: continue
            # is this a system sliver ?
            system_slice=False
            for d in sliver['attributes']:
                if d['tagname']=='system' and d['value']: system_slice=True
            if system_slice: continue
            self.suspend_slice(sliver['name'])
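
    # usage note ('inria_foo' is a hypothetical slice name):
    #   suspend_all_slices(exclude='inria_foo')
    #   suspend_all_slices(exclude=['inria_foo'])
    # behave identically, thanks to the isinstance normalization above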
    def restart_slice(self, slicename):
        logger.log('reservation: Restarting slice %s'%(slicename))
        self.debug_box('before restarting',slicename)
        worker=account.get(slicename)
        try:
            # dig in self.data to retrieve the corresponding record
            # (indexing raises if the slice is not in the snapshot, and the except catches it)
            slivers = [ sliver for sliver in self.data['slivers'] if sliver['name']==slicename ]
            sliver=slivers[0]
            record=database.db.get(slicename)
            record['enabled']=True
            #
            logger.log("reservation: Located worker object %r"%worker)
            logger.log("reservation: Located record in the db %r"%record)
            worker.start(record)
        except:
            logger.log_exc("reservation.restart_slice: Could not start slice through its worker",name=slicename)
        # we hope the status line won't return anything
        self.debug_box('after restarting',slicename)
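
##############################
# for reference, a minimal sketch of the data layout this plugin expects
# from GetSlivers - field names are taken from the code above, all values
# are hypothetical:
#
# data = {
#     'reservation_policy': 'lease_or_idle',
#     'lease_granularity': 900,
#     'leases': [ {'name': 'inria_foo', 'slice_id': 12,
#                  't_from': 1000000800, 't_until': 1000001700} ],
#     'slivers': [ {'name': 'inria_foo',
#                   'attributes': [ {'tagname': 'system', 'value': ''} ]} ],
# }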