# NodeManager plugin - first step of handling reservable nodes
# Thierry Parmentelat <thierry.parmentelat@inria.fr>
"""
Manages running slices when reservation_policy is 'lease_or_idle' or 'lease_or_shared'
"""
# NOTE(review): this listing has gaps -- the docstring delimiters and the
# import lines are missing; delimiters reconstructed above.
# there is an implicit assumption that this triggers after slicemanager
# this instructs nodemanager that we want to use the latest known data in case the plc link is down
persistent_data = True
# of course things would be simpler if node manager was to create one instance of the plugins
# instead of blindly calling functions in the module...
##############################
# rough implementation for a singleton class
def Singleton(klass, *args, **kwds):
    """Return the one shared instance of *klass*, creating it on first use.

    The construction arguments are only honored the very first time the
    instance is built; later calls ignore them and hand back the cached
    object stored on the class itself.
    """
    try:
        return klass._instance
    except AttributeError:
        klass._instance = klass(*args, **kwds)
        return klass._instance
34 return Singleton(reservation).start()
def GetSlivers(data, conf = None, plc = None):
    """Module-level hook: forward the GetSlivers event to the shared plugin instance."""
    plugin = Singleton(reservation)
    return plugin.GetSlivers(data, conf, plc)
39 ##############################
46 # the last snapshot of data exposed by GetSlivers
48 # this is a dict mapping a rounded timestamp to the corresponding Timer object
53 logger.log("reservation: plugin performing dummy start...")
# this method is entirely about making sure that we have events scheduled
# at the <granularity> intervals where there is a lease that starts or ends
# NOTE(review): this listing is incomplete -- lines are missing after each
# log_missing_data call and after the 'none' branch (presumably 'return'
# statements); the visible code is kept verbatim below.
def GetSlivers (self, data, conf=None, plc=None):
    # check we're using a compliant GetSlivers
    if 'reservation_policy' not in data:
        logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
        # NOTE(review): line missing here -- presumably 'return'
    reservation_policy=data['reservation_policy']
    if 'leases' not in data:
        logger.log_missing_data("reservation.GetSlivers",'leases')
        # NOTE(review): line missing here -- presumably 'return'
    # since we've asked for persistent_data, we should not get an empty data here
    if data: self.data = data
    # regular nodes are not affected
    if reservation_policy == 'none':
        # NOTE(review): suite missing from this listing -- presumably 'return'
    elif reservation_policy not in ['lease_or_idle','lease_or_shared']:
        logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%reservation_policy)
        # NOTE(review): line missing here -- presumably 'return'
    # at this point we have reservation_policy in ['lease_or_idle','lease_or_shared']
    # we make no difference for now
    logger.log('reservation.GetSlivers : reservable node -- listing timers ')
    self.sync_timers_from_leases()
# the granularity is set in the API (initial value is 15 minutes)
# and it is used to round all leases start/until times
# changing this dynamically can have some weird effects of course..
def granularity (self):
    # NOTE(review): a line is missing before the return (presumably 'try:')
    return self.data['lease_granularity']
    # in case we'd try to access this before it's populated..
    # NOTE(review): the fallback (except clause / default value) is missing
    # from this listing
# round to granularity
def round_time (self, time):
    """Round an epoch timestamp to the nearest lease-granularity boundary.

    Note: the parameter is named 'time' and shadows the 'time' module inside
    this method; harmless here since the module is not referenced.
    """
    granularity=self.granularity()
    # use floor division ('//') so the arithmetic stays integral: identical
    # to the historical python2 '/' on ints, but safe under python3 where
    # '/' would produce a float and break the rounding
    return ((int(time)+granularity//2)//granularity)*granularity
# Cancel every armed timer (debug/reset helper).
def clear_timers (self):
    for timer in self.timers.values():
        # NOTE(review): the loop body and the reset of self.timers are
        # missing from this listing -- presumably timer.cancel() followed
        # by self.timers={}
# Arm one timer per lease boundary found in the last data snapshot.
def sync_timers_from_leases (self):
    # NOTE(review): a line is missing here -- presumably self.clear_timers()
    for lease in self.data['leases']:
        self.ensure_timer_from_until(lease['t_from'],lease['t_until'])
# Arm timers for a single lease given its (t_from, t_until) bounds.
# NOTE(review): this listing is incomplete -- the computation of 'now' and
# the if/elif conditions comparing t1/t2 against it are missing; only the
# branch comments and the ensure_timer calls remain. Also note the two
# call arities below differ (one argument vs two) -- the one-argument call
# was presumably different in the original; verify against upstream.
def ensure_timer_from_until (self, t1,t2):
    # both times are in the past: forget about it
    # we're in the middle of the lease: make sure to arm a callback in the near future for checking
    # this mostly is for starting the slice if nodemanager gets started in the middle of a lease
    self.ensure_timer (now+5)
    # both are in the future : arm them
    self.ensure_timer (now,self.round_time(t1))
    self.ensure_timer (now,self.round_time(t2))
# Arm a threading.Timer to fire granularity_callback at 'timestamp',
# unless one is already armed for that (rounded) timestamp.
def ensure_timer(self, now, timestamp):
    if timestamp in self.timers: return
    # NOTE(review): lines missing here -- presumably 'def this_closure():';
    # the two statements below look like that closure's body
    logger.log("TIMER trigering at %s (was armed at %s, expected to trigger at %s)"%\
               (reservation.time_printable(time.time()),
                reservation.time_printable(now),
                reservation.time_printable(timestamp)))
    self.granularity_callback (now)
    timer=threading.Timer(timestamp-now,this_closure)
    self.timers[timestamp]=timer
    # NOTE(review): timer.start() appears to be missing from this listing
# Log every armed timer (debug helper).
def list_timers(self):
    timestamps=self.timers.keys()
    # NOTE(review): a line is missing here -- presumably timestamps.sort()
    for timestamp in timestamps:
        logger.log('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
    logger.log('reservation.GetSlivers : end listing timers')
def time_printable (timestamp):
    """Render an epoch *timestamp* as a human-readable UTC string."""
    broken_down = time.gmtime(timestamp)
    return time.strftime("%Y-%m-%d %H:%M UTC", broken_down)
# Render a lease dict as "slice=... from ... until ..." (debug helper).
def lease_printable (lease):
    # py2-ism: dict.iteritems; copies the lease so printable fields can be overlaid
    d=dict ( lease.iteritems())
    d['from']=reservation.time_printable(lease['t_from'])
    # NOTE(review): BUG -- 'until' is computed from t_from; this should
    # almost certainly read lease['t_until']
    d['until']=reservation.time_printable(lease['t_from'])
    # NOTE(review): initialization of 's' (presumably s=[]) is missing
    # from this listing
    s.append("slice=%(name)s (%(slice_id)d)"%d)
    s.append("from %(from)s"%d)
    s.append("until %(until)s"%d)
    # NOTE(review): the return (presumably joining 's') is missing from
    # this listing
# this is invoked at the granularity boundaries where something happens (a lease ends or/and a lease starts)
# NOTE(review): this listing is heavily gapped -- the computation of 'now',
# the loop over leases, and the assignments of ending_lease/starting_lease
# are missing; the visible skeleton is kept verbatim with notes at the gaps.
def granularity_callback (self, time_arg):
    # NOTE(review): line missing -- presumably now=time.time()
    round_now=self.round_time(now)
    leases=self.data['leases']
    if reservation.debug:
        logger.log('reservation.granularity_callback now=%f round_now=%d arg=%d...'%(now,round_now,time_arg))
    if leases and reservation.debug:
        logger.log('reservation: Listing leases beg')
        # NOTE(review): missing 'for lease in leases:' line
        logger.log("reservation: lease="+reservation.lease_printable(lease))
        logger.log('reservation: Listing leases end')

    ### what do we have to do at this point in time?
    # NOTE(review): missing lines -- presumably ending_lease/starting_lease
    # initialization and a loop over leases enclosing the two tests below
    if lease['t_until']==round_now:
        logger.log('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
        # NOTE(review): missing line -- presumably ending_lease=lease
    if lease['t_from']==round_now:
        logger.log('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
        # NOTE(review): missing line -- presumably starting_lease=lease

    # nothing starts nor ends at this boundary
    if not ending_lease and not starting_lease:
        logger.log("reservation.granularity_callback: make sure to start the running lease if relevant")
        # NOTE(review): missing lines here (presumably the actual handling and a return)

    ## leases end and restart, about the same sliver
    if ending_lease and starting_lease and ending_lease['name']==starting_lease['name']:
        slicename=ending_lease['name']
        if self.is_running(slicename):
            logger.log("reservation.granularity_callback: end/start of same sliver %s -- ignored"%ending_lease['name'])
            # NOTE(review): missing lines -- presumably 'return' then 'else:'
            logger.log("reservation.granularity_callback: mmh, the sliver is unexpectedly not running, starting it...")
            self.restart_slice(slicename)
            # NOTE(review): missing line -- presumably 'return'

    # otherwise things are simple
    # NOTE(review): missing guard -- presumably 'if ending_lease:'
    self.suspend_slice (ending_lease['name'])
    if not starting_lease:
        logger.log("'lease_or_shared' is xxx todo - would restart to shared mode")
        # only lease_or_idle available : we freeze the box
        self.suspend_all_slices()
        # NOTE(review): missing 'else:' branch line
        self.restart_slice(starting_lease['name'])
        # NOTE(review): missing lines separating the branch below
        # no ending, just one starting
        logger.log("'lease_or_shared' is xxx todo - would stop shared mode")
        # in lease_or_idle, all it takes is restart the sliver
        self.restart_slice (starting_lease['name'])
# Log vserver status for debugging; only active when reservation.debug is set.
def debug_box(self,message,slicename=None):
    if reservation.debug:
        logger.log ('reservation: '+message)
        logger.log_call( ['/usr/sbin/vserver-stat', ] )
        # NOTE(review): missing guard here -- presumably 'if slicename:'
        logger.log_call ( ['/usr/sbin/vserver',slicename,'status', ])
# True if the sliver account for 'slicename' is currently running.
def is_running (self, slicename):
    # NOTE(review): a line is missing before the return in this listing
    return accounts.get(slicename).is_running()
# quick and dirty - this does not obey the accounts/sliver_vs/controller hierarchy
# Stop the sliver through its worker object, logging any failure.
def suspend_slice(self, slicename):
    logger.log('reservation: Suspending slice %s'%(slicename))
    self.debug_box('before suspending',slicename)
    worker=accounts.get(slicename)
    # NOTE(review): missing line here -- presumably 'try:'
    logger.log("reservation: Located worker object %r"%worker)
    # NOTE(review): missing lines -- presumably worker.stop() then 'except:'
    logger.log_exc("reservation.suspend_slice: Could not stop slice %s through its worker"%slicename)
    # we hope the status line won't return anything
    self.debug_box('after suspending',slicename)
# Suspend every non-system sliver present in the last data snapshot.
def suspend_all_slices (self):
    for sliver in self.data['slivers']:
        # is this a system sliver ?
        # NOTE(review): missing line -- presumably system_slice=False
        for d in sliver['attributes']:
            if d['tagname']=='system' and d['value'] : system_slice=True
        # NOTE(review): missing guard -- presumably 'if not system_slice:'
        self.suspend_slice(sliver['name'])
# (Re)start the sliver through its worker object, re-enabling its db record first.
def restart_slice(self, slicename):
    logger.log('reservation: Restarting slice %s'%(slicename))
    self.debug_box('before restarting',slicename)
    worker=accounts.get(slicename)
    # NOTE(review): missing line here -- presumably 'try:'
    # dig in self.data to retrieve corresponding rec
    slivers = [ sliver for sliver in self.data['slivers'] if sliver['name']==slicename ]
    # NOTE(review): missing line -- presumably sliver=slivers[0]
    record=database.db.get(slicename)
    record['enabled']=True
    # NOTE(review): a line is missing here in this listing
    logger.log("reservation: Located worker object %r"%worker)
    logger.log("reservation: Located record at the db %r"%record)
    # NOTE(review): missing lines -- presumably worker.start(...) then 'except:'
    logger.log_exc("reservation.restart_slice: Could not start slice %s through its worker"%slicename)
    # we hope the status line won't return anything
    self.debug_box('after restarting',slicename)