4 # NodeManager plugin - first step of handling reservable nodes
7 Manages running slices when reservation_policy is 'lease_or_idle' or 'lease_or_shared'
17 # there is an implicit assumption that this triggers after slicemanager
20 # this instructs nodemanager that we want to use the latest known data in case the plc link is down
21 persistent_data = True
23 # of course things would be simpler if node manager was to create one instance of the plugins
# instead of blindly calling functions in the module...
26 ##############################
27 # rough implementation for a singleton class
def Singleton (klass,*args,**kwds):
    """Return the one shared instance of *klass*, creating it on first use.

    The instance is cached on the class object itself (``_instance``);
    subsequent calls ignore *args*/*kwds* and hand back the cached object.
    """
    try:
        return klass._instance
    except AttributeError:
        klass._instance = klass(*args, **kwds)
        return klass._instance
34 return Singleton(reservation).start()
def GetSlivers(data, conf = None, plc = None):
    # module-level plugin hook: forward the call to the one shared
    # reservation instance (see Singleton above)
    instance = Singleton(reservation)
    return instance.GetSlivers(data, conf, plc)
39 ##############################
46 # the last snapshot of data exposed by GetSlivers
# this is a dict mapping a rounded timestamp to the corr. Timer object
53 logger.log("reservation: plugin performing dummy start...")
55 # this method is entirely about making sure that we have events scheduled
56 # at the <granularity> intervals where there is a lease that starts or ends
def GetSlivers (self, data, conf=None, plc=None):
    # Entry point invoked with fresh PLC data; arms timers at the
    # granularity boundaries where a lease starts or ends.
    # NOTE(review): the extract has dropped lines inside this method
    # (most likely early 'return' statements after each guard below) --
    # confirm against version control before relying on this text.

    # check we're using a compliant GetSlivers
    if 'reservation_policy' not in data:
        logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
        # NOTE(review): gap here -- an early return is expected
    reservation_policy=data['reservation_policy']
    if 'leases' not in data:
        logger.log_missing_data("reservation.GetSlivers",'leases')
        # NOTE(review): gap here -- an early return is expected

    # since we've asked for persistent_data, we should not get an empty data here
    if data: self.data = data

    # regular nodes are not affected
    if reservation_policy == 'none':
        # NOTE(review): gap -- the body of this branch (presumably 'return') was dropped
    elif reservation_policy not in ['lease_or_idle','lease_or_shared']:
        logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%reservation_policy)
        # NOTE(review): gap -- an early return is expected here too
    # at this point we have reservation_policy in ['lease_or_idle','lease_or_shared']
    # we make no difference for now
    logger.verbose('reservation.GetSlivers : reservable node -- listing timers ')

    self.sync_timers_from_leases()
87 # the granularity is set in the API (initial value is 15 minutes)
# and it is used to round all leases' start/until times
89 # changing this dynamically can have some weird effects of course..
def granularity (self):
    # Return the lease granularity (in seconds) as advertised by the API
    # in the last GetSlivers data.
    # NOTE(review): gap around the return below -- the surrounding
    # try/except (with a fallback default) appears to have been dropped
    # by extraction; confirm against version control.
    return self.data['lease_granularity']
    # in case we'd try to access this before it's populated..
97 # round to granularity
def round_time (self, time):
    """Round *time* (a UNIX timestamp) to the nearest granularity boundary.

    Adds half a slot then floor-divides: classic round-to-nearest on
    integers.  Uses ``//`` rather than ``/``: identical on Python 2 for
    ints, but keeps the result integral under Python 3 (where ``/`` is
    true division and would silently produce floats).

    Note: the parameter keeps its historical name even though it shadows
    the ``time`` module (the module is not needed inside this method).
    """
    granularity = self.granularity()
    return ((int(time) + granularity // 2) // granularity) * granularity
def clear_timers (self):
    # Cancel every armed timer.
    # NOTE(review): the loop body (presumably timer.cancel()) and the
    # reset of self.timers fall in an extraction gap right after this
    # line -- confirm against version control.
    for timer in self.timers.values():
def sync_timers_from_leases (self):
    # Arm one timer per lease boundary (both the start and the end time
    # of every lease in the last GetSlivers data).
    # NOTE(review): a line right after the def (presumably clearing the
    # previously armed timers) fell in an extraction gap -- confirm.
    for lease in self.data['leases']:
        self.ensure_timer(lease['t_from'])
        self.ensure_timer(lease['t_until'])
def list_timers(self):
    # Debug helper: log (verbose) one line per armed timer.
    timestamps=self.timers.keys()
    # NOTE(review): gap here -- the timestamps are presumably sorted
    # before listing; confirm against version control.
    for timestamp in timestamps:
        logger.verbose('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
    logger.verbose('reservation.GetSlivers : end listing timers')
def ensure_timer(self, timestamp):
    # Arm a threading.Timer to fire at <timestamp>, unless the timestamp
    # is already past or a timer for the same rounded slot is armed.
    # NOTE(review): 'now' is used below but its assignment (presumably
    # time.time()) fell in an extraction gap -- confirm.
    # forget about past events
    if timestamp < now: return
    round=self.round_time(timestamp)
    if round in self.timers: return
    # NOTE(review): gap -- 'this_closure' is referenced below but its
    # definition was dropped; the call on the next line is most likely
    # the body of that dropped closure (deferred, not immediate) -- confirm.
    self.granularity_callback (round)
    timer=threading.Timer(timestamp-now,this_closure)
    self.timers[round]=timer
    # NOTE(review): gap after this line -- the timer is presumably started here.
def time_printable (timestamp):
    # Render a UNIX timestamp as e.g. "2010-06-21 14:00 UTC"
    broken_down = time.gmtime(timestamp)
    return time.strftime("%Y-%m-%d %H:%M UTC", broken_down)
def lease_printable (lease):
    """Return a one-line human-readable summary of *lease*.

    Expects a mapping with at least 'name', 'slice_id', 't_from' and
    't_until' keys (as delivered in GetSlivers 'leases' entries).

    Fixes two defects of the previous version:
    * 'until' was mistakenly rendered from t_from instead of t_until
    * dict(lease.iteritems()) was Python-2-only; dict(lease) is an
      equivalent shallow copy that also works on Python 3
    """
    d = dict(lease)
    d['from'] = reservation.time_printable(lease['t_from'])
    d['until'] = reservation.time_printable(lease['t_until'])
    s = []
    s.append("slice=%(name)s (%(slice_id)d)" % d)
    s.append("from %(from)s" % d)
    s.append("until %(until)s" % d)
    return " ".join(s)
148 # this is invoked at the granularity boundaries where something happens (a lease ends or/and a lease starts)
def granularity_callback (self, time_arg):
    # Fired (via a threading.Timer) at a rounded lease boundary; decides
    # which slivers to suspend/restart based on the leases ending and/or
    # starting at this very slot.
    # NOTE(review): this extract has several dropped lines (the assignment
    # of 'now', the loops filling ending_lease/starting_lease, various
    # 'return'/'else:' lines); the gap markers below flag each spot --
    # confirm against version control before editing.
    round_now=self.round_time(now)
    leases=self.data['leases']
    # optional debug dump of the current leases
    if reservation.debug:
        logger.verbose('reservation.granularity_callback now=%f round_now=%d arg=%d...'%(now,round_now,time_arg))
    if leases and reservation.debug:
        logger.verbose('reservation: Listing leases beg')
        # NOTE(review): gap -- a 'for lease in leases:' header was dropped here
        logger.verbose("reservation: lease="+reservation.lease_printable(lease))
        logger.verbose('reservation: Listing leases end')

    ### what do we have to do at this point in time?
    # NOTE(review): gap -- ending_lease/starting_lease are presumably
    # initialized to None and filled by loops over 'leases' enclosing the
    # two tests below; confirm.
    if lease['t_until']==round_now:
        logger.verbose('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
    if lease['t_from']==round_now:
        logger.verbose('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))

    # spurious wakeup: no lease boundary at this slot
    if not ending_lease and not starting_lease:
        logger.log("reservation.granularity_callback: unexpected void event")
        # NOTE(review): gap -- an early return is expected here

    ## leases end and restart, about the same sliver
    if ending_lease and starting_lease and ending_lease['name']==starting_lease['name']:
        slicename=ending_lease['name']
        if self.is_running(slicename):
            logger.log("reservation.granularity_callback: end/start of same sliver %s -- ignored"%ending_lease['name'])
        # NOTE(review): gap -- most likely an 'else:' branch around the two lines below
        logger.log("reservation.granularity_callback: mmh, the sliver is unexpectedly not running, starting it...")
        self.restart_slice(slicename)

    # otherwise things are simple
    # NOTE(review): gap -- the suspend below is presumably guarded by 'if ending_lease:'
    self.suspend_slice (ending_lease['name'])
    if not starting_lease:
        logger.log("'lease_or_shared' is xxx todo - would restart to shared mode")
        # only lease_or_idle available : we freeze the box
        self.suspend_all_slices()
    # NOTE(review): gap -- most likely an 'else:' branch around the restart below
    self.restart_slice(starting_lease['name'])

    # no ending, just one starting
    logger.log("'lease_or_shared' is xxx todo - would stop shared mode")
    # in lease_or_idle, all it takes is restart the sliver
    self.restart_slice (starting_lease['name'])
def debug_box(self,message,slicename=None):
    # In debug mode only: log <message> plus the box-wide vserver stats,
    # and the status of <slicename> when one is given.
    if reservation.debug:
        logger.verbose ('reservation: '+message)
        logger.log_call( ['/usr/sbin/vserver-stat', ] )
        # NOTE(review): gap -- the per-slice status call below is
        # presumably guarded by an 'if slicename:' test that was dropped
        # by extraction; confirm.
        logger.log_call ( ['/usr/sbin/vserver',slicename,'status', ])
def is_running (self, slicename):
    # True iff the sliver's account object reports itself as running.
    # NOTE(review): gap right after the def line -- likely a 'try:' guard
    # (returning False on lookup failure) was dropped; confirm.
    return accounts.get(slicename).is_running()
# quick and dirty - this does not obey the accounts/sliver_vs/controller hierarchy
def suspend_slice(self, slicename):
    # Stop one sliver through its accounts worker object, logging box
    # state before and after (in debug mode).
    logger.log('reservation: Suspending slice %s'%(slicename))
    self.debug_box('before suspending',slicename)
    worker=accounts.get(slicename)
    # NOTE(review): gap -- the verbose line below and the actual stop call
    # appear to sit inside a 'try:' whose header and body were dropped;
    # the log_exc line belongs to the matching 'except' -- confirm.
    logger.verbose("reservation: Located worker object %r"%worker)
    logger.log_exc("reservation.suspend_slice: Could not stop slice %s through its worker"%slicename)
    # we hope the status line won't return anything
    self.debug_box('after suspending',slicename)
def suspend_all_slices (self):
    # Suspend every non-system sliver listed in the last GetSlivers data.
    for sliver in self.data['slivers']:
        # is this a system sliver ?
        # NOTE(review): gap -- the system_slice flag is presumably
        # initialized to False in a dropped line; confirm.
        for d in sliver['attributes']:
            if d['tagname']=='system' and d['value'] : system_slice=True
        # NOTE(review): gap -- the suspend below is presumably guarded by
        # 'if not system_slice:'; confirm against version control.
        self.suspend_slice(sliver['name'])
def restart_slice(self, slicename):
    # (Re)start one sliver: mark its database record enabled, then start
    # it through its accounts worker, logging box state before/after.
    logger.log('reservation: Restarting slice %s'%(slicename))
    self.debug_box('before restarting',slicename)
    worker=accounts.get(slicename)
    # NOTE(review): gap -- the lines below (down to the log_exc) appear to
    # sit inside a 'try:' whose header was dropped; the actual
    # worker start call is also missing; confirm against version control.
    # dig in self.data to retrieve corresponding rec
    slivers = [ sliver for sliver in self.data['slivers'] if sliver['name']==slicename ]
    record=database.db.get(slicename)
    record['enabled']=True
    logger.verbose("reservation: Located worker object %r"%worker)
    logger.verbose("reservation: Located record at the db %r"%record)
    # NOTE(review): the log_exc below belongs to the matching 'except' branch
    logger.log_exc("reservation.restart_slice: Could not start slice %s through its worker"%slicename)
    # we hope the status line won't return anything
    self.debug_box('after restarting',slicename)