6b19691ef19a6530f36d4833364887d111a37489
[nodemanager.git] / plugins / reservation.py
1 # $Id$
2 # $URL$
3 #
4 # NodeManager plugin - first step of handling reservable nodes
5
6 """
7 Manages running slices when reservation_policy is 'lease_or_idle' or 'lease_or_shared'
8 """
9
10 import time
11 import threading
12
13 import logger
14 import accounts
15 import database
16
17 # there is an implicit assumption that this triggers after slicemanager
18 priority = 45
19
20 # this instructs nodemanager that we want to use the latest known data in case the plc link is down
21 persistent_data = True
22
23 # of course things would be simpler if node manager was to create one instance of the plugins
24 # instead of blindly calling functions in the module...
25
26 ##############################
27 # rough implementation for a singleton class
# rough implementation for a singleton class
def Singleton (klass,*args,**kwds):
    """Return the one shared instance of *klass*, creating it on first use.

    The instance is cached on the class object itself (``_instance``), so
    every later call returns the very same object regardless of arguments.
    """
    try:
        return klass._instance
    except AttributeError:
        klass._instance = klass(*args,**kwds)
        return klass._instance
32
def start(options, conf):
    """Module-level nodemanager hook: delegate to the shared plugin instance."""
    plugin = Singleton(reservation)
    return plugin.start(options, conf)
35
def GetSlivers(data, conf = None, plc = None):
    """Module-level nodemanager hook: forward GetSlivers to the shared instance."""
    plugin = Singleton(reservation)
    return plugin.GetSlivers(data, conf, plc)
38
39 ##############################
class reservation:
    """Suspends/restarts slivers at lease boundaries on reservable nodes.

    One threading.Timer is armed per rounded timestamp where at least one
    lease starts or ends; when it fires, granularity_callback decides which
    slivers to suspend or restart.
    """

    # verbose-debugging toggle for the whole plugin
    debug = True

    def __init__ (self):
        # the last snapshot of data exposed by GetSlivers
        self.data = None
        # this is a dict mapping a rounded timestamp to the corr. Timer object
        self.timers = {}

    ####################
    def start(self,options,conf):
        # nothing to set up at plugin-start time
        logger.log("reservation: plugin performing dummy start...")

    # this method is entirely about making sure that we have events scheduled
    # at the <granularity> intervals where there is a lease that starts or ends
    def GetSlivers (self, data, conf=None, plc=None):
        """Refresh the armed timers from the leases carried in *data*.

        Bails out (with a log) on missing keys or an unexpected
        reservation_policy; does nothing on regular ('none') nodes.
        """
        # check we're using a compliant GetSlivers
        if 'reservation_policy' not in data:
            logger.log_missing_data("reservation.GetSlivers",'reservation_policy')
            return
        reservation_policy=data['reservation_policy']
        if 'leases' not in data:
            logger.log_missing_data("reservation.GetSlivers",'leases')
            return

        # store data locally
        # since we've asked for persistent_data, we should not get an empty data here
        if data: self.data = data

        # regular nodes are not affected
        if reservation_policy == 'none':
            return
        elif reservation_policy not in ['lease_or_idle','lease_or_shared']:
            logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%reservation_policy)
            return
        # at this point we have reservation_policy in ['lease_or_idle','lease_or_shared']
        # we make no difference for now
        logger.verbose('reservation.GetSlivers : reservable node -- listing timers ')

        self.sync_timers_from_leases()
        if reservation.debug:
            self.list_timers()

    ####################
    # the granularity is set in the API (initial value is 15 minutes)
    # and it is used to round all leases start/until times
    # changing this dynamically can have some weird effects of course..
    def granularity (self):
        """Lease granularity in seconds, as exposed by the API; one hour by default."""
        try:
            return self.data['lease_granularity']
        # in case we'd try to access this before it's populated..
        except Exception:
            return 60*60

    # round to granularity
    def round_time (self, time):
        """Round *time* (a unix timestamp) to the nearest granularity boundary."""
        granularity=self.granularity()
        # floor division: identical to py2 int '/', and still correct under py3
        return ((int(time)+granularity//2)//granularity)*granularity

    def clear_timers (self):
        # cancel every pending timer and forget about them all
        for timer in self.timers.values():
            timer.cancel()
        self.timers={}

    def sync_timers_from_leases (self):
        """Re-arm one timer per lease start/end found in the current data."""
        self.clear_timers()
        for lease in self.data['leases']:
            self.ensure_timer(lease['t_from'])
            self.ensure_timer(lease['t_until'])

    def list_timers(self):
        # sorted() accepts both the py2 list and the py3 view returned by keys()
        for timestamp in sorted(self.timers.keys()):
            logger.verbose('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
        logger.verbose('reservation.GetSlivers : end listing timers')

    def ensure_timer(self, timestamp):
        """Arm a timer for *timestamp* unless its rounded slot already has one."""
        now=time.time()
        # forget about past events
        if timestamp < now: return
        # renamed from 'round' so we do not shadow the builtin
        rounded=self.round_time(timestamp)
        if rounded in self.timers: return
        def this_closure ():
            self.granularity_callback (rounded)
        timer=threading.Timer(timestamp-now,this_closure)
        self.timers[rounded]=timer
        timer.start()


    @staticmethod
    def time_printable (timestamp):
        """Human-readable UTC rendering of a unix timestamp."""
        return time.strftime ("%Y-%m-%d %H:%M UTC",time.gmtime(timestamp))

    @staticmethod
    def lease_printable (lease):
        """One-line rendering of a lease dict (name, slice_id, t_from, t_until)."""
        # dict(mapping) copies on both python2 and python3 (was lease.iteritems())
        d=dict (lease)
        d['from']=reservation.time_printable(lease['t_from'])
        # BUGFIX: 'until' used to be rendered from t_from instead of t_until
        d['until']=reservation.time_printable(lease['t_until'])
        s=[]
        s.append("slice=%(name)s (%(slice_id)d)"%d)
        s.append("from %(from)s"%d)
        s.append("until %(until)s"%d)
        return " ".join(s)

    # this is invoked at the granularity boundaries where something happens (a lease ends or/and a lease starts)
    def granularity_callback (self, time_arg):
        """Suspend and/or restart slivers for the leases ending/starting right now."""
        now=time.time()
        round_now=self.round_time(now)
        leases=self.data['leases']
        ###
        if reservation.debug:
            logger.verbose('reservation.granularity_callback now=%f round_now=%d arg=%d...'%(now,round_now,time_arg))
        if leases and reservation.debug:
            logger.verbose('reservation: Listing leases beg')
            for lease in leases:
                logger.verbose("reservation: lease="+reservation.lease_printable(lease))
            logger.verbose('reservation: Listing leases end')

        ### what do we have to do at this point in time?
        ending_lease=None
        for lease in leases:
            if lease['t_until']==round_now:
                logger.verbose('reservation: end of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
                ending_lease=lease
        starting_lease=None
        for lease in leases:
            if lease['t_from']==round_now:
                logger.verbose('reservation: start of lease for slice %s - (lease=%s)'%(lease['name'],reservation.lease_printable(lease)))
                starting_lease=lease

        ## sanity check
        if not ending_lease and not starting_lease:
            logger.log("reservation.granularity_callback: unexpected void event")
            return

        ## leases end and restart, about the same sliver
        if ending_lease and starting_lease and ending_lease['name']==starting_lease['name']:
            slicename=ending_lease['name']
            if self.is_running(slicename):
                logger.log("reservation.granularity_callback: end/start of same sliver %s -- ignored"%ending_lease['name'])
                return
            else:
                logger.log("reservation.granularity_callback: mmh, the sliver is unexpectedly not running, starting it...")
                self.restart_slice(slicename)
            return

        # otherwise things are simple
        if ending_lease:
            self.suspend_slice (ending_lease['name'])
            if not starting_lease:
                logger.log("'lease_or_shared' is xxx todo - would restart to shared mode")
                # only lease_or_idle available : we freeze the box
                self.suspend_all_slices()
            else:
                self.restart_slice(starting_lease['name'])
            return

        # no ending, just one starting
        logger.log("'lease_or_shared' is xxx todo - would stop shared mode")
        # in lease_or_idle, all it takes is restart the sliver
        self.restart_slice (starting_lease['name'])
        return

    def debug_box(self,message,slicename=None):
        """Log vserver status for debugging -- optionally for one given slice."""
        if reservation.debug:
            logger.verbose ('reservation: '+message)
            logger.log_call( ['/usr/sbin/vserver-stat', ] )
            if slicename:
                logger.log_call ( ['/usr/sbin/vserver',slicename,'status', ])

    def is_running (self, slicename):
        """True iff the accounts layer reports the sliver running; False on any error."""
        try:
            return accounts.get(slicename).is_running()
        except Exception:
            return False

    # quick and dirty - this does not obey the accounts/sliver_vs/controller hierarchy
    def suspend_slice(self, slicename):
        """Best-effort stop of one sliver through its accounts worker."""
        logger.log('reservation: Suspending slice %s'%(slicename))
        self.debug_box('before suspending',slicename)
        worker=accounts.get(slicename)
        try:
            logger.verbose("reservation: Located worker object %r"%worker)
            worker.stop()
        except Exception:
            logger.log_exc("reservation.suspend_slice: Could not stop slice %s through its worker"%slicename)
        # we hope the status line won't return anything
        self.debug_box('after suspending',slicename)

    def suspend_all_slices (self):
        """Suspend every non-system sliver known in the last GetSlivers data."""
        for sliver in self.data['slivers']:
            # is this a system sliver ?
            system_slice=False
            for d in sliver['attributes']:
                if d['tagname']=='system' and d['value'] : system_slice=True
            if not system_slice:
                self.suspend_slice(sliver['name'])

    def restart_slice(self, slicename):
        """Best-effort (re)start of one sliver, marking its db record enabled first."""
        logger.log('reservation: Restarting slice %s'%(slicename))
        self.debug_box('before restarting',slicename)
        worker=accounts.get(slicename)
        try:
            # dig in self.data to retrieve corresponding rec
            slivers = [ sliver for sliver in self.data['slivers'] if sliver['name']==slicename ]
            sliver=slivers[0]
            record=database.db.get(slicename)
            record['enabled']=True
            #
            logger.verbose("reservation: Located worker object %r"%worker)
            logger.verbose("reservation: Located record at the db %r"%record)
            worker.start(record)
        except Exception:
            logger.log_exc("reservation.restart_slice: Could not start slice %s through its worker"%slicename)
        # we hope the status line won't return anything
        self.debug_box('after restarting',slicename)