There are a couple of tricky things going on here. First, the kernel
needs disk usage information in order to enforce the quota. However,
determining disk usage redundantly strains the disks. Thus, the
Sliver_VS.disk_usage_initialized flag is used to determine whether
this initialization has been made.

Second, it's not currently possible to set the scheduler parameters
for a sliver unless that sliver has a running process. /bin/vsh helps
us out by reading the configuration file so that it can set the
appropriate limits after entering the sliver context. Making the
syscall that actually sets the parameters gives a harmless error if no
process is running. Thus we keep vm_running on when setting scheduler
parameters so that set_sched_params() always makes the syscall, and we
don't have to guess if there is a running process or not.
26 from threading import BoundedSemaphore
29 # the util-vserver-pl module
34 from account import Account
35 from initscript import Initscript
# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP

# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    # FIX(review): 'rlim' was used below without ever being bound, which is a
    # NameError at import time as the code stands.  RLIMITS keys are
    # normalized to lowercase here because the rspec attribute keys
    # ('<name>_min', '<name>_soft', '<name>_hard') are lowercase -- see the
    # matching lookups in set_resources().
    rlim = rlimit.lower()
    DEFAULT_ALLOCATION["%s_min"%rlim] = KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim] = KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim] = KEEP_LIMIT
49 class Sliver_VS(vserver.VServer, Account, Initscript):
50 """This class wraps vserver.VServer to make its interface closer to what we need."""
# TYPE string identifying this sliver flavor; presumably used by the accounts
# machinery to select this class -- confirm against the full file.
53 TYPE = 'sliver.VServer'
# class-wide semaphore serializing init_disk_info() runs so concurrent
# slivers do not redundantly strain the disks (see module docstring)
54 _init_disk_info_sem = BoundedSemaphore()
56 def __init__(self, rec):
# Two-chance initialization: first try to bind to the existing vserver; if
# that fails for any reason other than an expected NoSuchVServer, log it,
# then recreate the vserver from scratch and redo the base initializations.
# NOTE(review): this excerpt is truncated -- original line 57 (presumably
# the binding of 'name' from rec) and line 59 (the opening 'try:' matching
# the 'except' below) are missing.  Confirm against the full file.
58 logger.verbose ('sliver_vs: %s init'%name)
60 logger.log("sliver_vs: %s: first chance..."%name)
61 vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
62 Account.__init__ (self, name)
63 Initscript.__init__ (self, name)
64 except Exception, err:
# NoSuchVServer is the one "expected" failure; anything else is logged as a
# bad vserver / configuration before the rebuild
65 if not isinstance(err, vserver.NoSuchVServer):
66 # Probably a bad vserver or vserver configuration file
67 logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
68 logger.log('sliver_vs: %s: recreating bad vserver' % name)
# second chance: recreate the guest, then redo the three base-class inits
# NOTE(review): original line 69 is missing here (possibly a destroy/try:)
70 self.create(name, rec)
71 vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
72 Account.__init__ (self, name)
73 Initscript.__init__ (self, name)
76 self.slice_id = rec['slice_id']
# defer the expensive disk-usage scan until set_resources() actually needs it
77 self.disk_usage_initialized = False
79 # xxx this almost certainly is wrong...
# Create the vserver guest for slice 'name' from the vref template image.
# NOTE(review): no 'self' parameter -- in the full file this is presumably a
# @staticmethod (the decorator, original lines ~80-82, is missing from this
# truncated excerpt), and the branch that reads the real vref out of 'rec'
# (original lines 85-87) is missing too: only the hard-wired fallback is
# visible here.
83 def create(name, rec = None):
84 logger.verbose('sliver_vs: %s: create'%name)
88 # band-aid for short period as old API doesn't have GetSliceFamily function
89 vref = "planetlab-f8-i386"
90 logger.log("sliver_vs: %s: ERROR - no vref attached, using hard-wired default %s"%(name,vref))
92 # used to look in /etc/planetlab/family,
93 # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
94 # which for legacy is still exposed here as the 'vref' key
96 # check the template exists -- there's probably a better way..
97 if not os.path.isdir ("/vservers/.vref/%s"%vref):
98 logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
# NOTE(review): the early 'return' presumably following this error (original
# lines 99-100) is missing from the excerpt
101 # compute guest personality
# vref is expected to look like 'planetlab-f8-i386': family, release, arch
# NOTE(review): original line 102 (likely a 'try:') is missing
103 (x,y,arch)=vref.split('-')
104 # mh, this of course applies when 'vref' is e.g. 'netflow'
105 # and that's not quite right
# map the guest arch onto the vserver personality string
109 def personality (arch): return "linux64" if arch.find("64") >=0 else "linux32"
# build the vuseradd command line
# NOTE(review): the initialization of 'command' (original lines 110-112) and
# the trailing append of 'name' plus the isolate branch body (116, 119-120)
# are missing from this excerpt
113 command += ['/bin/bash','-x',]
114 command += ['/usr/sbin/vuseradd', ]
115 if 'attributes' in rec and 'isolate_loopback' in rec['attributes'] and rec['attributes']['isolate_loopback'] == '1':
117 # the vsliver image to use
118 command += [ '-t', vref, ]
# vuseradd can be slow; cap it at 15 minutes
121 logger.log_call(command, timeout=15*60)
122 # export slicename to the slice in /etc/slicename
123 file('/vservers/%s/etc/slicename' % name, 'w').write(name)
124 file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
125 # set personality: only if needed (if arch's differ)
126 if tools.root_context_arch() != arch:
127 file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
128 logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))
# NOTE(review): the 'def destroy(name)' header and its @staticmethod
# decorator (original lines ~129-131) are missing from this excerpt; the
# lines below are the body of destroy: unmount, log, then delete the guest.
132 # need to umount before we trash, otherwise we end up with sequels in
133 # /vservers/slicename/ (namely in home/ )
134 # also because this is a static method we cannot check for 'omf_control'
135 # but it is no big deal as umount_ssh_dir checks before it umounts..
136 Account.umount_ssh_dir(name)
137 logger.log("sliver_vs: destroying %s"%name)
# tear the guest down with vuserdel, tracing via 'bash -x' for the logs
138 logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
141 def configure(self, rec):
# Apply a fresh sliver record: refresh the vinit script, adopt the new
# rspec if it changed, then run the Initscript and Account configure steps.
# (Remember configure() always gets called *before* start().)
142 # in case we update nodemanager..
143 self.install_and_enable_vinit()
145 new_rspec = rec['_rspec']
146 if new_rspec != self.rspec:
147 self.rspec = new_rspec
# NOTE(review): the action taken when the rspec changed (original lines
# 148-149, presumably applying the new resources) is missing from this excerpt
150 # do the configure part from Initscript
151 Initscript.configure(self,rec)
153 Account.configure(self, rec) # install ssh keys
155 # remember configure() always gets called *before* start()
156 def start(self, delay=0):
# Start the sliver unless disabled: refresh vinit, optionally expose .ssh
# for OMF-controlled slivers, then fork a child that runs VServer.start and
# reap it in the parent.
# NOTE(review): this excerpt is truncated -- the 'else:' matching the guard
# below (original 159), the sleep honoring 'delay' (161), the
# 'if child_pid == 0:' test (170), and the child-exit / parent split
# (175-176) are all missing.  Confirm against the full file.
157 if self.rspec['enabled'] <= 0:
158 logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
160 logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
162 # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
163 self.install_and_enable_vinit()
164 # expose .ssh for omf_friendly slivers
165 if 'omf_control' in self.rspec['tags']:
166 Account.mount_ssh_dir(self.name)
167 # if a change has occurred in the slice initscript, reflect this in /etc/init.d/vinit.slice
168 self.refresh_slice_vinit()
169 child_pid = os.fork()
171 # VServer.start calls fork() internally,
172 # so just close the nonstandard fds and fork once to avoid creating zombies
173 tools.close_nonstandard_fds()
174 vserver.VServer.start(self)
# parent side: reap the intermediate child so it does not become a zombie
177 os.waitpid(child_pid, 0)
# NOTE(review): the 'def stop(self):' header (original lines 178-179) is
# missing from this excerpt; the two lines below are its body -- log, then
# delegate the actual shutdown to the VServer base class.
180 logger.log('sliver_vs: %s: stopping' % self.name)
181 vserver.VServer.stop(self)
183 def is_running(self):
184 return vserver.VServer.is_running(self)
186 # this one seems to belong in Initscript at first sight,
187 # but actually depends on the underlying vm techno
188 # so let's keep it here
189 def rerun_slice_vinit(self):
190 command = "/usr/sbin/vserver %s exec /etc/rc.d/init.d/vinit restart" % (self.name)
191 logger.log("vsliver_vs: %s: Rerunning slice initscript: %s" % (self.name, command))
192 subprocess.call(command + "&", stdin=open('/dev/null', 'r'), stdout=open('/dev/null', 'w'), stderr=subprocess.STDOUT, shell=True)
194 def set_resources(self):
# Push the current rspec down into the vserver: disk quota, rlimits,
# capabilities, sysctls, CPU scheduler settings, IP addresses, and the
# recorded slice_id.  NOTE(review): this excerpt is truncated -- several
# original lines are missing (the 'except' at 208, the binding of 'type'
# around 215, 'if update:' at 220, the sysctl loop's try/except scaffolding,
# the cpu_pct/cpu_share guards at 257-262, etc.); the notes below flag each
# visible gap.  Confirm against the full file before editing.
195 disk_max = self.rspec['disk_max']
196 logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
197 try: # if the sliver is over quota, .set_disk_limit will throw an exception
198 if not self.disk_usage_initialized:
# keep vm_running off while the slow usage scan runs (see module docstring)
199 self.vm_running = False
# serialize disk scans across slivers -- they strain the disks
200 Sliver_VS._init_disk_info_sem.acquire()
201 logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
202 # init_disk_info is inherited from VServer
203 try: self.init_disk_info()
204 finally: Sliver_VS._init_disk_info_sem.release()
205 logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
206 self.disk_usage_initialized = True
# never set the limit below current usage (disk_blocks), or the sliver
# would be instantly over quota
207 vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
# NOTE(review): the 'except' line matching the try above (original 208) is
# missing from this excerpt
209 logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)
211 # get/set the min/soft/hard values for all of the vserver
212 # related RLIMITS. Note that vserver currently only
213 # implements support for hard limits.
214 for limit in vserver.RLIMITS.keys():
# NOTE(review): 'type' is read below but its binding (presumably
# 'type = limit.lower()', original line 215) is missing; rspec keys are
# lowercase while RLIMITS keys are not
216 minimum = self.rspec['%s_min'%type]
217 soft = self.rspec['%s_soft'%type]
218 hard = self.rspec['%s_hard'%type]
219 update = self.set_rlimit(limit, hard, soft, minimum)
# NOTE(review): the 'if update:' guard (original line 220) is missing
221 logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
222 % (self.name, type, hard, soft, minimum))
224 self.set_capabilities_config(self.rspec['capabilities'])
225 if self.rspec['capabilities']:
226 logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
228 cpu_pct = self.rspec['cpu_pct']
229 cpu_share = self.rspec['cpu_share']
# write any 'sysctl.*' rspec entries into /etc/vservers/<guest>/sysctl/<id>/
232 for key in self.rspec.keys():
233 if key.find('sysctl.') == 0:
234 sysctl=key.split('.')
# NOTE(review): 'count' is used below but never assigned in the visible
# lines (original 235 is missing)
236 # /etc/vservers/<guest>/sysctl/<id>/
237 dirname = "/etc/vservers/%s/sysctl/%s" % (self.name, count)
239 os.makedirs(dirname, 0755)
242 setting = open("%s/setting" % dirname, "w")
# NOTE(review): lstrip() strips a *character set*, not a prefix -- a key
# like 'sysctl.tcp...' would also lose its leading 't'; the robust prefix
# removal is key[len('sysctl.'):]
243 setting.write("%s\n" % key.lstrip("sysctl."))
245 value = open("%s/value" % dirname, "w")
246 value.write("%s\n" % self.rspec[key])
# NOTE(review): neither file handle is closed in the visible lines -- the
# close() calls are presumably in the missing lines (244, 247-249)
250 logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
# NOTE(review): 'e' has no visible binding -- the 'except ..., e:' opening
# this handler (around original line 251) is missing
252 logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
253 logger.log("sliver_vs: %s: error = %s"%(self.name,e))
# scheduler setup: only apply reservations/shares when the sliver is enabled
256 if self.rspec['enabled'] > 0:
258 logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
263 logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
267 self.set_sched_config(cpu_pct, cpu_share)
268 # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
269 if self.rspec['ip_addresses'] != '0.0.0.0':
270 logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
271 (self.name, self.rspec['ip_addresses']))
# honor the isolate_loopback tag: the value "1" means do NOT add loopback
# NOTE(review): the default binding of 'add_loopback' for slivers without
# the tag (original line 272) is missing from this excerpt
273 if 'isolate_loopback' in self.rspec['tags']:
274 add_loopback = self.rspec['tags']['isolate_loopback'] != "1"
275 self.set_ipaddresses_config(self.rspec['ip_addresses'], add_loopback)
277 #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
278 #self.setname(self.slice_id)
279 #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
# record the slice_id under /etc/vservers/<name>/ so it can be mapped back
# to the slice later (see the PlanetFlow comment just above)
281 vserver_config_path = '/etc/vservers/%s'%self.name
282 if not os.path.exists (vserver_config_path):
283 os.makedirs (vserver_config_path)
284 file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
285 logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
# NOTE(review): the 'except' openings for the two handlers below (original
# lines 286 and 288) are missing, so 'e' has no visible binding
287 logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
289 logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)
# NOTE(review): the body of this branch (original lines 293-295, presumably
# a stop and return) is missing from this excerpt
292 if self.enabled == False:
296 if False: # Does not work properly yet.
297 if self.have_limits_changed():
298 logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
# NOTE(review): the 'stopcount' initialization (original 299) and the
# stop/sleep calls inside the loop (301-303) are missing
300 while self.is_running() and stopcount > 0:
304 stopcount = stopcount - 1
307 else: # tell vsh to disable remote login by setting CPULIMIT to 0
# disabled sliver: zero out the scheduler so vsh refuses remote login
308 logger.log('sliver_vs: %s: disabling remote login' % self.name)
309 self.set_sched_config(0, 0)