3 There are a couple of tricky things going on here. First, the kernel
4 needs disk usage information in order to enforce the quota. However,
5 determining disk usage redundantly strains the disks. Thus, the
6 Sliver_VS.disk_usage_initialized flag is used to determine whether
7 this initialization has been made.
9 Second, it's not currently possible to set the scheduler parameters
10 for a sliver unless that sliver has a running process. /bin/vsh helps
11 us out by reading the configuration file so that it can set the
12 appropriate limits after entering the sliver context. Making the
13 syscall that actually sets the parameters gives a harmless error if no
14 process is running. Thus we keep vm_running on when setting scheduler
15 parameters so that set_sched_params() always makes the syscall, and we
16 don't have to guess if there is a running process or not.
29 from threading import BoundedSemaphore
globalsem = BoundedSemaphore()

# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP

# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    # rspec attribute keys are lowercase ("<limit>_min" etc.); the original
    # loop body referenced 'rlim' without ever binding it (NameError) --
    # bind the lowercased limit name here before building the keys
    rlim = rlimit.lower()
    DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
class Sliver_VS(accounts.Account, vserver.VServer):
    """This class wraps vserver.VServer to make its interface closer to what we need."""

    # account-type tag by which this implementation is selected
    TYPE = 'sliver.VServer'
    # shared semaphore serializing the expensive disk-usage initialization
    # across slivers (see module docstring: redundant scans strain the disks)
    _init_disk_info_sem = globalsem
    def __init__(self, rec):
        # Bind to the vserver named rec['name']; if the existing vserver (or
        # its configuration) is broken, destroy and recreate it, then retry.
        # Keys read from rec: 'name', 'vref', 'slice_id'.
        logger.verbose ('initing Sliver_VS with name=%s'%rec['name'])
        # NOTE(review): the 'try:' matching the except below is not visible in
        # this excerpt
        vserver.VServer.__init__(self, rec['name'],logfile='/var/log/nm')
        except Exception, err:
            if not isinstance(err, vserver.NoSuchVServer):
                # Probably a bad vserver or vserver configuration file
                logger.log_exc(self.name)
                logger.log('%s: recreating bad vserver' % rec['name'])
                # only destroy when the vserver existed but was broken
                self.destroy(rec['name'])
            # (re)create from the reference image, then bind again
            self.create(rec['name'], rec['vref'])
            vserver.VServer.__init__(self, rec['name'],logfile='/var/log/nm')

        self.slice_id = rec['slice_id']
        # disk usage scan is deferred to set_resources(), guarded by
        # _init_disk_info_sem (see module docstring)
        self.disk_usage_initialized = False
        # set by configure() when a new initscript arrives; consumed by start()
        self.initscriptchanged = False
73 _root_context_arch=None
75 def root_context_arch():
76 if not Sliver_VS._root_context_arch:
77 Sliver_VS._root_context_arch=commands.getoutput("uname -i")
78 return Sliver_VS._root_context_arch
    def personality (arch):
        # Map an architecture string to a vserver personality value.
        # NOTE(review): the body is truncated in this excerpt -- the branch
        # result and return statement(s) are not visible; presumably 64-bit
        # archs get a 64-bit personality. Confirm against the full file.
        if arch.find("64")>=0:
    def create(name, vref = None):
        # Create a new vserver account for `name`, choosing the reference
        # image (slice family) from the vref slice attribute.
        logger.verbose('Sliver_VS:create - name=%s'%name)
        ### locating the right slicefamily
        # this is a first draft, and more a proof of concept thing
        # the idea is to parse vref for dash-separated wishes,
        # and to project these against the defaults
        # so e.g. if the default slice family (as found in /etc/planetlab/slicefamily)
        # is planetlab-f8-i386, then here is what we get
        # vref=x86_64 -> vuseradd -t planetlab-f8-x86_64
        # vref=centos5 -> vuseradd -t planetlab-centos5-i386
        # vref=centos5-onelab -> vuseradd -t onelab-centos5-i386
        # vref=planetflow -> vuseradd -t planetflow-f8-i386
        # vref=x86_64-planetflow -> vuseradd -t planetflow-f8-x86_64
        # NOTE(review): the 'try:' opening the block handled by the legacy /
        # unexpected-error branches below is not visible in this excerpt
        default=file("/etc/planetlab/slicefamily").read().strip()
        (pldistro,fcdistro,arch) = default.split("-")
        known_archs = [ 'i386', 'x86_64' ]
        known_fcdistros = [ 'f8', 'f9', 'centos5' ]
        # from the slice attribute: cut dashes and try to figure the meaning
        slice_wishes = vref.split("-")
        for wish in slice_wishes:
            if wish in known_archs:
            # NOTE(review): the branch bodies (presumably assigning arch /
            # fcdistro / pldistro from the wish) are missing from this excerpt
            elif wish in known_fcdistros:
        refname="-".join( (pldistro,fcdistro,arch) )
        # check the template exists -- there's probably a better way..
        if os.path.isdir ("/vservers/.vref/%s"% vref): refname = vref
        if not os.path.isdir ("/vservers/.vref/%s"% refname):
            logger.log("%s (%s) : vref %s not found, using default %s"%(
                name,vref,refname,default))
            # reset so arch is right
            (pldistro,fcdistro,arch) = default.split("-")
            # could check again, but as we have /etc/slicefamily
            # there's probably no /vservers/.vref/default
        # NOTE(review): the except clause lines are missing; the next two
        # statements belong to the "no /etc/planetlab/slicefamily" branch
        # have not found slicefamily
        logger.log("%s (%s): legacy node - using fallback vrefname 'default'"%(name,vref))
        # NOTE(review): and this pair to the catch-all error branch
        logger.log("%s (%s) : unexpected error follows - using 'default'"%(name,vref))
        logger.log(traceback.format_exc())
        # actually create the account from the chosen reference image
        logger.log_call('/usr/sbin/vuseradd', '-t', refname, name)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        # set personality: only if needed (if arch's differ)
        if Sliver_VS.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(Sliver_VS.personality(arch))
            logger.log('%s: set personality to %s'%(name,Sliver_VS.personality(arch)))
158 def destroy(name): logger.log_call('/usr/sbin/vuserdel', name)
    def configure(self, rec):
        # Apply the attributes in sliver record `rec` to this sliver.
        new_rspec = rec['_rspec']
        if new_rspec != self.rspec:
            self.rspec = new_rspec
            # NOTE(review): the lines that acted on the changed rspec here are
            # missing from this excerpt
        new_initscript = rec['initscript']
        if new_initscript != self.initscript:
            self.initscript = new_initscript
            # flag picked up (and cleared) by start()
            self.initscriptchanged = True
        # delegate the generic account-level configuration
        accounts.Account.configure(self, rec)
    def start(self, delay=0):
        # Start the sliver if enabled; install a freshly-changed initscript
        # into the sliver filesystem first.
        if self.rspec['enabled'] > 0:
            logger.log('%s: starting in %d seconds' % (self.name, delay))
            # NOTE(review): the sleep honoring `delay` is not visible in this
            # excerpt
            # VServer.start calls fork() internally,
            # so just close the nonstandard fds and fork once to avoid creating zombies
            child_pid = os.fork()
            # NOTE(review): the child/parent branch on child_pid is missing
            # here; the following lines appear to be the child side
            if self.initscriptchanged:
                logger.log('%s: installing initscript' % self.name)
                def install_initscript():
                    # 0755: the initscript must be executable inside the sliver
                    flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
                    fd = os.open('/etc/rc.vinit', flags, 0755)
                    os.write(fd, self.initscript)
                # NOTE(review): the 'try:' matching the except below is missing
                # run the installer inside the sliver's root
                self.chroot_call(install_initscript)
                except: logger.log_exc(self.name)
            tools.close_nonstandard_fds()
            vserver.VServer.start(self)
            # parent: reap the forked child to avoid a zombie
            os.waitpid(child_pid, 0)
            self.initscriptchanged = False
        else: logger.log('%s: not starting, is not enabled' % self.name)
        # NOTE(review): the 'def stop(self):' header for these two lines is
        # not visible in this excerpt
        logger.log('%s: stopping' % self.name)
        vserver.VServer.stop(self)
204 def is_running(self):
205 return vserver.VServer.is_running(self)
    def set_resources(self,setup=False):
        # Push everything in self.rspec down to the vserver: disk quota,
        # rlimits, capabilities, sysctls, CPU scheduling and IP addresses.
        disk_max = self.rspec['disk_max']
        logger.log('%s: setting max disk usage to %d KiB' % (self.name, disk_max))
        try: # if the sliver is over quota, .set_disk_limit will throw an exception
            if not self.disk_usage_initialized:
                self.vm_running = False
                # serialize the disk scan across slivers -- it strains the
                # disks (see module docstring)
                Sliver_VS._init_disk_info_sem.acquire()
                logger.log('%s: computing disk usage: beginning' % self.name)
                try: self.init_disk_info()
                finally: Sliver_VS._init_disk_info_sem.release()
                logger.log('%s: computing disk usage: ended' % self.name)
                self.disk_usage_initialized = True
            # never set the limit below what is already used
            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
        # NOTE(review): the except clause line is missing from this excerpt
            logger.log('%s: failed to set max disk usage' % self.name)
            logger.log_exc(self.name)

        # get/set the min/soft/hard values for all of the vserver
        # related RLIMITS. Note that vserver currently only
        # implements support for hard limits.
        for limit in vserver.RLIMITS.keys():
            # NOTE(review): the binding of 'type' (presumably limit.lower())
            # is missing from this excerpt -- rspec keys below are built
            # from it
            minimum = self.rspec['%s_min'%type]
            soft = self.rspec['%s_soft'%type]
            hard = self.rspec['%s_hard'%type]
            update = self.set_rlimit(limit, hard, soft, minimum)
            # NOTE(review): the condition (presumably on `update`) guarding
            # this log statement is missing here
                logger.log('%s: setting rlimit %s to (%d, %d, %d)'
                           % (self.name, type, hard, soft, minimum))

        self.set_capabilities_config(self.rspec['capabilities'])
        if self.rspec['capabilities']:
            logger.log('%s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))

        cpu_pct = self.rspec['cpu_pct']
        cpu_share = self.rspec['cpu_share']

        # apply any 'sysctl.*' attributes by writing under /proc/sys
        for key in self.rspec.keys():
            if key.find('sysctl.') == 0:
                sysctl=key.split('.')
                # "sysctl.net.ipv4.x" -> "/proc/sys/net/ipv4/x"
                path="/proc/sys/%s" % ("/".join(sysctl[1:]))
                logger.log("%s: opening %s"%(self.name,path))
                # NOTE(review): 'flags' is bound in a line missing from this
                # excerpt; the try: for the error lines below is missing too
                fd = os.open(path, flags)
                logger.log("%s: writing %s=%s"%(self.name,key,self.rspec[key]))
                os.write(fd,self.rspec[key])
                # NOTE(review): these two lines belong to an except handler
                # binding the exception as 'e'
                logger.log("%s: could not set %s=%s"%(self.name,key,self.rspec[key]))
                logger.log("%s: error = %s"%(self.name,e))

        if self.rspec['enabled'] > 0:
            # NOTE(review): branching between reservation and share (and the
            # values actually passed) is partly missing from this excerpt
            logger.log('%s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
            logger.log('%s: setting cpu share to %d' % (self.name, cpu_share))
            self.set_sched_config(cpu_pct, cpu_share)
            # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
            if self.rspec['ip_addresses'] != '0.0.0.0':
                logger.log('%s: setting IP address(es) to %s' % \
                           (self.name, self.rspec['ip_addresses']))
                self.set_ipaddresses_config(self.rspec['ip_addresses'])

            if self.is_running():
                logger.log("%s: Setting name to %s" % (self.name, self.slice_id),2)
                self.setname(self.slice_id)

            if False: # Does not work properly yet.
                if self.have_limits_changed():
                    logger.log('%s: limits have changed --- restarting' % self.name)
                    # NOTE(review): initialization of 'stopcount' and the loop
                    # body (stop/sleep) are missing from this excerpt
                    while self.is_running() and stopcount > 0:
                        stopcount = stopcount - 1
        else: # tell vsh to disable remote login by setting CPULIMIT to 0
            logger.log('%s: disabling remote login' % self.name)
            self.set_sched_config(0, 0)