6 There are a couple of tricky things going on here. First, the kernel
7 needs disk usage information in order to enforce the quota. However,
determining disk usage repeatedly puts redundant strain on the disks. Thus, the
9 Sliver_VS.disk_usage_initialized flag is used to determine whether
10 this initialization has been made.
12 Second, it's not currently possible to set the scheduler parameters
13 for a sliver unless that sliver has a running process. /bin/vsh helps
14 us out by reading the configuration file so that it can set the
15 appropriate limits after entering the sliver context. Making the
16 syscall that actually sets the parameters gives a harmless error if no
17 process is running. Thus we keep vm_running on when setting scheduler
18 parameters so that set_sched_params() always makes the syscall, and we
19 don't have to guess if there is a running process or not.
26 from threading import BoundedSemaphore
28 # the util-vserver-pl module
# special constant that tells vserver to keep its existing settings
KEEP_LIMIT = vserver.VC_LIM_KEEP

# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
for rlimit in vserver.RLIMITS.keys():
    # fix: 'rlim' was referenced but never assigned (NameError); slice
    # attributes are keyed on the lowercase limit name, e.g. 'nproc_hard'
    rlim = rlimit.lower()
    DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
    DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
class Sliver_VS(accounts.Account, vserver.VServer):
    """This class wraps vserver.VServer to make its interface closer to what we need."""
    # account-type tag -- presumably used by the accounts registry to select
    # this implementation for 'sliver.VServer' records; confirm in accounts.py
    TYPE = 'sliver.VServer'
    # serializes init_disk_info() across slivers (acquired in set_resources),
    # since computing disk usage strains the disks (see module header)
    _init_disk_info_sem = BoundedSemaphore()
    def __init__(self, rec):
        # Build a sliver account from a GetSlivers record; if the underlying
        # vserver is missing or broken, recreate it and retry the init.
        # NOTE(review): 'name' is read below but not assigned in the visible
        # code -- presumably taken from rec in a line not shown; confirm.
        logger.verbose ('sliver_vs: %s init'%name)
        logger.log("sliver_vs: %s: first chance..."%name)
        vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
        # NOTE(review): the 'try:' matching this handler is not visible here
        except Exception, err:
            if not isinstance(err, vserver.NoSuchVServer):
                # Probably a bad vserver or vserver configuration file
                logger.log_exc("sliver_vs:__init__ (first chance) %s",name=name)
                logger.log('sliver_vs: %s: recreating bad vserver' % name)
            # second chance: (re)create the vserver image, then re-run the
            # vserver-level initialization
            self.create(name, rec['vref'])
            logger.log("sliver_vs: %s: second chance..."%name)
            vserver.VServer.__init__(self, name,logfile='/var/log/nodemanager')
        self.slice_id = rec['slice_id']
        # guard so the expensive init_disk_info() runs at most once
        # (see set_resources and the module header)
        self.disk_usage_initialized = False
    def create(name, vref = None):
        # Create a fresh vserver image for slice 'name' from the reference
        # image named by 'vref' (the slice family).
        logger.verbose('sliver_vs: %s: create'%name)
        # NOTE(review): a guard (e.g. 'if not vref:') for this error branch
        # is not visible here -- confirm
        logger.log("sliver_vs: %s: ERROR - no vref attached, this is unexpected"%(name))
        # used to look in /etc/planetlab/family,
        # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
        # which for legacy is still exposed here as the 'vref' key
        # check the template exists -- there's probably a better way..
        if not os.path.isdir ("/vservers/.vref/%s"%vref):
            logger.log ("sliver_vs: %s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
        # vref is expected to split into three '-'-separated fields,
        # the last one being the architecture
        (x,y,arch)=vref.split('-')
        # mh, this of course applies when 'vref' is e.g. 'netflow'
        # and that's not quite right
        # map an arch string onto the vserver personality setting
        def personality (arch):
            personality="linux32"
            if arch.find("64")>=0:
                personality="linux64"
            # NOTE(review): 'return personality' is not visible here, yet the
            # callers below use the return value -- confirm
        # logger.log_call(['/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
        logger.log_call(['/bin/bash','-x','/usr/sbin/vuseradd', '-t', vref, name, ], timeout=15*60)
        # export slicename to the slice in /etc/slicename
        file('/vservers/%s/etc/slicename' % name, 'w').write(name)
        file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
        # set personality: only if needed (if arch's differ)
        if tools.root_context_arch() != arch:
            file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch)+"\n")
            logger.log('sliver_vs: %s: set personality to %s'%(name,personality(arch)))
        # tear down the sliver via vuserdel ('bash -x' traces the script in the log)
        # logger.log_call(['/usr/sbin/vuserdel', name, ])
        logger.log_call(['/bin/bash','-x','/usr/sbin/vuserdel', name, ])
    def configure(self, rec):
        # Apply an updated GetSlivers record: refresh the rspec, the slice
        # initscript, and account-level configuration (ssh keys).
        new_rspec = rec['_rspec']
        if new_rspec != self.rspec:
            self.rspec = new_rspec
            # NOTE(review): lines following this assignment are not visible
            # here -- presumably the new rspec gets applied; confirm
        new_initscript = rec['initscript']
        if new_initscript != self.initscript:
            self.initscript = new_initscript
            # not used anymore, we always check against the installed script
            #self.initscriptchanged = True
            # reinstall (or remove) the per-slice initscript in the sliver
            self.refresh_slice_vinit()
        accounts.Account.configure(self, rec) # install ssh keys
137 # unconditionnally install and enable the generic vinit script
138 # mimicking chkconfig for enabling the generic vinit script
139 # this is hardwired for runlevel 3
140 def install_and_enable_vinit (self):
141 vinit_source="/usr/share/NodeManager/sliver-initscripts/vinit"
142 vinit_script="/vservers/%s/etc/rc.d/init.d/vinit"%self.name
143 rc3_link="/vservers/%s/etc/rc.d/rc3.d/S99vinit"%self.name
144 rc3_target="../init.d/vinit"
146 body=file(vinit_source).read()
147 if tools.replace_file_with_string(vinit_script,body,chmod=0755):
148 logger.log("vsliver_vs: %s: installed generic vinit rc script"%self.name)
149 # create symlink for runlevel 3
150 if not os.path.islink(rc3_link):
152 logger.log("vsliver_vs: %s: creating runlevel3 symlink %s"%(self.name,rc3_link))
153 os.symlink(rc3_target,rc3_link)
155 logger.log_exc("vsliver_vs: %s: failed to create runlevel3 symlink %s"%rc3_link)
157 # this one checks for the existence of the slice initscript
158 # install or remove the slice inistscript, as instructed by the initscript tag
159 def refresh_slice_vinit(self):
161 sliver_initscript="/vservers/%s/etc/rc.d/init.d/vinit.slice"%self.name
162 tools.replace_file_with_string(sliver_initscript,body,remove_if_empty=True,chmod=0755)
    def start(self, delay=0):
        # Start the sliver (unless disabled in its rspec), refreshing the
        # vinit machinery first, and reap the intermediate fork.
        if self.rspec['enabled'] <= 0:
            logger.log('sliver_vs: not starting %s, is not enabled'%self.name)
        # NOTE(review): an else/return separating the disabled case from the
        # startup path below is not visible here -- confirm
        logger.log('sliver_vs: %s: starting in %d seconds' % (self.name, delay))
        # the generic /etc/init.d/vinit script is permanently refreshed, and enabled
        self.install_and_enable_vinit()
        # if a change has occurred in the slice initscript, reflect this in /etc/init.d/vinit.slice
        self.refresh_slice_vinit()
        child_pid = os.fork()
        # NOTE(review): the 'if child_pid == 0:' child/parent split is not
        # visible; the two calls below presumably run in the child -- confirm
        # VServer.start calls fork() internally,
        # so just close the nonstandard fds and fork once to avoid creating zombies
        tools.close_nonstandard_fds()
        vserver.VServer.start(self)
        # the parent reaps the intermediate child so no zombie is left behind
        os.waitpid(child_pid, 0)
        # stop the sliver by delegating to the vserver layer
        logger.log('sliver_vs: %s: stopping' % self.name)
        vserver.VServer.stop(self)
188 def is_running(self):
189 return vserver.VServer.is_running(self)
    def set_resources(self,setup=False):
        # Push the current rspec onto the vserver: disk quota, rlimits,
        # capabilities, sysctls, scheduler shares, IP addresses, slice id.
        disk_max = self.rspec['disk_max']
        logger.log('sliver_vs: %s: setting max disk usage to %d KiB' % (self.name, disk_max))
        try: # if the sliver is over quota, .set_disk_limit will throw an exception
            if not self.disk_usage_initialized:
                self.vm_running = False
                # serialize the expensive disk scan across slivers
                Sliver_VS._init_disk_info_sem.acquire()
                logger.log('sliver_vs: %s: computing disk usage: beginning' % self.name)
                # init_disk_info is inherited from VServer
                try: self.init_disk_info()
                finally: Sliver_VS._init_disk_info_sem.release()
                logger.log('sliver_vs: %s: computing disk usage: ended' % self.name)
                self.disk_usage_initialized = True
            vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
        # NOTE(review): the 'except' clause for the try above is not visible here
            logger.log_exc('sliver_vs: failed to set max disk usage',name=self.name)
        # get/set the min/soft/hard values for all of the vserver
        # related RLIMITS. Note that vserver currently only
        # implements support for hard limits.
        for limit in vserver.RLIMITS.keys():
            # NOTE(review): 'type' is read below but not assigned in the
            # visible code -- presumably the lowercase limit name; confirm
            minimum = self.rspec['%s_min'%type]
            soft = self.rspec['%s_soft'%type]
            hard = self.rspec['%s_hard'%type]
            update = self.set_rlimit(limit, hard, soft, minimum)
            # NOTE(review): a guard on 'update' is presumably missing here
            logger.log('sliver_vs: %s: setting rlimit %s to (%d, %d, %d)'
                       % (self.name, type, hard, soft, minimum))
        self.set_capabilities_config(self.rspec['capabilities'])
        if self.rspec['capabilities']:
            logger.log('sliver_vs: %s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
        cpu_pct = self.rspec['cpu_pct']
        cpu_share = self.rspec['cpu_share']
        # apply every 'sysctl.<dotted.path>' rspec attribute under /proc/sys
        for key in self.rspec.keys():
            if key.find('sysctl.') == 0:
                sysctl=key.split('.')
                path="/proc/sys/%s" % ("/".join(sysctl[1:]))
                logger.log("sliver_vs: %s: opening %s"%(self.name,path))
                # NOTE(review): 'flags' is not assigned in the visible code --
                # presumably os.O_WRONLY; confirm
                fd = os.open(path, flags)
                logger.log("sliver_vs: %s: writing %s=%s"%(self.name,key,self.rspec[key]))
                os.write(fd,self.rspec[key])
                # NOTE(review): fd is never closed in the visible code, and
                # 'e' below implies a surrounding try/except not shown -- confirm
                logger.log("sliver_vs: %s: could not set %s=%s"%(self.name,key,self.rspec[key]))
                logger.log("sliver_vs: %s: error = %s"%(self.name,e))
        if self.rspec['enabled'] > 0:
            logger.log('sliver_vs: %s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
            logger.log('sliver_vs: %s: setting cpu share to %d' % (self.name, cpu_share))
            self.set_sched_config(cpu_pct, cpu_share)
            # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
            if self.rspec['ip_addresses'] != '0.0.0.0':
                logger.log('sliver_vs: %s: setting IP address(es) to %s' % \
                           (self.name, self.rspec['ip_addresses']))
                self.set_ipaddresses_config(self.rspec['ip_addresses'])
            #logger.log("sliver_vs: %s: Setting name to %s" % (self.name, self.slice_id))
            #self.setname(self.slice_id)
            #logger.log("sliver_vs: %s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
            # record the slice id under /etc/vservers/<name>/slice_id
            vserver_config_path = '/etc/vservers/%s'%self.name
            if not os.path.exists (vserver_config_path):
                os.makedirs (vserver_config_path)
            file('%s/slice_id'%vserver_config_path, 'w').write("%d\n"%self.slice_id)
            logger.log("sliver_vs: Recorded slice id %d for slice %s"%(self.slice_id,self.name))
            # NOTE(review): the except clauses binding 'e' for the two error
            # logs below are not visible here -- confirm
            logger.log("sliver_vs: Could not record slice_id for slice %s. Error: %s"%(self.name,str(e)))
            logger.log_exc("sliver_vs: Error recording slice id: %s"%str(e),name=self.name)
            if self.enabled == False:
                # NOTE(review): the body of this branch (presumably
                # re-enabling and starting the sliver) is not visible here
            if False: # Does not work properly yet.
                if self.have_limits_changed():
                    logger.log('sliver_vs: %s: limits have changed --- restarting' % self.name)
                    # NOTE(review): 'stopcount' initialization is not visible here
                    while self.is_running() and stopcount > 0:
                        # NOTE(review): the stop/sleep calls are not visible here
                        stopcount = stopcount - 1
        else: # tell vsh to disable remote login by setting CPULIMIT to 0
            logger.log('sliver_vs: %s: disabling remote login' % self.name)
            self.set_sched_config(0, 0)