6 There are a couple of tricky things going on here. First, the kernel
7 needs disk usage information in order to enforce the quota. However,
8 determining disk usage redundantly strains the disks. Thus, the
9 Sliver_VS.disk_usage_initialized flag is used to determine whether
10 this initialization has been made.
12 Second, it's not currently possible to set the scheduler parameters
13 for a sliver unless that sliver has a running process. /bin/vsh helps
14 us out by reading the configuration file so that it can set the
15 appropriate limits after entering the sliver context. Making the
16 syscall that actually sets the parameters gives a harmless error if no
17 process is running. Thus we keep vm_running on when setting scheduler
18 parameters so that set_sched_params() always makes the syscall, and we
19 don't have to guess if there is a running process or not.
32 from threading import BoundedSemaphore
# Process-wide semaphore used to serialize the expensive per-sliver disk
# usage scan (bound to Sliver_VS._init_disk_info_sem below).
34 globalsem = BoundedSemaphore()
36 # special constant that tells vserver to keep its existing settings
37 KEEP_LIMIT = vserver.VC_LIM_KEEP
39 # populate the sliver/vserver specific default allocations table,
40 # which is used to look for slice attributes
41 DEFAULT_ALLOCATION = {}
42 for rlimit in vserver.RLIMITS.keys():
# NOTE(review): the body uses `rlim`, not the loop variable `rlimit`; the
# original line 43 is not visible in this chunk and presumably derives one
# from the other (e.g. rlim = rlimit.lower()) -- confirm against full file.
44 DEFAULT_ALLOCATION["%s_min"%rlim]=KEEP_LIMIT
45 DEFAULT_ALLOCATION["%s_soft"%rlim]=KEEP_LIMIT
46 DEFAULT_ALLOCATION["%s_hard"%rlim]=KEEP_LIMIT
48 class Sliver_VS(accounts.Account, vserver.VServer):
49 """This class wraps vserver.VServer to make its interface closer to what we need."""
# TYPE: account-type tag -- presumably used by the accounts registry to
# dispatch records to this class; confirm against accounts module.
52 TYPE = 'sliver.VServer'
# Class-wide semaphore serializing init_disk_info() across all slivers.
53 _init_disk_info_sem = globalsem
55 def __init__(self, rec):
# Bind this account wrapper to the vserver named in `rec`, recreating the
# vserver from its reference image if the first attach attempt fails.
# NOTE(review): original lines 56, 58, 66 and 70-73 are not visible in this
# chunk -- including the `try:` paired with the `except` below and the
# binding of `name`.
57 logger.verbose ('%s: initing Sliver_VS'%name)
59 logger.log("%s: first chance..."%name)
# First attempt: attach to an already-existing vserver context.
60 vserver.VServer.__init__(self, name,logfile='/var/log/nm')
61 except Exception, err:
62 if not isinstance(err, vserver.NoSuchVServer):
63 # Probably a bad vserver or vserver configuration file
64 logger.log_exc("sliver_vs.__init__ (1) %s",name=name)
65 logger.log('%s: recreating bad vserver' % name)
# Recovery path: (re)create the vserver, then retry the attach.
67 self.create(name, rec['vref'])
68 logger.log("%s: second chance..."%name)
69 vserver.VServer.__init__(self, name,logfile='/var/log/nm')
# Per-sliver bookkeeping; see the module docstring for why
# disk_usage_initialized exists (avoids redundant disk scans).
74 self.slice_id = rec['slice_id']
75 self.disk_usage_initialized = False
76 self.initscriptchanged = False
81 def create(name, vref = None):
# Build a brand-new vserver for sliver `name` from reference image `vref`
# (the 'GetSliceFamily' value). NOTE(review): several original lines are
# not visible in this chunk (83, 85, 89, 93-96, 100-102, 107-108),
# including the guards that the error logging below implies.
82 logger.verbose('Sliver_VS:create - name=%s'%name)
84 logger.log("%s: ERROR - no vref attached, this is unexpected"%name)
86 # used to look in /etc/planetlab/family,
87 # now relies on the 'GetSliceFamily' extra attribute in GetSlivers()
88 # which for legacy is still exposed here as the 'vref' key
90 # check the template exists -- there's probably a better way..
91 if not os.path.isdir ("/vservers/.vref/%s"%vref):
92 logger.log ("%s: ERROR Could not create sliver - vreference image %s not found"%(name,vref))
# vref is expected to look like '<distro>-<level>-<arch>' here.
97 (x,y,arch)=vref.split('-')
98 # mh, this of course applies when 'vref' is e.g. 'netflow'
99 # and that's not quite right
# Map the vref architecture to a linux32/linux64 personality string;
# presumably followed by a `return personality` on a missing line -- confirm.
103 def personality (arch):
104 personality="linux32"
105 if arch.find("64")>=0:
106 personality="linux64"
# vuseradd performs the actual vserver creation from the template.
109 logger.log_call('/usr/sbin/vuseradd', '-t', vref, name)
110 # export slicename to the slice in /etc/slicename
111 file('/vservers/%s/etc/slicename' % name, 'w').write(name)
112 file('/vservers/%s/etc/slicefamily' % name, 'w').write(vref)
113 # set personality: only if needed (if arch's differ)
114 if tools.root_context_arch() != arch:
115 file('/etc/vservers/%s/personality' % name, 'w').write(personality(arch))
116 logger.log('%s: set personality to %s'%(name,personality(arch)))
def destroy(name):
    """Remove the vserver account behind sliver `name` via /usr/sbin/vuserdel."""
    logger.log_call('/usr/sbin/vuserdel', name)
121 def configure(self, rec):
# Apply a fresh record from PLC: rspec, initscript, then ssh keys.
# NOTE(review): original lines 125-126 and 131 are not visible in this
# chunk -- presumably they react to the rspec change; confirm.
122 new_rspec = rec['_rspec']
123 if new_rspec != self.rspec:
124 self.rspec = new_rspec
# Remember the initscript changed so start() reinstalls it in the sliver.
127 new_initscript = rec['initscript']
128 if new_initscript != self.initscript:
129 self.initscript = new_initscript
130 self.initscriptchanged = True
132 accounts.Account.configure(self, rec) # install ssh keys
134 def start(self, delay=0):
# Start the sliver's vserver context, but only when the rspec enables it;
# reinstall the initscript first if it changed since the last start.
# NOTE(review): original lines 137, 141, 148-149 and 154-155 are not
# visible in this chunk -- including the fork branch test (presumably
# `if child_pid == 0:`) and whatever consumes `delay` -- confirm.
135 if self.rspec['enabled'] > 0:
136 logger.log('%s: starting in %d seconds' % (self.name, delay))
138 # VServer.start calls fork() internally,
139 # so just close the nonstandard fds and fork once to avoid creating zombies
140 child_pid = os.fork()
142 if self.initscriptchanged:
143 logger.log('%s: installing initscript' % self.name)
# install_initscript runs inside the sliver's filesystem via chroot_call.
144 def install_initscript():
145 flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
146 fd = os.open('/etc/rc.vinit', flags, 0755)
147 os.write(fd, self.initscript)
150 self.chroot_call(install_initscript)
151 except: logger.log_exc("sliver_vs.start",name=self.name)
152 tools.close_nonstandard_fds()
153 vserver.VServer.start(self)
# Parent reaps the forked child so it does not linger as a zombie.
156 os.waitpid(child_pid, 0)
157 self.initscriptchanged = False
158 else: logger.log('not starting, is not enabled', name=self.name)
# NOTE(review): body of stop() -- its `def stop(self):` header (original
# line ~160) is not visible in this chunk. Logs and delegates to the
# underlying VServer stop.
161 logger.log('%s: stopping' % self.name)
162 vserver.VServer.stop(self)
def is_running(self):
    """Report whether this sliver's vserver context is currently running."""
    state = vserver.VServer.is_running(self)
    return state
167 def set_resources(self,setup=False):
# Push the current rspec into the kernel/vserver configuration: disk
# quota, rlimits, capabilities, sysctls, CPU scheduler and IP addresses.
# NOTE(review): many original lines are not visible in this chunk (180,
# 182, 187, 192, 195, 199, 202-203, 207, 210, 214-215, 218-219, 221,
# 223-226, 228-230, 237, 246, 248-250, 254, 256-258, 260-261), so several
# statements below appear without their surrounding control flow.
168 disk_max = self.rspec['disk_max']
169 logger.log('%s: setting max disk usage to %d KiB' % (self.name, disk_max))
170 try: # if the sliver is over quota, .set_disk_limit will throw an exception
171 if not self.disk_usage_initialized:
172 self.vm_running = False
# Serialize the expensive disk scan across slivers (see module docstring).
173 Sliver_VS._init_disk_info_sem.acquire()
174 logger.log('%s: computing disk usage: beginning' % self.name)
175 try: self.init_disk_info()
176 finally: Sliver_VS._init_disk_info_sem.release()
177 logger.log('%s: computing disk usage: ended' % self.name)
178 self.disk_usage_initialized = True
# Never set the limit below current usage, or the sliver is instantly over quota.
179 vserver.VServer.set_disklimit(self, max(disk_max, self.disk_blocks))
181 logger.log_exc('failed to set max disk usage',name=self.name)
183 # get/set the min/soft/hard values for all of the vserver
184 # related RLIMITS. Note that vserver currently only
185 # implements support for hard limits.
186 for limit in vserver.RLIMITS.keys():
# NOTE(review): `type` is the rspec key prefix below; the missing original
# line 187 presumably derives it from `limit` (e.g. lower-cased) -- confirm.
188 minimum = self.rspec['%s_min'%type]
189 soft = self.rspec['%s_soft'%type]
190 hard = self.rspec['%s_hard'%type]
191 update = self.set_rlimit(limit, hard, soft, minimum)
193 logger.log('%s: setting rlimit %s to (%d, %d, %d)'
194 % (self.name, type, hard, soft, minimum))
196 self.set_capabilities_config(self.rspec['capabilities'])
197 if self.rspec['capabilities']:
198 logger.log('%s: setting capabilities to %s' % (self.name, self.rspec['capabilities']))
200 cpu_pct = self.rspec['cpu_pct']
201 cpu_share = self.rspec['cpu_share']
# Apply any 'sysctl.*' rspec keys by writing into /proc/sys for the sliver.
204 for key in self.rspec.keys():
205 if key.find('sysctl.') == 0:
206 sysctl=key.split('.')
208 path="/proc/sys/%s" % ("/".join(sysctl[1:]))
209 logger.log("%s: opening %s"%(self.name,path))
# NOTE(review): `flags` is bound on a line not visible in this chunk.
211 fd = os.open(path, flags)
212 logger.log("%s: writing %s=%s"%(self.name,key,self.rspec[key]))
213 os.write(fd,self.rspec[key])
# Best-effort: a failed sysctl write is logged, not fatal.
216 logger.log("%s: could not set %s=%s"%(self.name,key,self.rspec[key]))
217 logger.log("%s: error = %s"%(self.name,e))
220 if self.rspec['enabled'] > 0:
222 logger.log('%s: setting cpu reservation to %d%%' % (self.name, cpu_pct))
227 logger.log('%s: setting cpu share to %d' % (self.name, cpu_share))
231 self.set_sched_config(cpu_pct, cpu_share)
232 # if IP address isn't set (even to 0.0.0.0), sliver won't be able to use network
233 if self.rspec['ip_addresses'] != '0.0.0.0':
234 logger.log('%s: setting IP address(es) to %s' % \
235 (self.name, self.rspec['ip_addresses']))
236 self.set_ipaddresses_config(self.rspec['ip_addresses'])
238 if self.is_running():
239 logger.log("%s: Setting name to %s" % (self.name, self.slice_id))
240 self.setname(self.slice_id)
241 ### Sapan's change needs more work
242 # raise IOException, file does not get created
243 # might be that /etc/vservers is not available here, are we in the chroot ?
244 #logger.log("%s: Storing slice id of %s for PlanetFlow" % (self.name, self.slice_id))
245 #file('/etc/vservers/%s/slice_id' % self.name, 'w').write(self.slice_id)
# Enable/disable transition handling (surrounding lines 246-250 missing).
247 if self.enabled == False:
251 if False: # Does not work properly yet.
252 if self.have_limits_changed():
253 logger.log('%s: limits have changed --- restarting' % self.name)
# Bounded wait for the context to stop (stopcount init not visible here).
255 while self.is_running() and stopcount > 0:
259 stopcount = stopcount - 1
262 else: # tell vsh to disable remote login by setting CPULIMIT to 0
263 logger.log('%s: disabling remote login' % self.name)
264 self.set_sched_config(0, 0)