1 """Whole core scheduling
9 glo_coresched_simulate = False
10 joinpath = os.path.join
13 """ Whole-core scheduler
15 The main entrypoint is adjustCores(self, slivers) which takes a
16 dictionary of sliver records. The cpu_cores field is pulled from the
17 effective rspec (rec["_rspec"]) for each sliver.
19 If cpu_cores > 0 for a sliver, then that sliver will reserve one or
20 more of the cpu_cores on the machine.
22 One core is always left unreserved for system slices.
25 def __init__(self, cgroup_var_name="cpuset.cpus", slice_attr_name="cpu_cores"):
27 self.cgroup_var_name = cgroup_var_name
28 self.slice_attr_name = slice_attr_name
29 self.cgroup_mem_name = "cpuset.mems"
34 def get_cgroup_var(self, name=None, subsys=None, filename=None):
35 """ decode cpuset.cpus or cpuset.mems into a list of units that can
39 assert(filename!=None or name!=None)
42 # filename="/dev/cgroup/" + name
43 filename = reduce(lambda a, b: joinpath(a, b) if b else a, [subsys, name],
44 cgroups.get_base_path())
46 data = open(filename).readline().strip()
53 # cpuset.cpus could be something as arbitrary as:
55 # deal with commas and ranges
56 for part in data.split(","):
57 unitRange = part.split("-")
58 if len(unitRange) == 1:
59 unitRange = (unitRange[0], unitRange[0])
60 for i in range(int(unitRange[0]), int(unitRange[1])+1):
67 """ return a list of available cpu identifiers: [0,1,2,3...]
70 # the cpus never change, so if it's already been computed then don't
75 self.cpus = self.get_cgroup_var(self.cgroup_var_name, 'cpuset')
77 self.cpu_siblings = {}
78 for item in self.cpus:
79 self.cpu_siblings[item] = self.get_core_siblings(item)
83 def find_cpu_mostsiblings(self, cpus):
88 for candidate in self.cpu_siblings[cpu]:
91 if (count > bestCount):
99 def find_compatible_cpu(self, cpus, compatCpu):
101 return self.find_cpu_mostsiblings(cpus)
103 # find a sibling if we can
107 if compatCpu in self.cpu_siblings[cpu]:
110 return self.find_cpu_mostsiblings(cpus)
112 def get_cgroups (self):
113 """ return a list of cgroups
114 this might change as vservers are instantiated, so always compute
117 return cgroups.get_cgroups()
119 #filenames = os.listdir("/dev/cgroup")
120 #for filename in filenames:
121 # if os.path.isdir(os.path.join("/dev/cgroup", filename)):
122 # cgroups.append(filename)
125 def decodeCoreSpec (self, cores):
126 """ Decode the value of the core attribute. It's a number, followed by
127 an optional letter "b" to indicate besteffort cores should also
132 if cores.endswith("b"):
141 return (cores, bestEffort)
143 def adjustCores (self, slivers):
144 """ slivers is a dict of {sliver_name: rec}
145 rec is a dict of attributes
146 rec['_rspec'] is the effective rspec
149 cpus = self.get_cpus()[:]
150 mems = self.get_mems()[:]
153 if (len(mems) != len(cpus)):
154 logger.log("CoreSched fewer mems than " + self.cgroup_var_name + "; mem scheduling disabled")
157 logger.log("CoreSched (" + self.cgroup_var_name + "): available units: " + str(cpus))
160 mem_reservations = {}
162 # allocate the cores to the slivers that have them reserved
163 # TODO: Need to sort this from biggest cpu_cores to smallest
164 for name, rec in slivers.iteritems():
165 rspec = rec["_rspec"]
166 cores = rspec.get(self.slice_attr_name, 0)
167 (cores, bestEffort) = self.decodeCoreSpec(cores)
172 # one cpu core reserved for best effort and system slices
174 logger.log("CoreSched: ran out of units while scheduling sliver " + name)
176 cpu = self.find_compatible_cpu(cpus, lastCpu)
180 logger.log("CoreSched: allocating unit " + str(cpu) + " to slice " + name)
181 reservations[name] = reservations.get(name,[]) + [cpu]
183 # now find a memory node to go with the cpu
185 mem = self.find_associated_memnode(mems, cpu)
188 logger.log("CoreSched: allocating memory node " + str(mem) + " to slice " + name)
189 mem_reservations[name] = mem_reservations.get(name,[]) + [mem]
191 logger.log("CoreSched: failed to find memory node for cpu" + str(cpu))
195 # the leftovers go to everyone else
196 logger.log("CoreSched: allocating unit " + str(cpus) + " to _default")
197 reservations["_default"] = cpus[:]
198 mem_reservations["_default"] = mems[:]
202 # now check and see if any of our slices had the besteffort flag
204 for name, rec in slivers.iteritems():
205 rspec = rec["_rspec"]
206 cores = rspec.get(self.slice_attr_name, 0)
207 (cores, bestEffort) = self.decodeCoreSpec(cores)
209 freezable = rspec.get("cpu_freezable", 0)
210 if (cores==0) and (freezable == 1):
211 freezeList[name] = "FROZEN"
213 freezeList[name] = "THAWED"
215 # if the bestEffort flag isn't set then we have nothing to do
219 # note that if a reservation is [], then we don't need to add
220 # bestEffort cores to it, since it is bestEffort by default.
222 if reservations.get(name,[]) != []:
223 reservations[name] = reservations[name] + reservations["_default"]
224 mem_reservations[name] = mem_reservations.get(name,[]) + mem_reservations["_default"]
225 logger.log("CoreSched: adding besteffort units to " + name + ". new units = " + str(reservations[name]))
227 self.reserveUnits(self.cgroup_var_name, reservations)
229 self.reserveUnits(self.cgroup_mem_name, mem_reservations)
231 self.freezeUnits("freezer.state", freezeList)
233 def freezeUnits (self, var_name, freezeList):
234 for (cgroup, freeze) in freezeList.items():
236 logger.log("CoreSched: setting freezer for " + cgroup + " to " + freeze)
237 if glo_coresched_simulate:
238 print "F", "/dev/cgroup/" + cgroup + "/" + var_name, freeze
240 #file("/dev/cgroup/" + cgroup + "/" + var_name, "w").write(freeze)
241 file("/sys/fs/cgroup/freezer/libvirt/lxc/" + cgroup + "/" + var_name, "w").write(freeze)
243 # the cgroup probably didn't exit...
244 logger.log("CoreSched: exception while setting freeze for " + cgroup)
246 def reserveUnits (self, var_name, reservations):
247 """ give a set of reservations (dictionary of slicename:cpuid_list),
248 write those reservations to the appropriate cgroup files.
250 reservations["_default"] is assumed to be the default reservation
251 for slices that do not reserve cores. It's essentially the leftover
255 default = reservations["_default"]
257 # set the default vserver cpuset. this will deal with any vservers
258 # that might be created before the nodemanager has had a chance to
259 # update the cpusets.
260 self.reserveDefault(var_name, default)
262 for cgroup in self.get_cgroups():
263 if cgroup in reservations:
264 cpus = reservations[cgroup]
265 logger.log("CoreSched: reserving " + var_name + " on " + cgroup + ": " + str(cpus))
267 # no log message for default; too much verbosity in the common case
270 if glo_coresched_simulate:
271 print "R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus)
273 cgroups.write(cgroup, var_name, self.listToRange(cpus))
274 #file("/dev/cgroup/" + cgroup + "/" + var_name, "w").write( self.listToRange(cpus) + "\n" )
276 def reserveDefault (self, var_name, cpus):
277 #if not os.path.exists("/etc/vservers/.defaults/cgroup"):
278 # os.makedirs("/etc/vservers/.defaults/cgroup")
280 #if glo_coresched_simulate:
281 # print "RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus)
283 # file("/etc/vservers/.defaults/cgroup/" + var_name, "w").write( self.listToRange(cpus) + "\n" )
286 def listToRange (self, list):
287 """ take a list of items [1,2,3,5,...] and return it as a range: "1-3,5"
288 for now, just comma-separate
290 return ",".join( [str(i) for i in list] )
293 """ return a list of available cpu identifiers: [0,1,2,3...]
296 # the cpus never change, so if it's already been computed then don't
301 self.mems = self.get_cgroup_var(self.cgroup_mem_name, 'cpuset')
303 # build a mapping from memory nodes to the cpus they can be used with
306 for item in self.mems:
307 mems_map[item] = self.get_memnode_cpus(item)
309 if (len(mems_map)>0):
310 # when NUMA_EMU is enabled, only the last memory node will contain
311 # the cpu_map. For example, if there were originally 2 nodes and
312 # we used NUM_EMU to raise it to 12, then
316 # mems_map[5]=[1,3,5,7,9,11]
320 # mems_map[11]=[0,2,4,6,8,10]
321 # so, we go from back to front, copying the entries as necessary.
323 if mems_map[self.mems[0]] == []:
325 for item in reversed(self.mems):
326 if mems_map[item]!=[]:
327 work = mems_map[item]
328 else: # mems_map[item]==[]
329 mems_map[item] = work
331 self.mems_map = mems_map
335 def find_associated_memnode(self, mems, cpu):
336 """ Given a list of memory nodes and a cpu, see if one of the nodes in
337 the list can be used with that cpu.
340 if cpu in self.mems_map[item]:
344 def get_memnode_cpus(self, index):
345 """ for a given memory node, return the CPUs that it is associated
348 fn = "/sys/devices/system/node/node" + str(index) + "/cpulist"
349 if not os.path.exists(fn):
350 logger.log("CoreSched: failed to locate memory node" + fn)
353 return self.get_cgroup_var(filename=fn)
355 def get_core_siblings(self, index):
356 # use core_siblings rather than core_siblings_list, as it's compatible
358 fn = "/sys/devices/system/cpu/cpu" + str(index) + "/topology/core_siblings"
359 if not os.path.exists(fn):
363 x = open(fn, 'rt').readline().strip().split(',')[-1]
369 siblings.append(cpuid)
377 if __name__=="__main__":
378 glo_coresched_simulate = True
382 print "cgroups:", ",".join(x.get_cgroups())
384 print "cpus:", x.listToRange(x.get_cpus())
386 for item in x.get_cpus():
387 print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item,[])])
389 print "mems:", x.listToRange(x.get_mems())
390 print "cpu to memory map:"
391 for item in x.get_mems():
392 print " ", item, ",".join([str(y) for y in x.mems_map.get(item,[])])
394 rspec_sl_test1 = {"cpu_cores": "1"}
395 rec_sl_test1 = {"_rspec": rspec_sl_test1}
397 rspec_sl_test2 = {"cpu_cores": "5"}
398 rec_sl_test2 = {"_rspec": rspec_sl_test2}
400 rspec_sl_test3 = {"cpu_cores": "3b"}
401 rec_sl_test3 = {"_rspec": rspec_sl_test3}
403 #slivers = {"sl_test1": rec_sl_test1, "sl_test2": rec_sl_test2}
405 slivers = {"arizona_beta": rec_sl_test1, "arizona_test101": rec_sl_test2, "pl_sirius": rec_sl_test3}
407 #slivers = {"arizona_beta": rec_sl_test1, "arizona_logmon": rec_sl_test2, "arizona_owl": rec_sl_test3}
409 x.adjustCores(slivers)