1 """Whole core scheduling
9 glo_coresched_simulate = False
10 joinpath = os.path.join
13 """ Whole-core scheduler
15 The main entrypoint is adjustCores(self, slivers) which takes a
16 dictionary of sliver records. The cpu_cores field is pulled from the
17 effective rspec (rec["_rspec"]) for each sliver.
19 If cpu_cores > 0 for a sliver, then that sliver will reserve one or
20 more of the cpu_cores on the machine.
22 One core is always left unreserved for system slices.
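
    # A minimal sketch of typical use (the sliver name here is hypothetical):
    #     slivers = {"example_slice": {"_rspec": {"cpu_cores": "2b"}}}
    #     CoreSched().adjustCores(slivers)
    # would reserve two whole cores for example_slice and, because of the
    # trailing "b", also grant it the leftover best-effort cores.
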
    def __init__(self, cgroup_var_name="cpuset.cpus", slice_attr_name="cpu_cores"):
        self.cpus = []
        self.cgroup_var_name = cgroup_var_name
        self.slice_attr_name = slice_attr_name
        self.cgroup_mem_name = "cpuset.mems"
        self.mems = []
        self.mems_map = {}
        self.cpu_siblings = {}

    def get_cgroup_var(self, name=None, subsys=None, filename=None):
        """ decode cpuset.cpus or cpuset.mems into a list of units that can
            be reserved.
        """
        assert(filename != None or name != None)

        if filename == None:
            # filename="/dev/cgroup/" + name
            filename = reduce(lambda a, b: joinpath(a, b) if b else a, [subsys, name],
                              cgroups.get_base_path())

        data = open(filename).readline().strip()

        if not data:
            return []
        units = []

        # cpuset.cpus could be something as arbitrary as:
        #    0,1,2-3,4,5-6
        # deal with commas and ranges
        for part in data.split(","):
            unitRange = part.split("-")
            if len(unitRange) == 1:
                unitRange = (unitRange[0], unitRange[0])
            for i in range(int(unitRange[0]), int(unitRange[1])+1):
                if not i in units:
                    units.append(i)

        return units
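
    # For example, a cpuset.cpus value of "0,2-4" decodes to [0, 2, 3, 4].
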
67 """ return a list of available cpu identifiers: [0,1,2,3...]
70 # the cpus never change, so if it's already been computed then don't
75 self.cpus = self.get_cgroup_var(self.cgroup_var_name, 'cpuset')
77 self.cpu_siblings = {}
78 for item in self.cpus:
79 self.cpu_siblings[item] = self.get_core_siblings(item)
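
    # e.g. on a hypothetical 4-core hyperthreaded package, cpu_siblings could
    # look like {0: [0, 1, 2, 3], 1: [0, 1, 2, 3], ...}: each cpu maps to the
    # set of cpus that share its physical package.
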
    def find_cpu_mostsiblings(self, cpus):
        """ of the available cpus, pick the one with the most siblings that
            are also still available.
        """
        bestCount = -1
        bestCpu = -1
        for cpu in cpus:
            count = 0
            for candidate in self.cpu_siblings[cpu]:
                if candidate in cpus:
                    count = count + 1
                if (count > bestCount):
                    bestCount = count
                    bestCpu = cpu

        assert(bestCpu >= 0)
        return bestCpu

    def find_compatible_cpu(self, cpus, compatCpu):
        """ find a cpu that is a sibling of compatCpu; failing that, fall
            back to the cpu with the most available siblings.
        """
        if compatCpu == None:
            return self.find_cpu_mostsiblings(cpus)

        # find a sibling if we can
        for cpu in cpus:
            if compatCpu in self.cpu_siblings[cpu]:
                return cpu

        # no sibling available; pick the best remaining cpu
        return self.find_cpu_mostsiblings(cpus)
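
    # e.g. if cpus = [1, 2, 3] and compatCpu = 0 shares a package with cpu 1,
    # find_compatible_cpu returns 1, which keeps a sliver's cores together on
    # one physical package where possible.
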
    def get_cgroups(self):
        """ return a list of cgroups
            this might change as vservers are instantiated, so always compute
            it dynamically.
        """
        return cgroups.get_cgroups()

        # the old way, kept for reference: scan /dev/cgroup directly
        #cgroups = []
        #filenames = os.listdir("/dev/cgroup")
        #for filename in filenames:
        #    if os.path.isdir(os.path.join("/dev/cgroup", filename)):
        #        cgroups.append(filename)
        #return cgroups

    def decodeCoreSpec(self, cores):
        """ Decode the value of the core attribute. It's a number, followed
            by an optional letter "b" to indicate that besteffort cores
            should also be supplied.
        """
        bestEffort = False

        # the attribute may arrive as a number (e.g. the default 0), so
        # normalize it to a string before testing the suffix
        cores = str(cores)
        if cores.endswith("b"):
            cores = cores[:-1]
            bestEffort = True

        try:
            cores = int(cores)
        except ValueError:
            cores = 0

        return (cores, bestEffort)
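
    # e.g. decodeCoreSpec("3b") returns (3, True); decodeCoreSpec("2")
    # returns (2, False); anything unparseable returns (0, False).
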
    def adjustCores(self, slivers):
        """ slivers is a dict of {sliver_name: rec}
                rec is a dict of attributes
                    rec['_rspec'] is the effective rspec
        """

        cpus = self.get_cpus()[:]
        mems = self.get_mems()[:]

        memSchedule = True
        if (len(mems) != len(cpus)):
            logger.log("CoreSched: fewer mems than " + self.cgroup_var_name + "; mem scheduling disabled")
            memSchedule = False

        logger.log("CoreSched (" + self.cgroup_var_name + "): available units: " + str(cpus))

        reservations = {}
        mem_reservations = {}

        # allocate the cores to the slivers that have them reserved
        # TODO: Need to sort this from biggest cpu_cores to smallest
        for name, rec in slivers.iteritems():
            rspec = rec["_rspec"]
            cores = rspec.get(self.slice_attr_name, 0)
            (cores, bestEffort) = self.decodeCoreSpec(cores)

            lastCpu = None

            while (cores > 0):
                # one cpu core is always reserved for best effort and system
                # slices
                if len(cpus) <= 1:
                    logger.log("CoreSched: ran out of units while scheduling sliver " + name)
                else:
                    cpu = self.find_compatible_cpu(cpus, lastCpu)
                    cpus.remove(cpu)
                    lastCpu = cpu

                    logger.log("CoreSched: allocating unit " + str(cpu) + " to slice " + name)
                    reservations[name] = reservations.get(name, []) + [cpu]

                    # now find a memory node to go with the cpu
                    if memSchedule:
                        mem = self.find_associated_memnode(mems, cpu)
                        if mem != None:
                            mems.remove(mem)
                            logger.log("CoreSched: allocating memory node " + str(mem) + " to slice " + name)
                            mem_reservations[name] = mem_reservations.get(name, []) + [mem]
                        else:
                            logger.log("CoreSched: failed to find memory node for cpu " + str(cpu))

                cores = cores - 1

        # the leftovers go to everyone else
        logger.log("CoreSched: allocating units " + str(cpus) + " to _default")
        reservations["_default"] = cpus[:]
        mem_reservations["_default"] = mems[:]

        # now check and see if any of our slices had the besteffort flag set
        for name, rec in slivers.iteritems():
            rspec = rec["_rspec"]
            cores = rspec.get(self.slice_attr_name, 0)
            (cores, bestEffort) = self.decodeCoreSpec(cores)

            # if the bestEffort flag isn't set then we have nothing to do
            if not bestEffort:
                continue

            # note that if a reservation is [], then we don't need to add
            # bestEffort cores to it, since it is bestEffort by default.
            if reservations.get(name, []) != []:
                reservations[name] = reservations[name] + reservations["_default"]
                mem_reservations[name] = mem_reservations.get(name, []) + mem_reservations["_default"]
                logger.log("CoreSched: adding besteffort units to " + name + ". new units = " + str(reservations[name]))

        self.reserveUnits(self.cgroup_var_name, reservations)
        self.reserveUnits(self.cgroup_mem_name, mem_reservations)
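
    # a sketch of the end-to-end result on a hypothetical 4-core machine with
    # slivers {"slice_a": "2", "slice_b": "1b"}: slice_a gets e.g. [0, 1],
    # slice_b gets [2], core 3 is left for _default, and since slice_b asked
    # for besteffort it ends up with [2, 3].
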
    def reserveUnits(self, var_name, reservations):
        """ given a set of reservations (dictionary of slicename:cpuid_list),
            write those reservations to the appropriate cgroup files.

            reservations["_default"] is assumed to be the default reservation
            for slices that do not reserve cores. It's essentially the
            leftover cpu cores.
        """

        default = reservations["_default"]

        # set the default vserver cpuset. this will deal with any vservers
        # that might be created before the nodemanager has had a chance to
        # update the cpusets.
        self.reserveDefault(var_name, default)

        for cgroup in self.get_cgroups():
            if cgroup in reservations:
                cpus = reservations[cgroup]
                logger.log("CoreSched: reserving " + var_name + " on " + cgroup + ": " + str(cpus))
            else:
                # no log message for the default case; too much verbosity in
                # the common case
                cpus = default

            if glo_coresched_simulate:
                print "R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus)
            else:
                cgroups.write(cgroup, var_name, self.listToRange(cpus))
                #file("/dev/cgroup/" + cgroup + "/" + var_name, "w").write( self.listToRange(cpus) + "\n" )
    def reserveDefault(self, var_name, cpus):
        # the vserver defaults directory is no longer written directly; the
        # old code is kept below for reference
        #if not os.path.exists("/etc/vservers/.defaults/cgroup"):
        #    os.makedirs("/etc/vservers/.defaults/cgroup")

        #if glo_coresched_simulate:
        #    print "RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus)
        #else:
        #    file("/etc/vservers/.defaults/cgroup/" + var_name, "w").write( self.listToRange(cpus) + "\n" )
        pass

    def listToRange(self, list):
        """ take a list of items [1,2,3,5,...] and ideally return it as a
            range string: "1-3,5". For now, just comma-separate; the cpuset
            files accept that form too.
        """
        return ",".join([str(i) for i in list])
270 """ return a list of available cpu identifiers: [0,1,2,3...]
273 # the cpus never change, so if it's already been computed then don't
278 self.mems = self.get_cgroup_var(self.cgroup_mem_name, 'cpuset')
280 # build a mapping from memory nodes to the cpus they can be used with
283 for item in self.mems:
284 mems_map[item] = self.get_memnode_cpus(item)
286 if (len(mems_map)>0):
287 # when NUMA_EMU is enabled, only the last memory node will contain
288 # the cpu_map. For example, if there were originally 2 nodes and
289 # we used NUM_EMU to raise it to 12, then
293 # mems_map[5]=[1,3,5,7,9,11]
297 # mems_map[11]=[0,2,4,6,8,10]
298 # so, we go from back to front, copying the entries as necessary.
300 if mems_map[self.mems[0]] == []:
302 for item in reversed(self.mems):
303 if mems_map[item]!=[]:
304 work = mems_map[item]
305 else: # mems_map[item]==[]
306 mems_map[item] = work
308 self.mems_map = mems_map
    def find_associated_memnode(self, mems, cpu):
        """ Given a list of memory nodes and a cpu, see if one of the nodes
            in the list can be used with that cpu.
        """
        for item in mems:
            if cpu in self.mems_map[item]:
                return item
        return None
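
    # e.g. with mems_map = {0: [0, 2], 1: [1, 3]} (a hypothetical two-node
    # box), find_associated_memnode([0, 1], cpu=3) returns node 1.
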
    def get_memnode_cpus(self, index):
        """ for a given memory node, return the cpus that it is associated
            with.
        """
        fn = "/sys/devices/system/node/node" + str(index) + "/cpulist"
        if not os.path.exists(fn):
            logger.log("CoreSched: failed to locate memory node " + fn)
            return []

        return self.get_cgroup_var(filename=fn)

    def get_core_siblings(self, index):
        """ for a given cpu, return the list of cpus that share its physical
            package.
        """
        # use core_siblings rather than core_siblings_list, as it's compatible
        # with older kernels that do not have the _list variant
        fn = "/sys/devices/system/cpu/cpu" + str(index) + "/topology/core_siblings"
        if not os.path.exists(fn):
            return []

        siblings = []

        # core_siblings is a comma-separated series of 32-bit hex masks; take
        # the least-significant word and walk its bits
        x = int(open(fn, 'rt').readline().strip().split(',')[-1], 16)
        cpuid = 0
        while (x > 0):
            if (x & 1) != 0:
                siblings.append(cpuid)
            x = x >> 1
            cpuid += 1

        return siblings
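
    # e.g. a core_siblings mask of "0000000f" decodes to siblings [0, 1, 2, 3]
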
# a little self-test when run standalone
if __name__ == "__main__":
    glo_coresched_simulate = True

    x = CoreSched()

    print "cgroups:", ",".join(x.get_cgroups())

    print "cpus:", x.listToRange(x.get_cpus())
    print "siblings:"
    for item in x.get_cpus():
        print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])])

    print "mems:", x.listToRange(x.get_mems())
    print "cpu to memory map:"
    for item in x.get_mems():
        print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])])

    # test slivers: one core, five cores, and three cores plus besteffort
    rspec_sl_test1 = {"cpu_cores": "1"}
    rec_sl_test1 = {"_rspec": rspec_sl_test1}

    rspec_sl_test2 = {"cpu_cores": "5"}
    rec_sl_test2 = {"_rspec": rspec_sl_test2}

    rspec_sl_test3 = {"cpu_cores": "3b"}
    rec_sl_test3 = {"_rspec": rspec_sl_test3}

    #slivers = {"sl_test1": rec_sl_test1, "sl_test2": rec_sl_test2}
    slivers = {"arizona_beta": rec_sl_test1, "arizona_test101": rec_sl_test2, "pl_sirius": rec_sl_test3}
    #slivers = {"arizona_beta": rec_sl_test1, "arizona_logmon": rec_sl_test2, "arizona_owl": rec_sl_test3}

    x.adjustCores(slivers)
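
    # with simulation on, the run ends with "R" lines showing what would be
    # written to each cgroup's cpuset.cpus and cpuset.mems files.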