4 """Whole core scheduling
glo_coresched_simulate = False
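
# When glo_coresched_simulate is True, reservations are printed rather than
# written to the cgroup files. A hypothetical usage sketch (the module and
# slice names here are illustrative, not from the original source):
#   import coresched
#   coresched.glo_coresched_simulate = True
#   sched = coresched.CoreSched()
#   sched.adjustCores({"sl_a": {"_rspec": {"cpu_cores": "2"}}})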
14 """ Whole-core scheduler

        The main entrypoint is adjustCores(self, slivers), which takes a
        dictionary of sliver records. The cpu_cores field is pulled from the
        effective rspec (rec["_rspec"]) of each sliver.

        If cpu_cores > 0 for a sliver, then that sliver reserves that many
        of the machine's cpu cores.

        One core is always left unreserved, for system slices.
    """

    def __init__(self, cgroup_var_name="cpuset.cpus", slice_attr_name="cpu_cores"):
        self.cpus = []
        self.cgroup_var_name = cgroup_var_name
        self.slice_attr_name = slice_attr_name
        self.cgroup_mem_name = "cpuset.mems"
        self.mems = []
        self.mems_map = {}
        self.cpu_siblings = {}

    def get_cgroup_var(self, name=None, filename=None):
        """ decode cpuset.cpus or cpuset.mems into a list of units that can
            be reserved.
        """
        assert(filename!=None or name!=None)

        if filename==None:
            filename = "/dev/cgroup/" + name

        data = open(filename).readline().strip()

        if not data:
            return []

        units = []

        # cpuset.cpus could be something as arbitrary as:
        #    0,1,2-3,4,5-6
        # deal with commas and ranges
        for part in data.split(","):
            unitRange = part.split("-")
            if len(unitRange) == 1:
                unitRange = (unitRange[0], unitRange[0])
            for i in range(int(unitRange[0]), int(unitRange[1])+1):
                if not i in units:
                    units.append(i)

        return units
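
    # Illustrative decoding (hypothetical file contents, not from a real
    # node): a cpuset.cpus line reading "0,2-4,7" decodes to [0, 2, 3, 4, 7],
    # and a cpuset.mems line reading "0-1" decodes to [0, 1].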
66 """ return a list of available cpu identifiers: [0,1,2,3...]
69 # the cpus never change, so if it's already been computed then don't
74 self.cpus = self.get_cgroup_var(self.cgroup_var_name)

        self.cpu_siblings = {}
        for item in self.cpus:
            self.cpu_siblings[item] = self.get_core_siblings(item)

        return self.cpus

    def find_cpu_mostsiblings(self, cpus):
        # pick the cpu whose siblings (cpus in the same physical package)
        # overlap the most with the remaining available cpus
        bestCount = -1
        bestCpu = None
        for cpu in cpus:
            count = 0
            for candidate in self.cpu_siblings[cpu]:
                if candidate in cpus:
                    count = count + 1
                if (count > bestCount):
                    bestCount = count
                    bestCpu = cpu

        return bestCpu

    def find_compatible_cpu(self, cpus, compatCpu):
        if compatCpu==None:
            return self.find_cpu_mostsiblings(cpus)

        # find a sibling if we can, so a sliver's cores share a package
        for cpu in cpus:
            if compatCpu in self.cpu_siblings[cpu]:
                return cpu

        return self.find_cpu_mostsiblings(cpus)
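
    # Illustrative example (hypothetical sibling map, not from a real node):
    # with cpu_siblings = {0: [0, 2], 1: [1, 3], 2: [0, 2], 3: [1, 3]},
    # find_compatible_cpu([1, 2, 3], 0) returns 2, since 2 is a sibling of 0,
    # while find_compatible_cpu([1, 3], 0) falls back to
    # find_cpu_mostsiblings([1, 3]).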

    def get_cgroups (self):
        """ return a list of cgroups
            this might change as vservers are instantiated, so always compute
            it dynamically.
        """
        cgroups = []
        filenames = os.listdir("/dev/cgroup")
        for filename in filenames:
            if os.path.isdir(os.path.join("/dev/cgroup", filename)):
                cgroups.append(filename)
        return cgroups

    def decodeCoreSpec (self, cores):
        """ Decode the value of the core attribute. It's a number, followed by
            an optional letter "b" to indicate besteffort cores should also
            be supplied.
        """
        bestEffort = False

        if cores.endswith("b"):
            cores = cores[:-1]
            bestEffort = True

        try:
            cores = int(cores)
        except ValueError:
            cores = 0

        return (cores, bestEffort)
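
    # Illustrative decodings:
    #   decodeCoreSpec("2")    -> (2, False)
    #   decodeCoreSpec("3b")   -> (3, True)
    #   decodeCoreSpec("junk") -> (0, False)
    # Callers must pass a string; adjustCores defaults missing attributes
    # to "0" for this reason.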

    def adjustCores (self, slivers):
        """ slivers is a dict of {sliver_name: rec}
                rec is a dict of attributes
                    rec['_rspec'] is the effective rspec
        """

        cpus = self.get_cpus()[:]
        mems = self.get_mems()[:]

        memSchedDisabled = False
        if (len(mems) != len(cpus)):
            logger.log("CoreSched: fewer mems than " + self.cgroup_var_name + " units; mem scheduling disabled")
            memSchedDisabled = True
155 logger.log("CoreSched (" + self.cgroup_var_name + "): available units: " + str(cpus))
158 mem_reservations = {}

        # allocate the cores to the slivers that have them reserved
        # TODO: Need to sort this from biggest cpu_cores to smallest
        for name, rec in slivers.iteritems():
            rspec = rec["_rspec"]
            # default to the string "0" so decodeCoreSpec always gets a string
            cores = rspec.get(self.slice_attr_name, "0")
            (cores, bestEffort) = self.decodeCoreSpec(cores)

            lastCpu = None

            while (cores > 0):
                # one cpu core is always left for best effort and system slices
                if len(cpus) <= 1:
                    logger.log("CoreSched: ran out of units while scheduling sliver " + name)
                    break

                cpu = self.find_compatible_cpu(cpus, lastCpu)
                cpus.remove(cpu)
                lastCpu = cpu

                logger.log("CoreSched: allocating unit " + str(cpu) + " to slice " + name)
                reservations[name] = reservations.get(name,[]) + [cpu]

                # now find a memory node to go with the cpu
                if not memSchedDisabled:
                    mem = self.find_associated_memnode(mems, cpu)
                    if mem != None:
                        mems.remove(mem)
                        logger.log("CoreSched: allocating memory node " + str(mem) + " to slice " + name)
                        mem_reservations[name] = mem_reservations.get(name,[]) + [mem]
                    else:
                        logger.log("CoreSched: failed to find memory node for cpu " + str(cpu))

                cores = cores - 1

        # the leftovers go to everyone else
        logger.log("CoreSched: allocating units " + str(cpus) + " to _default")
        reservations["_default"] = cpus[:]
        mem_reservations["_default"] = mems[:]

        # now check and see if any of our slices had the besteffort flag set
        for name, rec in slivers.iteritems():
            rspec = rec["_rspec"]
            cores = rspec.get(self.slice_attr_name, "0")
            (cores, bestEffort) = self.decodeCoreSpec(cores)

            # if the bestEffort flag isn't set then we have nothing to do
            if not bestEffort:
                continue

            # note that if a reservation is [], then we don't need to add
            # bestEffort cores to it, since it is bestEffort by default.
            if reservations.get(name,[]) != []:
                reservations[name] = reservations[name] + reservations["_default"]
                mem_reservations[name] = mem_reservations.get(name,[]) + mem_reservations["_default"]
                logger.log("CoreSched: adding besteffort units to " + name + ". new units = " + str(reservations[name]))

        self.reserveUnits(self.cgroup_var_name, reservations)

        if not memSchedDisabled:
            self.reserveUnits(self.cgroup_mem_name, mem_reservations)
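
    # Illustrative run (hypothetical 4-cpu node, not a real trace): with
    # cpus [0, 1, 2, 3] and slivers requesting "2" (sl_a) and "1b" (sl_b),
    # adjustCores might produce
    #   reservations = {"sl_a": [0, 2], "sl_b": [1], "_default": [3]}
    # and, since sl_b set the besteffort flag, its final set becomes
    # [1, 3]: its reserved core plus the _default leftovers.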

    def reserveUnits (self, var_name, reservations):
        """ given a set of reservations (dictionary of slicename:cpuid_list),
            write those reservations to the appropriate cgroup files.

            reservations["_default"] is assumed to be the default reservation
            for slices that do not reserve cores. It's essentially the leftover
            cpu cores.
        """

        default = reservations["_default"]

        # set the default vserver cpuset. this will deal with any vservers
        # that might be created before the nodemanager has had a chance to
        # update the cpusets.
        self.reserveDefault(var_name, default)

        for cgroup in self.get_cgroups():
            if cgroup in reservations:
                cpus = reservations[cgroup]
                logger.log("CoreSched: reserving " + var_name + " on " + cgroup + ": " + str(cpus))
            else:
                # no log message for default; too much verbosity in the common case
                cpus = default

            if glo_coresched_simulate:
                print "R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus)
            else:
                file("/dev/cgroup/" + cgroup + "/" + var_name, "w").write( self.listToRange(cpus) + "\n" )

    def reserveDefault (self, var_name, cpus):
        if not os.path.exists("/etc/vservers/.defaults/cgroup"):
            os.makedirs("/etc/vservers/.defaults/cgroup")

        if glo_coresched_simulate:
            print "RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus)
        else:
            file("/etc/vservers/.defaults/cgroup/" + var_name, "w").write( self.listToRange(cpus) + "\n" )

    def listToRange (self, list):
        """ take a list of items [1,2,3,5,...] and return it as a range string: "1-3,5"
            (for now, just comma-separate)
        """
        return ",".join( [str(i) for i in list] )
266 """ return a list of available cpu identifiers: [0,1,2,3...]
269 # the cpus never change, so if it's already been computed then don't
274 self.mems = self.get_cgroup_var(self.cgroup_mem_name)

        # build a mapping from memory nodes to the cpus they can be used with
        mems_map = {}
        for item in self.mems:
            mems_map[item] = self.get_memnode_cpus(item)

        if (len(mems_map)>0):
            # when NUMA_EMU is enabled, only the last emulated node of each
            # original memory node will contain the cpu map. For example, if
            # there were originally 2 nodes and we used NUMA_EMU to raise
            # that to 12, then:
            #    mems_map[0]=[]
            #    ...
            #    mems_map[5]=[1,3,5,7,9,11]
            #    mems_map[6]=[]
            #    ...
            #    mems_map[11]=[0,2,4,6,8,10]
            # so, we go from back to front, copying the entries as necessary.
            if mems_map[self.mems[0]] == []:
                work = []
                for item in reversed(self.mems):
                    if mems_map[item]!=[]:
                        work = mems_map[item]
                    else: # mems_map[item]==[]
                        mems_map[item] = work

        self.mems_map = mems_map

        return self.mems

    def find_associated_memnode(self, mems, cpu):
        """ Given a list of memory nodes and a cpu, see if one of the nodes in
            the list can be used with that cpu.
        """
        for item in mems:
            if cpu in self.mems_map[item]:
                return item
        return None
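
    # Illustrative example (hypothetical map): with mems_map = {0: [0, 2],
    # 1: [1, 3]}, find_associated_memnode([0, 1], 3) returns 1, while
    # find_associated_memnode([0], 3) returns None.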

    def get_memnode_cpus(self, index):
        """ for a given memory node, return the cpus that it is associated
            with.
        """
        fn = "/sys/devices/system/node/node" + str(index) + "/cpulist"
        if not os.path.exists(fn):
            logger.log("CoreSched: failed to locate memory node " + fn)
            return []

        return self.get_cgroup_var(filename=fn)
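
    # Illustrative contents (hypothetical machine): if
    # /sys/devices/system/node/node0/cpulist reads "0-3", then
    # get_memnode_cpus(0) returns [0, 1, 2, 3].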

    def get_core_siblings(self, index):
        # use core_siblings rather than core_siblings_list, as it's compatible
        # with older kernels
        fn = "/sys/devices/system/cpu/cpu" + str(index) + "/topology/core_siblings"
        if not os.path.exists(fn):
            return []

        siblings = []

        # core_siblings is a hex bitmask; bit N set means cpu N is a sibling
        x = int(open(fn,"rt").readline().strip(),16)
        cpuid = 0
        while (x>0):
            if (x&1)!=0:
                siblings.append(cpuid)
            x = x >> 1
            cpuid += 1

        return siblings
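
    # Illustrative decoding: a core_siblings mask of "0f" (binary 1111)
    # yields siblings [0, 1, 2, 3]; a mask of "05" (binary 0101) yields [0, 2].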


if __name__=="__main__":
    glo_coresched_simulate = True

    x = CoreSched()

    print "cgroups:", ",".join(x.get_cgroups())

    print "cpus:", x.listToRange(x.get_cpus())
    print "cpu to sibling map:"
    for item in x.get_cpus():
        print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item,[])])

    print "mems:", x.listToRange(x.get_mems())
    print "cpu to memory map:"
    for item in x.get_mems():
        print " ", item, ",".join([str(y) for y in x.mems_map.get(item,[])])

    rspec_sl_test1 = {"cpu_cores": "1"}
    rec_sl_test1 = {"_rspec": rspec_sl_test1}

    rspec_sl_test2 = {"cpu_cores": "5"}
    rec_sl_test2 = {"_rspec": rspec_sl_test2}

    rspec_sl_test3 = {"cpu_cores": "3b"}
    rec_sl_test3 = {"_rspec": rspec_sl_test3}

    #slivers = {"sl_test1": rec_sl_test1, "sl_test2": rec_sl_test2}
    slivers = {"arizona_beta": rec_sl_test1, "arizona_test101": rec_sl_test2, "pl_sirius": rec_sl_test3}
    #slivers = {"arizona_beta": rec_sl_test1, "arizona_logmon": rec_sl_test2, "arizona_owl": rec_sl_test3}

    x.adjustCores(slivers)