4 """Whole core scheduling
12 """ Whole-core scheduler
14 The main entrypoint is adjustCores(self, slivers) which takes a
15 dictionary of sliver records. The cpu_cores field is pulled from the
16 effective rspec (rec["_rspec"]) for each sliver.
18 If cpu_cores > 0 for a sliver, then that sliver will reserve one or
19 more of the cpu_cores on the machine.
21 One core is always left unreserved for system slices.
28 """ return a list of available cpu identifiers: [0,1,2,3...]
31 # the cpus never change, so if it's already been computed then don't
36 cpuset_cpus = open("/dev/cgroup/cpuset.cpus").readline().strip()
38 # cpuset.cpus could be something as arbitrary as:
40 # deal with commas and ranges
41 for part in cpuset_cpus.split(","):
42 cpuRange = part.split("-")
43 if len(cpuRange) == 1:
44 cpuRange = (cpuRange[0], cpuRange[0])
45 for i in range(int(cpuRange[0]), int(cpuRange[1])+1):
46 if not i in self.cpus:
def get_cgroups (self):
    """ return a list of cgroups
    this might change as vservers are instantiated, so always compute
    # each subdirectory of /dev/cgroup is one cgroup (one per vserver/sliver)
    # NOTE(review): 'cgroups' is presumably initialized to [] and returned
    # on elided lines -- confirm against the full file.
    filenames = os.listdir("/dev/cgroup")
    for filename in filenames:
        if os.path.isdir(os.path.join("/dev/cgroup", filename)):
            cgroups.append(filename)
def adjustCores (self, slivers):
    """ slivers is a dict of {sliver_name: rec}
    rec is a dict of attributes
    rec['_rspec'] is the effective rspec
    logger.log("CoreSched: adjusting cores")
    # copy the cached cpu list so popping from it does not consume the cache
    cpus = self.get_cpus()[:]
    for name, rec in slivers.iteritems():
        # NOTE(review): 'rspec' is presumably rec["_rspec"] (assignment is on
        # an elided line) -- confirm against the full file.
        cores = rspec.get("cpu_cores", 0)
        # one cpu core reserved for best effort and system slices
        logger.log("CoreSched: ran out of cpu cores while scheduling: " + name)
        logger.log("CoreSched: allocating cpu " + str(cpu) + " to slice " + name)
        # accumulate, since a sliver may be granted more than one core
        reservations[name] = reservations.get(name,[]) + [cpu]
    # the leftovers go to everyone else
    logger.log("CoreSched: allocating cpus " + str(cpus) + " to _default")
    reservations["_default"] = cpus[:]
    # push the computed reservations out to the cgroup filesystem
    self.reserveCores(reservations)
def reserveCores (self, reservations):
    """ give a set of reservations (dictionary of slicename:cpuid_list),
    write those reservations to the appropriate cgroup files.
    reservations["_default"] is assumed to be the default reservation
    for slices that do not reserve cores. It's essentially the leftover
    default = reservations["_default"]
    # set the default vserver cpuset. this will deal with any vservers
    # that might be created before the nodemanager has had a chance to
    # update the cpusets.
    self.reserveDefault(default)
    # every existing cgroup gets either its explicit reservation or the default
    for cgroup in self.get_cgroups():
        cpus = reservations.get(cgroup, default)
        logger.log("CoreSched: reserving " + cgroup + " " + str(cpus))
        # NOTE(review): the file(...) handle is never closed and leaks if
        # write() raises; consider open() + try/finally as in reserveDefault.
        file("/dev/cgroup/" + cgroup + "/cpuset.cpus", "w").write( self.listToRange(cpus) + "\n" )
def reserveDefault (self, cpus):
    """ Write the default cpu reservation to the vserver defaults.

    cpus: list of cpu ids that non-reserving (best-effort/system) slices
    may use. Writing it under /etc/vservers/.defaults/cgroup ensures
    vservers created before the node manager updates the live cgroups
    still inherit the correct cpuset.
    """
    if not os.path.exists("/etc/vservers/.defaults/cgroup"):
        os.makedirs("/etc/vservers/.defaults/cgroup")
    # open() with an explicit close instead of a bare file(...).write(...)
    # expression, so the handle is not leaked if write() raises
    f = open("/etc/vservers/.defaults/cgroup/cpuset.cpus", "w")
    try:
        f.write(self.listToRange(cpus) + "\n")
    finally:
        f.close()
def listToRange (self, list):
    """ take a list of cpu ids [1,2,3,5,...] and return it in cpuset
    range syntax: "1-3,5".

    Consecutive ids are collapsed into inclusive "lo-hi" spans; isolated
    ids appear on their own. Ids are deduplicated and sorted first, since
    order is irrelevant to the cpuset.cpus file format this string is
    written to. An empty input yields "".

    (The parameter keeps its historical name 'list' for interface
    compatibility, even though it shadows the builtin.)
    """
    ids = sorted(set(int(i) for i in list))
    parts = []
    i = 0
    while i < len(ids):
        # extend j to the end of the consecutive run starting at i
        j = i
        while j + 1 < len(ids) and ids[j+1] == ids[j] + 1:
            j = j + 1
        if i == j:
            parts.append(str(ids[i]))
        else:
            parts.append(str(ids[i]) + "-" + str(ids[j]))
        i = j + 1
    return ",".join(parts)
if __name__=="__main__":
    # quick manual smoke test; requires /dev/cgroup to be mounted.
    # NOTE(review): 'x' is presumably a scheduler instance constructed on an
    # elided line -- confirm against the full file.
    print "cpus:", x.listToRange(x.get_cpus())
    print "cgroups:", ",".join(x.get_cgroups())

    # a quick self-test for ScottLab slices sl_test1 and sl_test2
    # each test slice requests exactly one dedicated core
    rspec_sl_test1 = {"cpu_cores": 1}
    rec_sl_test1 = {"_rspec": rspec_sl_test1}

    rspec_sl_test2 = {"cpu_cores": 1}
    rec_sl_test2 = {"_rspec": rspec_sl_test2}

    slivers = {"sl_test1": rec_sl_test1, "sl_test2": rec_sl_test2}

    x.adjustCores(slivers)