X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=coresched_vs.py;h=06aeb547b0ce4b51a994eabf2d0ec35c010f12fc;hb=430195a2b1382182d28dbcd16b21864ed758f52d;hp=d064e93e256c64d603eea4f47fa9a9f666f82bf5;hpb=dd274713d7098ab54c81516d67d700c53fc01e56;p=nodemanager.git diff --git a/coresched_vs.py b/coresched_vs.py index d064e93..06aeb54 100644 --- a/coresched_vs.py +++ b/coresched_vs.py @@ -173,7 +173,7 @@ class CoreSched: lastCpu = cpu logger.log("CoreSched: allocating unit " + str(cpu) + " to slice " + name) - reservations[name] = reservations.get(name,[]) + [cpu] + reservations[name] = reservations.get(name, []) + [cpu] # now find a memory node to go with the cpu if memSchedule: @@ -181,7 +181,7 @@ class CoreSched: if mem != None: mems.remove(mem) logger.log("CoreSched: allocating memory node " + str(mem) + " to slice " + name) - mem_reservations[name] = mem_reservations.get(name,[]) + [mem] + mem_reservations[name] = mem_reservations.get(name, []) + [mem] else: logger.log("CoreSched: failed to find memory node for cpu" + str(cpu)) @@ -206,9 +206,9 @@ class CoreSched: # note that if a reservation is [], then we don't need to add # bestEffort cores to it, since it is bestEffort by default. - if reservations.get(name,[]) != []: + if reservations.get(name, []) != []: reservations[name] = reservations[name] + reservations["_default"] - mem_reservations[name] = mem_reservations.get(name,[]) + mem_reservations["_default"] + mem_reservations[name] = mem_reservations.get(name, []) + mem_reservations["_default"] logger.log("CoreSched: adding besteffort units to " + name + ". 
new units = " + str(reservations[name])) self.reserveUnits(self.cgroup_var_name, reservations) @@ -242,7 +242,8 @@ class CoreSched: if glo_coresched_simulate: print "R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus) else: - file("/dev/cgroup/" + cgroup + "/" + var_name, "w").write( self.listToRange(cpus) + "\n" ) + with open("/dev/cgroup/{}/{}".format(cgroup, var_name), "w") as f: + f.write( self.listToRange(cpus) + "\n" ) def reserveDefault (self, var_name, cpus): if not os.path.exists("/etc/vservers/.defaults/cgroup"): @@ -251,7 +252,8 @@ class CoreSched: if glo_coresched_simulate: print "RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus) else: - file("/etc/vservers/.defaults/cgroup/" + var_name, "w").write( self.listToRange(cpus) + "\n" ) + with open("/etc/vservers/.defaults/cgroup/{}".format(var_name), "w") as f: + f.write( self.listToRange(cpus) + "\n" ) def listToRange (self, list): """ take a list of items [1,2,3,5,...] and return it as a range: "1-3,5" @@ -293,7 +295,7 @@ class CoreSched: if mems_map[self.mems[0]] == []: work = [] for item in reversed(self.mems): - if mems_map[item]!=[]: + if mems_map[item] != []: work = mems_map[item] else: # mems_map[item]==[] mems_map[item] = work @@ -330,7 +332,8 @@ class CoreSched: return [] siblings = [] - x = int(open(fn,"rt").readline().strip(),16) + with open(fn, "rt") as f: + x = int(f.readline().strip(), 16) cpuid = 0 while (x>0): if (x&1)!=0: @@ -352,12 +355,12 @@ if __name__=="__main__": print "cpus:", x.listToRange(x.get_cpus()) print "sibling map:" for item in x.get_cpus(): - print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item,[])]) + print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])]) print "mems:", x.listToRange(x.get_mems()) print "cpu to memory map:" for item in x.get_mems(): - print " ", item, ",".join([str(y) for y in x.mems_map.get(item,[])]) + print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])]) 
rspec_sl_test1 = {"cpu_cores": "1"} rec_sl_test1 = {"_rspec": rspec_sl_test1}