X-Git-Url: http://git.onelab.eu/?p=nodemanager.git;a=blobdiff_plain;f=sliver_libvirt.py;h=cea8a391471df523f6399ddbe334cefb619d7c01;hp=cf6b0c2888397bab3daf0a90826c097cf0f446d2;hb=48a73b18fd7daed13c645c1adeddb57b560e7a2d;hpb=7b8fc390afd0349706c45c3ae970770cdf9dceae

diff --git a/sliver_libvirt.py b/sliver_libvirt.py
index cf6b0c2..cea8a39 100644
--- a/sliver_libvirt.py
+++ b/sliver_libvirt.py
@@ -170,7 +170,7 @@ class Sliver_Libvirt(Account):
             try:
                 # create actually means start
                 self.dom.create()
-            except Exception, e:
+            except Exception as e:
                 # XXX smbaker: attempt to resolve slivers that are stuck in
                 #   "failed to allocate free veth".
                 if "ailed to allocate free veth" in str(e):
@@ -225,7 +225,7 @@ class Sliver_Libvirt(Account):
 
         # Btrfs support quota per volumes
 
-        if rec.has_key("rspec") and rec["rspec"].has_key("tags"):
+        if "rspec" in rec and "tags" in rec["rspec"]:
             if cgroups.get_cgroup_path(self.name) == None:
                 # If configure is called before start, then the cgroups won't exist
                 # yet. NM will eventually re-run configure on the next iteration.
@@ -235,7 +235,7 @@ class Sliver_Libvirt(Account):
             else:
                 tags = rec["rspec"]["tags"]
                 # It will depend on the FS selection
-                if tags.has_key('disk_max'):
+                if 'disk_max' in tags:
                     disk_max = tags['disk_max']
                     if disk_max == 0:
                         # unlimited
@@ -245,17 +245,17 @@ class Sliver_Libvirt(Account):
                         pass
 
                 # Memory allocation
-                if tags.has_key('memlock_hard'):
+                if 'memlock_hard' in tags:
                     mem = str(int(tags['memlock_hard']) * 1024) # hard limit in bytes
                     cgroups.write(self.name, 'memory.limit_in_bytes', mem, subsystem="memory")
-                if tags.has_key('memlock_soft'):
+                if 'memlock_soft' in tags:
                     mem = str(int(tags['memlock_soft']) * 1024) # soft limit in bytes
                     cgroups.write(self.name, 'memory.soft_limit_in_bytes', mem, subsystem="memory")
 
                 # CPU allocation
                 # Only cpu_shares until figure out how to provide limits and guarantees
                 # (RT_SCHED?)
-                if tags.has_key('cpu_share'):
+                if 'cpu_share' in tags:
                     cpu_share = tags['cpu_share']
                     cgroups.write(self.name, 'cpu.shares', cpu_share)
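
Note on the changes: "except Exception, e:" is a syntax error in Python 3, and dict.has_key() was removed in Python 3, so the diff rewrites both in their Python 3 forms (which are also valid on Python 2.6+). A minimal, self-contained sketch of the same idioms follows; apply_limits(), start_domain() and their tags/dom arguments are hypothetical stand-ins for the nodemanager objects, not part of the module itself.

# Sketch only: illustrates the Python 3-compatible idioms adopted in the diff.
# apply_limits() and start_domain() are hypothetical helpers, not nodemanager code.

def apply_limits(tags):
    # Membership test replaces dict.has_key(), which no longer exists in Python 3.
    if 'memlock_hard' in tags:
        # The tag value is multiplied by 1024 before being written to
        # memory.limit_in_bytes, mirroring configure() above.
        mem = str(int(tags['memlock_hard']) * 1024)
        print('memory.limit_in_bytes =', mem)
    if 'cpu_share' in tags:
        print('cpu.shares =', tags['cpu_share'])

def start_domain(dom):
    try:
        dom.create()
    # "except Exception, e:" is invalid in Python 3; the "as" form works on 2.6+ and 3.
    except Exception as e:
        print('create failed:', e)
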