X-Git-Url: http://git.onelab.eu/?p=nodemanager.git;a=blobdiff_plain;f=sliver_libvirt.py;h=cea8a391471df523f6399ddbe334cefb619d7c01;hp=e898c4baa0fd3007ca5e7dec7160938a3be3ca44;hb=HEAD;hpb=83a563b8ca292a68f490c91224605e758649d3a6

diff --git a/sliver_libvirt.py b/sliver_libvirt.py
index e898c4b..cea8a39 100644
--- a/sliver_libvirt.py
+++ b/sliver_libvirt.py
@@ -24,12 +24,33 @@ STATES = {
     libvirt.VIR_DOMAIN_CRASHED: 'crashed',
 }
 
-REASONS = {
-    libvirt.VIR_CONNECT_CLOSE_REASON_ERROR: 'Misc I/O error',
-    libvirt.VIR_CONNECT_CLOSE_REASON_EOF: 'End-of-file from server',
-    libvirt.VIR_CONNECT_CLOSE_REASON_KEEPALIVE: 'Keepalive timer triggered',
-    libvirt.VIR_CONNECT_CLOSE_REASON_CLIENT: 'Client requested it',
-}
+# with fedora24 and (broken) libvirt-python-1.3.3-3,
+# the following symbols are not available
+# kashyap on IRC reported that libvirt-python-1.3.5-1.fc24.x86_64
+# did not have the issue though
+try:
+    REASONS = {
+        # 0
+        libvirt.VIR_CONNECT_CLOSE_REASON_ERROR: 'Misc I/O error',
+        # 1
+        libvirt.VIR_CONNECT_CLOSE_REASON_EOF: 'End-of-file from server',
+        # 2
+        libvirt.VIR_CONNECT_CLOSE_REASON_KEEPALIVE: 'Keepalive timer triggered',
+        # 3
+        libvirt.VIR_CONNECT_CLOSE_REASON_CLIENT: 'Client requested it',
+    }
+except:
+    REASONS = {
+        # libvirt.VIR_CONNECT_CLOSE_REASON_ERROR
+        0 : 'Misc I/O error',
+        # libvirt.VIR_CONNECT_CLOSE_REASON_EOF
+        1 : 'End-of-file from server',
+        # libvirt.VIR_CONNECT_CLOSE_REASON_KEEPALIVE
+        2 : 'Keepalive timer triggered',
+        # libvirt.VIR_CONNECT_CLOSE_REASON_CLIENT
+        3 : 'Client requested it',
+    }
+    logger.log("WARNING : using hard-wired constants instead of symbolic names for CONNECT_CLOSE*")
 
 connections = dict()
 
@@ -149,7 +170,7 @@ class Sliver_Libvirt(Account):
             try:
                 # create actually means start
                 self.dom.create()
-            except Exception, e:
+            except Exception as e:
                 # XXX smbaker: attempt to resolve slivers that are stuck in
                 # "failed to allocate free veth".
                 if "ailed to allocate free veth" in str(e):
@@ -204,7 +225,7 @@ class Sliver_Libvirt(Account):
 
         # Btrfs support quota per volumes
 
-        if rec.has_key("rspec") and rec["rspec"].has_key("tags"):
+        if "rspec" in rec and "tags" in rec["rspec"]:
             if cgroups.get_cgroup_path(self.name) == None:
                 # If configure is called before start, then the cgroups won't exist
                 # yet. NM will eventually re-run configure on the next iteration.
@@ -214,7 +235,7 @@ class Sliver_Libvirt(Account):
             else:
                 tags = rec["rspec"]["tags"]
                 # It will depend on the FS selection
-                if tags.has_key('disk_max'):
+                if 'disk_max' in tags:
                     disk_max = tags['disk_max']
                     if disk_max == 0:
                         # unlimited
@@ -224,17 +245,17 @@ class Sliver_Libvirt(Account):
                         pass
 
             # Memory allocation
-            if tags.has_key('memlock_hard'):
+            if 'memlock_hard' in tags:
                 mem = str(int(tags['memlock_hard']) * 1024) # hard limit in bytes
                 cgroups.write(self.name, 'memory.limit_in_bytes', mem, subsystem="memory")
-            if tags.has_key('memlock_soft'):
+            if 'memlock_soft' in tags:
                 mem = str(int(tags['memlock_soft']) * 1024) # soft limit in bytes
                 cgroups.write(self.name, 'memory.soft_limit_in_bytes', mem, subsystem="memory")
 
             # CPU allocation
             # Only cpu_shares until figure out how to provide limits and guarantees
             # (RT_SCHED?)
-            if tags.has_key('cpu_share'):
+            if 'cpu_share' in tags:
                 cpu_share = tags['cpu_share']
                 cgroups.write(self.name, 'cpu.shares', cpu_share)
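
Note (not part of the patch above): the first hunk works around libvirt-python builds that lack the VIR_CONNECT_CLOSE_REASON_* symbols by wrapping the whole REASONS table in a broad try/except and falling back to hard-wired numbers. The sketch below shows the same fallback idea resolved per constant with getattr(); the helper name _CLOSE_REASONS is mine and the numeric values come from the patch's own comments. It is only an illustration of the technique, assuming the libvirt bindings are importable, not the nodemanager code.

# Standalone sketch: per-symbol fallback for missing CONNECT_CLOSE constants.
import libvirt

_CLOSE_REASONS = {
    # (symbolic name, numeric value from the patch comments) -> description
    ('VIR_CONNECT_CLOSE_REASON_ERROR', 0): 'Misc I/O error',
    ('VIR_CONNECT_CLOSE_REASON_EOF', 1): 'End-of-file from server',
    ('VIR_CONNECT_CLOSE_REASON_KEEPALIVE', 2): 'Keepalive timer triggered',
    ('VIR_CONNECT_CLOSE_REASON_CLIENT', 3): 'Client requested it',
}

# Use the symbol when the binding exposes it, otherwise the documented number.
REASONS = {
    getattr(libvirt, name, value): text
    for (name, value), text in _CLOSE_REASONS.items()
}

Compared with the patch, this narrows the failure mode to the missing attribute itself rather than catching every exception raised while building the dict; whether that is worth diverging from the committed version is a judgment call.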