+ output += Sliver_Libvirt.dom_details(dom)
+ return output
+
+ # Thierry : I am not quite sure if /etc/libvirt/lxc/<>.xml holds a reliably up-to-date
+ # copy of the sliver XML config; I feel like issuing a virsh dumpxml first might be safer
+ def repair_veth(self):
+ # See workaround email, 2-14-2014, "libvirt 1.2.1 rollout"
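+ # A possibly safer variant (untested sketch, per the note above): refresh
+ # the XML from libvirt before parsing, e.g.
+ #     xml = subprocess.check_output(["virsh", "dumpxml", self.name])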
+ xmlfilename = "/etc/libvirt/lxc/{}.xml".format(self.name)
+ with open(xmlfilename) as xmlfile:
+ xml = xmlfile.read()
+ # capture just the veth names out of the <target dev='vethNNN'/> elements
+ veths = re.findall("<target dev='(veth[0-9]*)'/>", xml)
+ for veth in veths:
+ command = ["ip", "link", "delete", veth]
+ logger.log_call(command)
+
+ logger.log("trying to redefine the VM")
+ command = [ "virsh", "define", xmlfilename ]
+ logger.log_call(command)
+
+ def start(self, delay=0):
+ '''Just start the sliver'''
+ logger.verbose('sliver_libvirt: {} start'.format(self.name))
+
+ # Check if it's running to avoid throwing an exception if the
+ # domain was already running
+ if not self.is_running():
+ try:
+ # create actually means start
+ self.dom.create()
+ except Exception as e:
+ # XXX smbaker: attempt to resolve slivers that are stuck in
+ # "failed to allocate free veth". The leading letter is left off
+ # so the match works whether libvirt reports "Failed" or "failed".
+ if "ailed to allocate free veth" in str(e):
+ logger.log("failed to allocate free veth on {}".format(self.name))
+ self.repair_veth()
+ logger.log("trying dom.create again")
+ self.dom.create()
+ else:
+ raise
+ else:
+ logger.verbose('sliver_libvirt: sliver {} already started'.format(self.name))
+
+ # After the VM is started we can play with the virtual interface.
+ # Create an ebtables rule to mark packets going out from the virtual
+ # interface to the actual device, so the filter can match against the mark.
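+ # e.g. for xid 1234 this issues:
+ #     ebtables -A INPUT -i veth1234 -j mark --set-mark 1234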
+ bwlimit.ebtables("-A INPUT -i veth{} -j mark --set-mark {}"
+ .format(self.xid, self.xid))
+
+ def stop(self):
+ logger.verbose('sliver_libvirt: {} stop'.format(self.name))
+
+ # Remove the ebtables rule before stopping
+ bwlimit.ebtables("-D INPUT -i veth{} -j mark --set-mark {}"
+ .format(self.xid, self.xid))
+
+ try:
+ self.dom.destroy()
+ except:
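+ # e.g. the domain was already stopped; log and carry on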
+ logger.log_exc("in sliver_libvirt.stop", name=self.name)
+
+ def is_running(self):
+ ''' Return True if the domain is running '''
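+ # dom.state() returns a (state, reason) pair; only the state matters here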
+ (state, _) = self.dom.state()
+ result = (state == libvirt.VIR_DOMAIN_RUNNING)
+ logger.verbose('sliver_libvirt.is_running: {} => {}'
+ .format(self, result))
+ return result
+
+ def configure(self, rec):
+
+ # sliver.[LXC/QEMU] -> lower case
+ #sliver_type = rec['type'].split('.')[1].lower()
+
+ #BASE_DIR = '/cgroup/libvirt/{}/{}/'.format(sliver_type, self.name)
+
+ # Disk allocation
+ # No way through cgroups... figure out how to do that with user/dir quotas.
+ # There is no way to do quotas per directory. Chown-ing would create
+ # problems as user namespaces are not yet implemented (and thus host
+ # and containers share the same user/group ids).
+
+ # Btrfs supports quotas per volume
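+ # (sketch, assuming each sliver rootfs is its own btrfs subvolume with
+ #  quota support enabled: 'btrfs qgroup limit 10G <subvolume>' would cap it)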
+
+ if "rspec" in rec and "tags" in rec["rspec"]:
+ if cgroups.get_cgroup_path(self.name) is None:
+ # If configure is called before start, then the cgroups won't exist
+ # yet. NM will eventually re-run configure on the next iteration.
+ # TODO: Add a post-start configure, and move this stuff there
+ logger.log("Configure: postponing tag check on {} as cgroups are not yet populated"
+ .format(self.name))
+ else:
+ tags = rec["rspec"]["tags"]
+ # It will depend on the FS selection
+ if 'disk_max' in tags:
+ disk_max = tags['disk_max']
+ if disk_max == 0:
+ # unlimited
+ pass
+ else:
+ # limit to certain number
+ pass
+
+ # Memory allocation
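+ # memlock_* tag values are presumably in KiB, hence the * 1024 below;
+ # e.g. memlock_hard=1048576 would yield a 1 GiB hard limit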
+ if 'memlock_hard' in tags:
+ mem = str(int(tags['memlock_hard']) * 1024) # hard limit in bytes
+ cgroups.write(self.name, 'memory.limit_in_bytes', mem, subsystem="memory")
+ if 'memlock_soft' in tags:
+ mem = str(int(tags['memlock_soft']) * 1024) # soft limit in bytes
+ cgroups.write(self.name, 'memory.soft_limit_in_bytes', mem, subsystem="memory")
+
+ # CPU allocation
+ # Only cpu_shares until we figure out how to provide limits and guarantees
+ # (RT_SCHED?)
+ if 'cpu_share' in tags:
+ cpu_share = tags['cpu_share']
+ cgroups.write(self.name, 'cpu.shares', cpu_share)
+
+ # Call the upper configure method (ssh keys...)
+ Account.configure(self, rec)
+
+ @staticmethod
+ def get_unique_vif():
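+ # Random 32-bit suffix, e.g. 'veth3735928559'; uniqueness is only
+ # probabilistic, see repair_veth() above for the failure path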
+ return 'veth{}'.format(random.getrandbits(32))
+
+ # A placeholder until we get true VirtualInterface objects
+ @staticmethod
+ def get_interfaces_xml(rec):
+ xml = """
+ <interface type='network'>
+ <source network='default'/>
+ <target dev='{}'/>
+ </interface>
+""".format(Sliver_Libvirt.get_unique_vif())
+ try:
+ tags = rec['rspec']['tags']
+ if 'interface' in tags:
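+ # The tag is expected to hold a python literal, e.g. a dict such as
+ #     "{'bridge': 'public0', 'vlan': '100'}"
+ # or a list of such dicts (inferred from the handling below; note that
+ # eval() runs arbitrary code, ast.literal_eval would be safer)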
+ interfaces = eval(tags['interface'])
+ if not isinstance(interfaces, (list, tuple)):
+ # if interface is not a list, then make it into a singleton list
+ interfaces = [interfaces]
+ tag_xml = ""
+ for interface in interfaces:
+ if 'vlan' in interface:
+ vlanxml = "<vlan><tag id='{}'/></vlan>".format(interface['vlan'])
+ else:
+ vlanxml = ""
+ if 'bridge' in interface:
+ tag_xml = tag_xml + """
+ <interface type='bridge'>
+ <source bridge='{}'/>
+ {}
+ <virtualport type='openvswitch'/>
+ <target dev='{}'/>
+ </interface>
+ """.format(interface['bridge'], vlanxml, Sliver_Libvirt.get_unique_vif())
+ else:
+ tag_xml = tag_xml + """
+ <interface type='network'>
+ <source network='default'/>
+ <target dev='{}'/>
+ </interface>
+ """.format(Sliver_Libvirt.get_unique_vif())