zones = [zone.name for zone in zones]
return zones
+ def instance_to_rspec_node(self, slice_xrn, instance):
+ # determine node urn
+ node_xrn = instance.metadata.get('component_id')
+ if not node_xrn:
+ node_xrn = OSXrn('cloud', type='node')
+ else:
+ node_xrn = OSXrn(xrn=node_xrn, type='node')
+
+ rspec_node = Node()
+ rspec_node['component_id'] = node_xrn.urn
+ rspec_node['component_name'] = node_xrn.name
+ rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+ if instance.metadata.get('client_id'):
+ rspec_node['client_id'] = instance.metadata.get('client_id')
+
+ # get sliver details
+ sliver_xrn = OSXrn(xrn=slice_xrn, type='slice', id=instance.id)
+ rspec_node['sliver_id'] = sliver_xrn.get_urn()
+ flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
+ sliver = instance_to_sliver(flavor)
+ # get firewall rules
+ fw_rules = []
+ group_name = instance.metadata.get('security_groups')
+ if group_name:
+ group = self.driver.shell.nova_manager.security_groups.find(name=group_name)
+ for rule in group.rules:
+ port_range ="%s:%s" % (rule['from_port'], rule['to_port'])
+ fw_rule = FWRule({'protocol': rule['ip_protocol'],
+ 'port_range': port_range,
+ 'cidr_ip': rule['ip_range']['cidr']})
+ fw_rules.append(fw_rule)
+ sliver['fw_rules'] = fw_rules
+ rspec_node['slivers']= [sliver]
+ # get disk image
+ image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
+ if isinstance(image, list) and len(image) > 0:
+ image = image[0]
+ disk_image = image_to_rspec_disk_image(image)
+ sliver['disk_image'] = [disk_image]
+
+ # get interfaces
+ rspec_node['services'] = []
+ rspec_node['interfaces'] = []
+ addresses = instance.addresses
+ # HACK: public ips are stored in the list of private, but
+ # this seems wrong. Assume pub ip is the last in the list of
+ # private ips until openstack bug is fixed.
+ if addresses.get('private'):
+ login = Login({'authentication': 'ssh-keys',
+ 'hostname': addresses.get('private')[-1]['addr'],
+ 'port':'22', 'username': 'root'})
+ service = Services({'login': login})
+ rspec_node['services'].append(service)
+
+ # if_index numbers the interfaces (eth0, eth1, ...); it was previously
+ # referenced without ever being bound, raising NameError at runtime.
+ for if_index, private_ip in enumerate(addresses.get('private', [])):
+ if_xrn = PlXrn(auth=self.driver.hrn,
+ interface='node%s' % (instance.hostId))
+ if_client_id = Xrn(if_xrn.urn, type='interface', id="eth%s" %if_index).urn
+ if_sliver_id = Xrn(rspec_node['sliver_id'], type='slice', id="eth%s" %if_index).urn
+ interface = Interface({'component_id': if_xrn.urn,
+ 'client_id': if_client_id,
+ 'sliver_id': if_sliver_id})
+ interface['ips'] = [{'address': private_ip['addr'],
+ #'netmask': private_ip['network'],
+ 'type': 'ipv%s' % str(private_ip['version'])}]
+ rspec_node['interfaces'].append(interface)
+
+ # slivers always provide the ssh service
+ for public_ip in addresses.get('public', []):
+ login = Login({'authentication': 'ssh-keys',
+ 'hostname': public_ip['addr'],
+ 'port':'22', 'username': 'root'})
+ service = Services({'login': login})
+ rspec_node['services'].append(service)
+ return rspec_node
+
def get_slice_nodes(self, slice_xrn):
# update nova connection
tenant_name = OSXrn(xrn=slice_xrn, type='slice').get_tenant_name()
instances = self.driver.shell.nova_manager.servers.findall(name=name)
rspec_nodes = []
for instance in instances:
- # determine node urn
- node_xrn = instance.metadata.get('component_id')
- if not node_xrn:
- node_xrn = OSXrn('cloud', type='node')
- else:
- node_xrn = OSXrn(xrn=node_xrn, type='node')
-
- rspec_node = Node()
- rspec_node['component_id'] = node_xrn.urn
- rspec_node['component_name'] = node_xrn.name
- rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
- if instance.metadata.get('client_id'):
- rspec_node['client_id'] = instance.metadata.get('client_id')
-
- # get sliver details
- sliver_xrn = OSXrn(xrn=slice_xrn, type='slice', id=instance.id)
- rspec_node['sliver_id'] = sliver_xrn.get_urn()
- flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
- sliver = instance_to_sliver(flavor)
- # get firewall rules
- fw_rules = []
- group_name = instance.metadata.get('security_groups')
- if group_name:
- group = self.driver.shell.nova_manager.security_groups.find(name=group_name)
- for rule in group.rules:
- port_range ="%s:%s" % (rule['from_port'], rule['to_port'])
- fw_rule = FWRule({'protocol': rule['ip_protocol'],
- 'port_range': port_range,
- 'cidr_ip': rule['ip_range']['cidr']})
- fw_rules.append(fw_rule)
- sliver['fw_rules'] = fw_rules
- rspec_node['slivers']= [sliver]
- # get disk image
- image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
- if isinstance(image, list) and len(image) > 0:
- image = image[0]
- disk_image = image_to_rspec_disk_image(image)
- sliver['disk_image'] = [disk_image]
-
- # get interfaces
- rspec_node['services'] = []
- rspec_node['interfaces'] = []
- addresses = instance.addresses
- # HACK: public ips are stored in the list of private, but
- # this seems wrong. Assume pub ip is the last in the list of
- # private ips until openstack bug is fixed.
- if addresses.get('private'):
- login = Login({'authentication': 'ssh-keys',
- 'hostname': addresses.get('private')[-1]['addr'],
- 'port':'22', 'username': 'root'})
- service = Services({'login': login})
- rspec_node['services'].append(service)
-
- for private_ip in addresses.get('private', []):
- if_xrn = PlXrn(auth=self.driver.hrn,
- interface='node%s' % (instance.hostId))
- if_client_id = Xrn(if_xrn.urn, type='interface', id="eth%s" %if_index).urn
- if_sliver_id = Xrn(rspec_node['sliver_id'], type='slice', id="eth%s" %if_index).urn
- interface = Interface({'component_id': if_xrn.urn,
- 'client_id': if_client_id,
- 'sliver_id': if_sliver_id})
- interface['ips'] = [{'address': private_ip['addr'],
- #'netmask': private_ip['network'],
- 'type': 'ipv%s' % str(private_ip['version'])}]
- rspec_node['interfaces'].append(interface)
-
- # slivers always provide the ssh service
- for public_ip in addresses.get('public', []):
- login = Login({'authentication': 'ssh-keys',
- 'hostname': public_ip['addr'],
- 'port':'22', 'username': 'root'})
- service = Services({'login': login})
- rspec_node['services'].append(service)
-
- rspec_nodes.append(rspec_node)
+ rspec_nodes.append(self.instance_to_rspec_node(slice_xrn, instance))
return rspec_nodes
def get_aggregate_nodes(self):
files = {'/root/.ssh/authorized_keys': authorized_keys}
rspec = RSpec(rspec)
requested_instances = defaultdict(list)
+
# iterate over clouds/zones/nodes
+ created_instances = []
for node in rspec.version.get_nodes_with_slivers():
instances = node.get('slivers', [])
if not instances:
metadata['security_groups'] = group_name
if node.get('component_id'):
metadata['component_id'] = node['component_id']
- if node.get('client_id'):
- metadata['client_id'] = node['client_id']
- self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
+ server = self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
image=image_id,
key_name = key_name,
security_groups = [group_name],
files=files,
meta=metadata,
name=instance_name)
+ if node.get('client_id'):
+ server.metadata['client_id'] = node['client_id']
+ created_instances.append(server)
+
except Exception, err:
logger.log_exc(err)
+ return created_instances
def delete_instances(self, instance_name, tenant_name):
inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
if not inst:
instance_deleted = True
- time.sleep(.5)
+ time.sleep(1)
manager.delete_security_group(security_group)
thread_manager = ThreadManager()
# sort slivers by node id
for node_id in slice['node_ids']:
- sliver = Sliver({'sliver_id': Xrn(slice_urn, type='slice', id=node_id, authority=self.driver.hrn).urn,
+ sliver_xrn = Xrn(slice_urn, type='sliver', id=node_id)
+ sliver_xrn.set_authority(self.driver.hrn)
+ sliver = Sliver({'sliver_id': sliver_xrn.urn,
'name': slice['name'],
'type': 'plab-vserver',
'tags': []})
for tag in tags:
# most likely a default/global sliver attribute (node_id == None)
if tag['node_id'] not in slivers:
- sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
+ sliver_xrn = Xrn(slice_urn, type='sliver', id=tag['node_id'])
+ sliver_xrn.set_authority(self.driver.hrn)
+ sliver = Sliver({'sliver_id': sliver_xrn.urn,
'name': slice['name'],
'type': 'plab-vserver',
'tags': []})
return (slice, slivers)
- def get_nodes_and_links(self, slice_xrn, slice=None,slivers=[], options={}):
+ def get_nodes_and_links(self, slice_xrn, slice=None,slivers=[], options={}, requested_slivers={}):
# if we are dealing with a slice that has no node just return
# and empty list
if slice_xrn:
rspec_node['component_name'] = node['hostname']
rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.driver.hrn, site['login_base']), 'authority+sa')
+ if requested_slivers and node['hostname'] in requested_slivers:
+ requested_sliver = requested_slivers[node['hostname']]
+ if requested_sliver.get('client_id'):
+ rspec_node['client_id'] = requested_sliver['client_id']
# do not include boot state (<available> element) in the manifest rspec
if not slice:
rspec_node['boot_state'] = node['boot_state']
# add sliver info
sliver = slivers[node['node_id']]
rspec_node['sliver_id'] = sliver['sliver_id']
- rspec_node['client_id'] = node['hostname']
rspec_node['slivers'] = [sliver]
# slivers always provide the ssh service
return rspec_leases
- def get_rspec(self, slice_xrn=None, version = None, options={}):
+ def get_rspec(self, slice_xrn=None, version = None, options={}, requested_slivers={}):
version_manager = VersionManager()
version = version_manager.get_version(version)
rspec.xml.set('expires', datetime_to_string(utcparse(slice['expires'])))
if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
- nodes, links = self.get_nodes_and_links(slice_xrn, slice, slivers, options)
- rspec.version.add_nodes(nodes)
- rspec.version.add_links(links)
- # add sliver defaults
- default_sliver = slivers.get(None, [])
- if default_sliver:
- default_sliver_attribs = default_sliver.get('tags', [])
- for attrib in default_sliver_attribs:
- logger.info(attrib)
- rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
+ nodes, links = self.get_nodes_and_links(slice_xrn, slice, slivers, options,
+ requested_slivers=requested_slivers)
+ rspec.version.add_nodes(nodes)
+ rspec.version.add_links(links)
+ # add sliver defaults
+ default_sliver = slivers.get(None, [])
+ if default_sliver:
+ default_sliver_attribs = default_sliver.get('tags', [])
+ for attrib in default_sliver_attribs:
+ logger.info(attrib)
+ rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
leases = self.get_leases(slice)
if node['last_contact'] is not None:
res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
- sliver_id = Xrn(slice_urn, type='slice', id=node['node_id'], authority=self.hrn).urn
- res['geni_urn'] = sliver_id
+ sliver_xrn = Xrn(slice_urn, type='sliver', id=node['node_id'])
+ sliver_xrn.set_authority(self.hrn)
+
+ res['geni_urn'] = sliver_xrn.urn
if node['boot_state'] == 'boot':
res['geni_status'] = 'ready'
else:
slices.verify_slice_attributes(slice, requested_attributes, options=options)
# add/remove slice from nodes
- requested_slivers = []
- for node in rspec.version.get_nodes_with_slivers():
+ requested_slivers = {}
+ slivers = rspec.version.get_nodes_with_slivers()
+ for node in slivers:
hostname = None
if node.get('component_name'):
hostname = node.get('component_name').strip()
elif node.get('component_id'):
hostname = xrn_to_hostname(node.get('component_id').strip())
if hostname:
- requested_slivers.append(hostname)
- nodes = slices.verify_slice_nodes(slice, requested_slivers, peer)
+ requested_slivers[hostname] = node
+ nodes = slices.verify_slice_nodes(slice, requested_slivers.keys(), peer)
# add/remove links links
slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
# only used by plc and ple.
slices.handle_peer(site, slice, persons, peer)
- return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+ return aggregate.get_rspec(slice_xrn=slice_urn,
+ version=rspec.version,
+ requested_slivers = requested_slivers)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
slicename = hrn_to_pl_slicename(slice_hrn)