from collections import defaultdict

from nova.exception import ImageNotFound
from nova.api.ec2.cloud import CloudController

from sfa.util.faults import SliverDoesNotExist, InvalidRSpec
from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.util.xrn import Xrn
from sfa.util.sfalogging import logger
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.elements.hardware_type import HardwareType
from sfa.rspecs.elements.node import Node
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.login import Login
from sfa.rspecs.elements.disk_image import DiskImage
from sfa.rspecs.elements.services import Services
from sfa.rspecs.elements.interface import Interface
from sfa.rspecs.elements.fw_rule import FWRule
from sfa.rspecs.version_manager import VersionManager
from sfa.planetlab.plxrn import PlXrn
from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
from sfa.openstack.security_group import SecurityGroup
from sfa.client.multiclient import MultiClient
from sfa.storage.model import SliverAllocation
def pubkeys_to_user_data(pubkeys):
    """Build a cloud-init/user-data shell script that appends each public
    key to root's authorized_keys file.

    :param pubkeys: iterable of ssh public key strings
    :returns: the script text (always starts with a bash shebang)
    """
    user_data = "#!/bin/bash\n\n"
    for pubkey in pubkeys:
        # keys must be single-line entries in authorized_keys
        pubkey = pubkey.replace('\n', '')
        # fixed: each echo command must end with a newline, otherwise the
        # generated script runs the commands together on one line
        user_data += "echo %s >> /root/.ssh/authorized_keys\n" % pubkey
        # blank separator line between keys
        user_data += "echo >> /root/.ssh/authorized_keys\n"
    return user_data
def image_to_rspec_disk_image(image):
    """Convert a glance image record into an rspec DiskImage element.

    :param image: dict-like glance image (only 'name' is read)
    :returns: a populated DiskImage element
    """
    img = DiskImage()
    # only the image name is available here; reuse it for every field
    img['name'] = image['name']
    img['description'] = image['name']
    img['os'] = image['name']
    img['version'] = image['name']
    return img
53 def __init__(self, driver):
56 def get_availability_zones(self):
57 zones = self.driver.shell.nova_manager.dns_domains.domains()
61 zones = [zone.name for zone in zones]
64 def list_resources(self, version=None, options=None):
67 version_manager = VersionManager()
68 version = version_manager.get_version(version)
69 rspec_version = version_manager._get_version(
70 version.type, version.version, 'ad')
71 rspec = RSpec(version=version, user_options=options)
72 nodes = self.get_aggregate_nodes()
73 rspec.version.add_nodes(nodes)
76 def describe(self, urns, version=None, options=None):
79 # update nova connection
80 tenant_name = OSXrn(xrn=urns[0], type='slice').get_tenant_name()
81 self.driver.shell.nova_manager.connect(tenant=tenant_name)
82 instances = self.get_instances(urns)
83 # lookup the sliver allocations
84 sliver_ids = [sliver['sliver_id'] for sliver in slivers]
85 constraint = SliverAllocation.sliver_id.in_(sliver_ids)
86 sliver_allocations = self.driver.api.dbsession().query(
87 SliverAllocation).filter(constraint)
88 sliver_allocation_dict = {}
89 for sliver_allocation in sliver_allocations:
90 sliver_allocation_dict[
91 sliver_allocation.sliver_id] = sliver_allocation
95 for instance in instances:
96 rspec_nodes.append(self.instance_to_rspec_node(instance))
97 geni_sliver = self.instance_to_geni_sliver(
98 instance, sliver_sllocation_dict)
99 geni_slivers.append(geni_sliver)
100 version_manager = VersionManager()
101 version = version_manager.get_version(version)
102 rspec_version = version_manager._get_version(
103 version.type, version.version, 'manifest')
104 rspec = RSpec(version=rspec_version, user_options=options)
105 rspec.xml.set('expires', datetime_to_string(utcparse(time.time())))
106 rspec.version.add_nodes(rspec_nodes)
107 result = {'geni_urn': Xrn(urns[0]).get_urn(),
108 'geni_rspec': rspec.toxml(),
109 'geni_slivers': geni_slivers}
113 def get_instances(self, urns):
114 # parse slice names and sliver ids
119 if xrn.type == 'slice':
120 names.add(xrn.get_slice_name())
121 elif xrn.type == 'sliver':
128 filter['name'] = names
131 servers = self.driver.shell.nova_manager.servers.findall(**filter)
132 instances.extend(servers)
136 def instance_to_rspec_node(self, instance):
138 node_xrn = instance.metadata.get('component_id')
140 node_xrn = OSXrn('cloud', type='node')
142 node_xrn = OSXrn(xrn=node_xrn, type='node')
145 rspec_node['component_id'] = node_xrn.urn
146 rspec_node['component_name'] = node_xrn.name
147 rspec_node['component_manager_id'] = Xrn(
148 self.driver.hrn, 'authority+cm').get_urn()
149 rspec_node['sliver_id'] = OSXrn(
150 name=instance.name, type='slice', id=instance.id).get_urn()
151 if instance.metadata.get('client_id'):
152 rspec_node['client_id'] = instance.metadata.get('client_id')
155 flavor = self.driver.shell.nova_manager.flavors.find(
156 id=instance.flavor['id'])
157 sliver = self.instance_to_sliver(flavor)
160 group_name = instance.metadata.get('security_groups')
162 group = self.driver.shell.nova_manager.security_groups.find(
164 for rule in group.rules:
165 port_range = "%s:%s" % (rule['from_port'], rule['to_port'])
166 fw_rule = FWRule({'protocol': rule['ip_protocol'],
167 'port_range': port_range,
168 'cidr_ip': rule['ip_range']['cidr']})
169 fw_rules.append(fw_rule)
170 sliver['fw_rules'] = fw_rules
171 rspec_node['slivers'] = [sliver]
174 image = self.driver.shell.image_manager.get_images(
175 id=instance.image['id'])
176 if isinstance(image, list) and len(image) > 0:
178 disk_image = image_to_rspec_disk_image(image)
179 sliver['disk_image'] = [disk_image]
182 rspec_node['services'] = []
183 rspec_node['interfaces'] = []
184 addresses = instance.addresses
185 # HACK: public ips are stored in the list of private, but
186 # this seems wrong. Assume pub ip is the last in the list of
187 # private ips until openstack bug is fixed.
188 if addresses.get('private'):
189 login = Login({'authentication': 'ssh-keys',
190 'hostname': addresses.get('private')[-1]['addr'],
191 'port': '22', 'username': 'root'})
192 service = Services({'login': login})
193 rspec_node['services'].append(service)
195 for private_ip in addresses.get('private', []):
196 if_xrn = PlXrn(auth=self.driver.hrn,
197 interface='node%s' % (instance.hostId))
198 if_client_id = Xrn(if_xrn.urn, type='interface',
199 id="eth%s" % if_index).urn
201 rspec_node['sliver_id'], type='slice', id="eth%s" % if_index).urn
202 interface = Interface({'component_id': if_xrn.urn,
203 'client_id': if_client_id,
204 'sliver_id': if_sliver_id})
205 interface['ips'] = [{'address': private_ip['addr'],
206 #'netmask': private_ip['network'],
207 'type': private_ip['version']}]
208 rspec_node['interfaces'].append(interface)
210 # slivers always provide the ssh service
211 for public_ip in addresses.get('public', []):
212 login = Login({'authentication': 'ssh-keys',
213 'hostname': public_ip['addr'],
214 'port': '22', 'username': 'root'})
215 service = Services({'login': login})
216 rspec_node['services'].append(service)
219 def instance_to_sliver(self, instance, xrn=None):
221 sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
222 sliver_id = Xrn(sliver_hrn, type='sliver').urn
224 sliver = Sliver({'sliver_id': sliver_id,
225 'name': instance.name,
226 'type': instance.name,
227 'cpus': str(instance.vcpus),
228 'memory': str(instance.ram),
229 'storage': str(instance.disk)})
232 def instance_to_geni_sliver(self, instance, sliver_allocations=None):
233 if sliver_allocations is None:
234 sliver_allocations = {}
235 sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
236 sliver_id = Xrn(sliver_hrn, type='sliver').urn
238 # set sliver allocation and operational status
239 sliver_allocation = sliver_allocations[sliver_id]
240 if sliver_allocation:
241 allocation_status = sliver_allocation.allocation_state
242 if allocation_status == 'geni_allocated':
243 op_status = 'geni_pending_allocation'
244 elif allocation_status == 'geni_provisioned':
245 state = instance.state.lower()
246 if state == 'active':
247 op_status = 'geni_ready'
248 elif state == 'building':
249 op_status = 'geni_notready'
250 elif state == 'failed':
251 op_status = ' geni_failed'
253 op_status = 'geni_unknown'
255 allocation_status = 'geni_unallocated'
257 geni_sliver = {'geni_sliver_urn': sliver_id,
258 'geni_expires': None,
259 'geni_allocation_status': allocation_status,
260 'geni_operational_status': op_status,
262 'plos_created_at': datetime_to_string(utcparse(instance.created)),
263 'plos_sliver_type': self.shell.nova_manager.flavors.find(id=instance.flavor['id']).name,
268 def get_aggregate_nodes(self):
269 zones = self.get_availability_zones()
270 # available sliver/instance/vm types
271 instances = self.driver.shell.nova_manager.flavors.list()
272 if isinstance(instances, dict):
273 instances = instances.values()
275 images = self.driver.shell.image_manager.get_images_detailed()
276 disk_images = [image_to_rspec_disk_image(img) for img in images if img[
277 'container_format'] in ['ami', 'ovf']]
281 xrn = OSXrn(zone, type='node')
282 rspec_node['component_id'] = xrn.urn
283 rspec_node['component_name'] = xrn.name
284 rspec_node['component_manager_id'] = Xrn(
285 self.driver.hrn, 'authority+cm').get_urn()
286 rspec_node['exclusive'] = 'false'
287 rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
288 HardwareType({'name': 'pc'})]
290 for instance in instances:
291 sliver = self.instance_to_sliver(instance)
292 sliver['disk_image'] = disk_images
293 slivers.append(sliver)
294 rspec_node['available'] = 'true'
295 rspec_node['slivers'] = slivers
296 rspec_nodes.append(rspec_node)
300 def create_tenant(self, tenant_name):
301 tenants = self.driver.shell.auth_manager.tenants.findall(
304 self.driver.shell.auth_manager.tenants.create(
305 tenant_name, tenant_name)
306 tenant = self.driver.shell.auth_manager.tenants.find(
312 def create_instance_key(self, slice_hrn, user):
313 slice_name = Xrn(slice_hrn).leaf
314 user_name = Xrn(user['urn']).leaf
315 key_name = "%s_%s" % (slice_name, user_name)
316 pubkey = user['keys'][0]
318 existing_keys = self.driver.shell.nova_manager.keypairs.findall(
320 for existing_key in existing_keys:
321 if existing_key.public_key != pubkey:
322 self.driver.shell.nova_manager.keypairs.delete(existing_key)
323 elif existing_key.public_key == pubkey:
327 self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
330 def create_security_group(self, slicename, fw_rules=None):
333 # use default group by default
334 group_name = 'default'
335 if isinstance(fw_rules, list) and fw_rules:
336 # Each sliver get's its own security group.
337 # Keep security group names unique by appending some random
339 random_name = "".join([random.choice(string.letters + string.digits)
341 group_name = slicename + random_name
342 security_group = SecurityGroup(self.driver)
343 security_group.create_security_group(group_name)
344 for rule in fw_rules:
345 security_group.add_rule_to_group(group_name,
346 protocol=rule.get('protocol'),
347 cidr_ip=rule.get('cidr_ip'),
350 icmp_type_code=rule.get('icmp_type_code'))
351 # Open ICMP by default
352 security_group.add_rule_to_group(group_name,
355 icmp_type_code="-1:-1")
358 def add_rule_to_security_group(self, group_name, **kwds):
359 security_group = SecurityGroup(self.driver)
360 security_group.add_rule_to_group(group_name=group_name,
361 protocol=kwds.get('protocol'),
362 cidr_ip=kwds.get('cidr_ip'),
363 icmp_type_code=kwds.get('icmp_type_code'))
365 def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
366 # logger.debug('Reserving an instance: image: %s, flavor: ' \
367 # '%s, key: %s, name: %s' % \
368 # (image_id, flavor_id, key_name, slicename))
370 # make sure a tenant exists for this slice
371 tenant = self.create_tenant(tenant_name)
373 # add the sfa admin user to this tenant and update our nova client connection
374 # to use these credentials for the rest of this session. This emsures that the instances
375 # we create will be assigned to the correct tenant.
376 sfa_admin_user = self.driver.shell.auth_manager.users.find(
377 name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
378 user_role = self.driver.shell.auth_manager.roles.find(name='user')
379 admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
380 self.driver.shell.auth_manager.roles.add_user_role(
381 sfa_admin_user, admin_role, tenant)
382 self.driver.shell.auth_manager.roles.add_user_role(
383 sfa_admin_user, user_role, tenant)
384 self.driver.shell.nova_manager.connect(tenant=tenant.name)
386 authorized_keys = "\n".join(pubkeys)
387 files = {'/root/.ssh/authorized_keys': authorized_keys}
389 requested_instances = defaultdict(list)
391 # iterate over clouds/zones/nodes
393 for node in rspec.version.get_nodes_with_slivers():
394 instances = node.get('slivers', [])
397 for instance in instances:
400 flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance[
402 image = instance.get('disk_image')
403 if image and isinstance(image, list):
407 "Must specify a disk_image for each VM")
408 image_id = self.driver.shell.nova_manager.images.find(name=image[
410 fw_rules = instance.get('fw_rules', [])
411 group_name = self.create_security_group(
412 instance_name, fw_rules)
413 metadata['security_groups'] = group_name
414 if node.get('component_id'):
415 metadata['component_id'] = node['component_id']
416 if node.get('client_id'):
417 metadata['client_id'] = node['client_id']
418 server = self.driver.shell.nova_manager.servers.create(
422 security_groups=[group_name],
426 slivers.append(server)
427 except Exception as err:
432 def delete_instance(self, instance):
434 def _delete_security_group(inst):
435 security_group = inst.metadata.get('security_groups', '')
437 manager = SecurityGroup(self.driver)
438 timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
439 start_time = time.time()
440 instance_deleted = False
441 while instance_deleted == False and (time.time() - start_time) < timeout:
442 tmp_inst = self.driver.shell.nova_manager.servers.findall(
445 instance_deleted = True
447 manager.delete_security_group(security_group)
449 multiclient = MultiClient()
450 tenant = self.driver.shell.auth_manager.tenants.find(
451 id=instance.tenant_id)
452 self.driver.shell.nova_manager.connect(tenant=tenant.name)
453 args = {'name': instance.name,
455 instances = self.driver.shell.nova_manager.servers.findall(**args)
456 security_group_manager = SecurityGroup(self.driver)
457 for instance in instances:
459 self.driver.shell.nova_manager.servers.delete(instance)
460 # deleate this instance's security groups
461 multiclient.run(_delete_security_group, instance)
464 def stop_instances(self, instance_name, tenant_name, id=None):
465 self.driver.shell.nova_manager.connect(tenant=tenant_name)
466 args = {'name': instance_name}
469 instances = self.driver.shell.nova_manager.servers.findall(**args)
470 for instance in instances:
471 self.driver.shell.nova_manager.servers.pause(instance)
474 def start_instances(self, instance_name, tenant_name, id=None):
475 self.driver.shell.nova_manager.connect(tenant=tenant_name)
476 args = {'name': instance_name}
479 instances = self.driver.shell.nova_manager.servers.findall(**args)
480 for instance in instances:
481 self.driver.shell.nova_manager.servers.resume(instance)
484 def restart_instances(self, instacne_name, tenant_name, id=None):
485 self.stop_instances(instance_name, tenant_name, id)
486 self.start_instances(instance_name, tenant_name, id)
489 def update_instances(self, project_name):