8 from collections import defaultdict
9 from nova.exception import ImageNotFound
10 from nova.api.ec2.cloud import CloudController
11 from sfa.util.faults import SfaAPIError, SliverDoesNotExist
12 from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
13 from sfa.rspecs.rspec import RSpec
14 from sfa.rspecs.elements.hardware_type import HardwareType
15 from sfa.rspecs.elements.node import Node
16 from sfa.rspecs.elements.sliver import Sliver
17 from sfa.rspecs.elements.login import Login
18 from sfa.rspecs.elements.disk_image import DiskImage
19 from sfa.rspecs.elements.services import Services
20 from sfa.rspecs.elements.interface import Interface
21 from sfa.rspecs.elements.fw_rule import FWRule
22 from sfa.util.xrn import Xrn
23 from sfa.planetlab.plxrn import PlXrn
24 from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
25 from sfa.rspecs.version_manager import VersionManager
26 from sfa.openstack.security_group import SecurityGroup
27 from sfa.util.sfalogging import logger
def pubkeys_to_user_data(pubkeys):
    """Render ssh public keys into a bash user-data script.

    The script appends each key (one per line) to root's
    authorized_keys, with a blank separator line after each key.

    :param pubkeys: iterable of ssh public key strings
    :returns: the user-data script as a string
    """
    user_data = "#!/bin/bash\n\n"
    for pubkey in pubkeys:
        # strip embedded newlines so each key lands on a single line
        pubkey = pubkey.replace('\n', '')
        user_data += "echo %s >> /root/.ssh/authorized_keys" % pubkey
        # newline terminator was lost in the mangled original; without it
        # consecutive echo commands run together on one line
        user_data += "\n"
        user_data += "echo >> /root/.ssh/authorized_keys"
        user_data += "\n"
    return user_data
def image_to_rspec_disk_image(image):
    """Convert a glance image record into an rspec DiskImage element.

    Only the image's name is used here; it fills every descriptive
    field since no richer metadata is read from the record.

    :param image: dict-like glance image record with a 'name' key
    :returns: a populated DiskImage element
    """
    # the DiskImage() initialization and return were missing from the
    # mangled original; without them `img` is undefined
    img = DiskImage()
    img['name'] = image['name']
    img['description'] = image['name']
    img['os'] = image['name']
    img['version'] = image['name']
    return img
def __init__(self, driver):
    """Bind this aggregate to the OpenStack driver it proxies for.

    :param driver: driver object exposing `.shell` (nova/auth/image
        managers) and `.hrn`; every other method reads `self.driver`.
    """
    # body was missing from the mangled original; storing the driver is
    # grounded by the pervasive `self.driver.*` use in the other methods
    self.driver = driver
def get_availability_zones(self):
    """Return the list of availability zone names known to nova.

    Falls back to a single generic 'cloud' zone when nova reports no
    dns domains (matching the default 'cloud' node urn used by
    instance_to_rspec_node).

    :returns: list of zone name strings
    """
    zones = self.driver.shell.nova_manager.dns_domains.domains()
    if not zones:
        # no zones defined: advertise the generic cloud zone
        zones = ['cloud']
    else:
        zones = [zone.name for zone in zones]
    return zones
def list_resources(self, version=None, options={}):
    """Return an advertisement rspec (xml string) describing this
    aggregate's zones, flavors and images.

    :param version: requested rspec version (resolved via VersionManager)
    :param options: user options forwarded to RSpec
    :returns: the advertisement rspec serialized to xml
    """
    version_manager = VersionManager()
    version = version_manager.get_version(version)
    rspec_version = version_manager._get_version(version.type, version.version, 'ad')
    # fix: build the RSpec from the looked-up 'ad' version; previously
    # rspec_version was computed but never used (describe() uses the
    # equivalent 'manifest' version correctly)
    rspec = RSpec(version=rspec_version, user_options=options)
    nodes = self.get_aggregate_nodes()
    rspec.version.add_nodes(nodes)
    return rspec.toxml()
def describe(self, urns, version=None, options={}):
    """Return the GENI describe() structure for the slivers named by urns:
    a manifest rspec plus one status record per sliver.

    :param urns: list of slice/sliver urns; urns[0] names the slice
    :param version: requested rspec version
    :param options: user options forwarded to RSpec
    :raises SliverDoesNotExist: when no matching instances exist
    :returns: dict with geni_urn, geni_rspec and geni_slivers keys
    """
    # update nova connection to use the slice's tenant
    tenant_name = OSXrn(xrn=urns[0], type='slice').get_tenant_name()
    self.driver.shell.nova_manager.connect(tenant=tenant_name)

    instances = self.get_instances(urns)
    if not instances:
        raise SliverDoesNotExist("You have not allocated any slivers here")

    # accumulators were missing from the mangled original
    rspec_nodes = []
    geni_slivers = []
    for instance in instances:
        rspec_nodes.append(self.instance_to_rspec_node(instance))
        geni_slivers.append(self.instance_to_geni_sliver(instance))

    version_manager = VersionManager()
    version = version_manager.get_version(version)
    rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
    rspec = RSpec(version=rspec_version, user_options=options)
    rspec.version.add_nodes(rspec_nodes)

    result = {'geni_urn': Xrn(urns[0]).get_urn(),
              'geni_rspec': rspec.toxml(),
              'geni_slivers': geni_slivers}
    return result
def get_instances(self, urns):
    """Look up the nova servers referenced by a list of urns.

    Slice urns select by name, sliver urns select by id; both kinds
    may be mixed in one call.

    :param urns: list of slice and/or sliver urn strings
    :returns: list of nova server objects (possibly empty)
    """
    # parse slice names and sliver ids out of the urns
    names = set()
    ids = set()
    for urn in urns:
        xrn = OSXrn(xrn=urn)
        if xrn.type == 'slice':
            names.add(xrn.get_slice_name())
        elif xrn.type == 'sliver':
            ids.add(xrn.leaf)

    # build the nova findall() filter from whatever was requested
    criteria = {}
    if names:
        criteria['name'] = names
    if ids:
        criteria['id'] = ids

    instances = []
    servers = self.driver.shell.nova_manager.servers.findall(**criteria)
    instances.extend(servers)
    return instances
def instance_to_rspec_node(self, instance):
    """Convert a running nova instance into a manifest rspec Node,
    including its sliver (flavor), firewall rules, disk image,
    interfaces and ssh login services.

    :param instance: nova server object
    :returns: a populated rspec Node element
    """
    # determine the node the instance runs on; fall back to the generic
    # 'cloud' node when no component_id was recorded in the metadata
    node_xrn = instance.metadata.get('component_id')
    if not node_xrn:
        node_xrn = OSXrn('cloud', type='node')
    else:
        node_xrn = OSXrn(xrn=node_xrn, type='node')

    rspec_node = Node()
    rspec_node['component_id'] = node_xrn.urn
    rspec_node['component_name'] = node_xrn.name
    rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
    rspec_node['sliver_id'] = OSXrn(name=instance.name, type='slice', id=instance.id).get_urn()
    if instance.metadata.get('client_id'):
        rspec_node['client_id'] = instance.metadata.get('client_id')

    # sliver details come from the instance's flavor
    flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
    sliver = self.instance_to_sliver(flavor)

    # firewall rules from the instance's security group, if any
    fw_rules = []
    group_name = instance.metadata.get('security_groups')
    if group_name:
        group = self.driver.shell.nova_manager.security_groups.find(name=group_name)
        for rule in group.rules:
            port_range = "%s:%s" % (rule['from_port'], rule['to_port'])
            fw_rule = FWRule({'protocol': rule['ip_protocol'],
                              'port_range': port_range,
                              'cidr_ip': rule['ip_range']['cidr']})
            fw_rules.append(fw_rule)
    sliver['fw_rules'] = fw_rules
    rspec_node['slivers'] = [sliver]

    # boot image
    image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
    if isinstance(image, list) and len(image) > 0:
        image = image[0]
    disk_image = image_to_rspec_disk_image(image)
    sliver['disk_image'] = [disk_image]

    rspec_node['services'] = []
    rspec_node['interfaces'] = []
    addresses = instance.addresses
    # HACK: public ips are stored in the list of private, but
    # this seems wrong. Assume pub ip is the last in the list of
    # private ips until openstack bug is fixed.
    if addresses.get('private'):
        login = Login({'authentication': 'ssh-keys',
                       'hostname': addresses.get('private')[-1]['addr'],
                       'port': '22', 'username': 'root'})
        service = Services({'login': login})
        rspec_node['services'].append(service)

    # one interface element per private address (eth0, eth1, ...)
    for if_index, private_ip in enumerate(addresses.get('private', [])):
        if_xrn = PlXrn(auth=self.driver.hrn,
                       interface='node%s' % (instance.hostId))
        if_client_id = Xrn(if_xrn.urn, type='interface', id="eth%s" % if_index).urn
        if_sliver_id = Xrn(rspec_node['sliver_id'], type='slice', id="eth%s" % if_index).urn
        interface = Interface({'component_id': if_xrn.urn,
                               'client_id': if_client_id,
                               'sliver_id': if_sliver_id})
        interface['ips'] = [{'address': private_ip['addr'],
                             #'netmask': private_ip['network'],
                             'type': private_ip['version']}]
        rspec_node['interfaces'].append(interface)

    # slivers always provide the ssh service
    for public_ip in addresses.get('public', []):
        login = Login({'authentication': 'ssh-keys',
                       'hostname': public_ip['addr'],
                       'port': '22', 'username': 'root'})
        service = Services({'login': login})
        rspec_node['services'].append(service)
    return rspec_node
def instance_to_sliver(self, instance, xrn=None):
    """Convert a flavor-like object into an rspec Sliver element.

    :param instance: object exposing name, vcpus, ram, disk and id
        (nova flavors satisfy this)
    :param xrn: optional slice xrn; when given it is used to build the
        sliver urn, otherwise sliver_id is left None
    :returns: a populated Sliver element
    """
    sliver_id = None
    if xrn:
        # fix: the original referenced an undefined `slice_xrn` (NameError)
        # and then called .get_urn() a second time on the resulting string
        sliver_id = Xrn(xrn=xrn, type='slice', id=instance.id).get_urn()

    sliver = Sliver({'sliver_id': sliver_id,
                     'name': instance.name,
                     'type': instance.name,
                     'cpus': str(instance.vcpus),
                     'memory': str(instance.ram),
                     'storage': str(instance.disk)})
    return sliver
def instance_to_geni_sliver(self, instance):
    """Build the GENI per-sliver status record for a nova instance.

    Maps nova vm states onto geni_operational_status values and fills
    in the PlanetLab-on-OpenStack (plos_*) extension fields.

    :param instance: nova server object
    :returns: dict in the AM API geni_slivers entry format
    """
    op_status = "geni_unknown"
    state = instance.state.lower()
    if state == 'active':
        op_status = 'geni_ready'
    elif state == 'building':
        op_status = 'geni_configuring'
    elif state == 'failed':
        # fix: original was "op_status =' geni_failed'" -- a misplaced
        # quote that produced the value " geni_failed" with a leading space
        op_status = 'geni_failed'

    # NOTE(review): the original referenced an undefined `root_hrn`;
    # self.driver.hrn is what every other method uses as the local
    # authority root -- confirm against the full file
    sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
    sliver_id = Xrn(sliver_hrn, type='sliver').urn

    geni_sliver = {'geni_sliver_urn': sliver_id,
                   'geni_expires': None,
                   'geni_allocation_status': 'geni_provisioned',
                   'geni_operational_status': op_status,
                   # fix: original used self.shell.nova_manager, but no
                   # self.shell exists -- the driver holds the shell
                   'plos_created_at': datetime_to_string(utcparse(instance.created)),
                   'plos_sliver_type': self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id']).name,
                   }
    return geni_sliver
def get_aggregate_nodes(self):
    """Build one advertisement rspec Node per availability zone,
    each advertising every nova flavor as an available sliver type
    and every bootable disk image.

    :returns: list of rspec Node elements
    """
    zones = self.get_availability_zones()

    # available sliver/instance/vm types
    instances = self.driver.shell.nova_manager.flavors.list()
    if isinstance(instances, dict):
        instances = instances.values()

    # available images; only machine ('ami') and 'ovf' containers are
    # advertised as bootable disk images
    images = self.driver.shell.image_manager.get_images_detailed()
    disk_images = [image_to_rspec_disk_image(img) for img in images
                   if img['container_format'] in ['ami', 'ovf']]

    rspec_nodes = []
    for zone in zones:
        rspec_node = Node()
        xrn = OSXrn(zone, type='node')
        rspec_node['component_id'] = xrn.urn
        rspec_node['component_name'] = xrn.name
        rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
        rspec_node['exclusive'] = 'false'
        rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
                                        HardwareType({'name': 'pc'})]
        # advertise each flavor as an available sliver type carrying
        # the full list of bootable images
        slivers = []
        for instance in instances:
            sliver = self.instance_to_sliver(instance)
            sliver['disk_image'] = disk_images
            slivers.append(sliver)
        rspec_node['available'] = 'true'
        rspec_node['slivers'] = slivers
        rspec_nodes.append(rspec_node)
    return rspec_nodes
def create_tenant(self, tenant_name):
    """Return the keystone tenant with this name, creating it first
    if it does not already exist.

    :param tenant_name: tenant name (also used as its description)
    :returns: the tenant object
    """
    tenants = self.driver.shell.auth_manager.tenants.findall(name=tenant_name)
    if not tenants:
        self.driver.shell.auth_manager.tenants.create(tenant_name, tenant_name)
        tenant = self.driver.shell.auth_manager.tenants.find(name=tenant_name)
    else:
        tenant = tenants[0]
    return tenant
def create_instance_key(self, slice_hrn, user):
    """Ensure a nova keypair exists holding the user's first public key.

    The keypair is named '<slice_leaf>_<user_leaf>'. An existing
    keypair under that name with a *different* key is deleted and
    replaced; a matching one is kept as-is.

    :param slice_hrn: hrn of the slice
    :param user: dict with 'urn' and a non-empty 'keys' list
    :returns: the keypair name
    """
    slice_name = Xrn(slice_hrn).leaf
    user_name = Xrn(user['urn']).leaf
    key_name = "%s_%s" % (slice_name, user_name)
    pubkey = user['keys'][0]

    key_found = False
    existing_keys = self.driver.shell.nova_manager.keypairs.findall(name=key_name)
    for existing_key in existing_keys:
        if existing_key.public_key != pubkey:
            # stale key registered under this name: remove it
            self.driver.shell.nova_manager.keypairs.delete(existing_key)
        elif existing_key.public_key == pubkey:
            key_found = True

    if not key_found:
        self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
    return key_name
def create_security_group(self, slicename, fw_rules=[]):
    """Create a per-sliver security group loaded with fw_rules.

    :param slicename: prefix for the generated group name
    :param fw_rules: list of rule dicts (protocol, cidr_ip,
        port_range, icmp_type_code); never mutated
    :returns: the new group's name, or 'default' when fw_rules is
        empty or not a list
    """
    # use default group by default
    group_name = 'default'
    if isinstance(fw_rules, list) and fw_rules:
        # Each sliver gets its own security group.
        # Keep security group names unique by appending some random
        # characters to the end.
        # NOTE(review): string.letters is python2-only; use
        # string.ascii_letters if this file is ever ported to python3
        random_name = "".join([random.choice(string.letters + string.digits)
                               for i in range(6)])
        group_name = slicename + random_name
        security_group = SecurityGroup(self.driver)
        security_group.create_security_group(group_name)
        for rule in fw_rules:
            security_group.add_rule_to_group(group_name,
                                             protocol=rule.get('protocol'),
                                             cidr_ip=rule.get('cidr_ip'),
                                             port_range=rule.get('port_range'),
                                             icmp_type_code=rule.get('icmp_type_code'))
    return group_name
def add_rule_to_security_group(self, group_name, **kwds):
    """Add one firewall rule to an existing security group.

    Recognized kwds: protocol, cidr_ip, port_range, icmp_type_code;
    missing ones are forwarded as None.
    """
    security_group = SecurityGroup(self.driver)
    security_group.add_rule_to_group(group_name=group_name,
                                     protocol=kwds.get('protocol'),
                                     cidr_ip=kwds.get('cidr_ip'),
                                     # fix: port_range was silently dropped,
                                     # unlike the create_security_group path
                                     port_range=kwds.get('port_range'),
                                     icmp_type_code=kwds.get('icmp_type_code'))
def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
    """Boot one nova server per requested sliver in the rspec.

    :param instance_name: name given to every created server
    :param tenant_name: tenant (slice) the servers belong to
    :param rspec: parsed request rspec exposing
        version.get_nodes_with_slivers()
    :param key_name: nova keypair name injected into the servers
    :param pubkeys: ssh public keys written to root's authorized_keys
    :returns: list of created server objects (failures are logged and
        skipped, not raised)
    """
    # make sure a tenant exists for this slice
    tenant = self.create_tenant(tenant_name)

    # add the sfa admin user to this tenant and update our nova client connection
    # to use these credentials for the rest of this session. This ensures that the
    # instances we create will be assigned to the correct tenant.
    sfa_admin_user = self.driver.shell.auth_manager.users.find(name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
    user_role = self.driver.shell.auth_manager.roles.find(name='user')
    admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
    self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
    self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
    self.driver.shell.nova_manager.connect(tenant=tenant.name)

    authorized_keys = "\n".join(pubkeys)
    files = {'/root/.ssh/authorized_keys': authorized_keys}

    # iterate over clouds/zones/nodes
    slivers = []
    for node in rspec.version.get_nodes_with_slivers():
        instances = node.get('slivers', [])
        if not instances:
            continue
        for instance in instances:
            try:
                metadata = {}
                flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
                image = instance.get('disk_image')
                if image and isinstance(image, list):
                    image = image[0]
                image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
                fw_rules = instance.get('fw_rules', [])
                group_name = self.create_security_group(instance_name, fw_rules)
                metadata['security_groups'] = group_name
                if node.get('component_id'):
                    metadata['component_id'] = node['component_id']
                if node.get('client_id'):
                    metadata['client_id'] = node['client_id']
                server = self.driver.shell.nova_manager.servers.create(
                    flavor=flavor_id,
                    image=image_id,
                    key_name=key_name,
                    security_groups=[group_name],
                    files=files,
                    meta=metadata,
                    name=instance_name)
                slivers.append(server)
            except Exception as err:
                # best effort: log the failure and keep booting the rest
                logger.log_exc(err)
    return slivers
def delete_instance(self, instance):
    """Delete every nova server matching this instance's name/id and,
    in a background thread, the per-sliver security group it used.

    :param instance: nova server object identifying what to delete
    :returns: 1 (legacy success code)
    """

    def _delete_security_group(inst):
        # nova refuses to delete a security group that is still in use,
        # so wait (up to the timeout) for the instance to disappear
        # before deleting its group
        security_group = inst.metadata.get('security_groups', '')
        if security_group:
            manager = SecurityGroup(self.driver)
            timeout = 10.0  # wait a maximum of 10 seconds before forcing the delete
            start_time = time.time()
            instance_deleted = False
            while not instance_deleted and (time.time() - start_time) < timeout:
                tmp_inst = self.driver.shell.nova_manager.servers.findall(id=inst.id)
                if not tmp_inst:
                    instance_deleted = True
                time.sleep(.5)
            manager.delete_security_group(security_group)

    thread_manager = ThreadManager()
    tenant = self.driver.shell.auth_manager.tenants.find(id=instance.tenant_id)
    self.driver.shell.nova_manager.connect(tenant=tenant.name)
    args = {'name': instance.name,
            'id': instance.id}
    instances = self.driver.shell.nova_manager.servers.findall(**args)
    for instance in instances:
        # destroy the instance itself
        self.driver.shell.nova_manager.servers.delete(instance)
        # delete this instance's security groups in a worker thread
        thread_manager.run(_delete_security_group, instance)
    return 1
def stop_instances(self, instance_name, tenant_name, id=None):
    """Pause every instance matching name (and optional id) in the tenant.

    :param instance_name: server name to match
    :param tenant_name: tenant to connect to before searching
    :param id: optional server id to narrow the match
    :returns: 1 (legacy success code)
    """
    self.driver.shell.nova_manager.connect(tenant=tenant_name)
    args = {'name': instance_name}
    if id is not None:
        args['id'] = id
    instances = self.driver.shell.nova_manager.servers.findall(**args)
    for instance in instances:
        self.driver.shell.nova_manager.servers.pause(instance)
    return 1
def start_instances(self, instance_name, tenant_name, id=None):
    """Resume every instance matching name (and optional id) in the tenant.

    :param instance_name: server name to match
    :param tenant_name: tenant to connect to before searching
    :param id: optional server id to narrow the match
    :returns: 1 (legacy success code)
    """
    self.driver.shell.nova_manager.connect(tenant=tenant_name)
    args = {'name': instance_name}
    if id is not None:
        args['id'] = id
    instances = self.driver.shell.nova_manager.servers.findall(**args)
    for instance in instances:
        self.driver.shell.nova_manager.servers.resume(instance)
    return 1
def restart_instances(self, instance_name, tenant_name, id=None):
    """Restart matching instances by pausing then resuming them.

    Fix: the parameter was misspelled 'instacne_name' while the body
    used 'instance_name', so every call raised NameError.

    :param instance_name: server name to match
    :param tenant_name: tenant to connect to
    :param id: optional server id to narrow the match
    :returns: 1 (legacy success code)
    """
    self.stop_instances(instance_name, tenant_name, id)
    self.start_instances(instance_name, tenant_name, id)
    return 1
427 def update_instances(self, project_name):