7 from collections import defaultdict
8 from nova.exception import ImageNotFound
9 from nova.api.ec2.cloud import CloudController
10 from sfa.util.faults import SfaAPIError
11 from sfa.rspecs.rspec import RSpec
12 from sfa.rspecs.elements.hardware_type import HardwareType
13 from sfa.rspecs.elements.node import Node
14 from sfa.rspecs.elements.sliver import Sliver
15 from sfa.rspecs.elements.login import Login
16 from sfa.rspecs.elements.disk_image import DiskImage
17 from sfa.rspecs.elements.services import Services
18 from sfa.rspecs.elements.interface import Interface
19 from sfa.util.xrn import Xrn
20 from sfa.planetlab.plxrn import PlXrn
21 from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
22 from sfa.rspecs.version_manager import VersionManager
23 from sfa.openstack.security_group import SecurityGroup
24 from sfa.util.sfalogging import logger
def pubkeys_to_user_data(pubkeys):
    """Build a user-data shell script that installs ssh public keys.

    Each key in *pubkeys* is appended (one per line) to root's
    authorized_keys file when the script runs inside the instance.

    :param pubkeys: iterable of ssh public key strings
    :returns: the complete script text
    """
    user_data = "#!/bin/bash\n\n"
    for pubkey in pubkeys:
        # a key must stay on a single line inside the generated script
        pubkey = pubkey.replace('\n', '')
        user_data += "echo %s >> /root/.ssh/authorized_keys" % pubkey
        # terminate the echo command, then emit a blank separator line
        user_data += "\n"
        user_data += "echo >> /root/.ssh/authorized_keys"
        user_data += "\n"
    return user_data
def instance_to_sliver(instance, xrn=None):
    """Convert a nova flavor/instance record into an rspec Sliver element.

    :param instance: object exposing name, vcpus, ram, disk (and id when
        *xrn* is given)
    :param xrn: optional slice xrn; when given, a sliver urn is derived
        from it and the instance id
    :returns: a populated Sliver
    """
    # original referenced an undefined slice_xrn/sliver_urn; derive the
    # sliver urn from the xrn parameter only when one was supplied
    sliver_urn = None
    if xrn:
        sliver_urn = Xrn(xrn=xrn, type='slice', id=instance.id).get_urn()

    sliver = Sliver({'slice_id': sliver_urn,
                     'name': instance.name,
                     'type': instance.name,
                     'cpus': str(instance.vcpus),
                     'memory': str(instance.ram),
                     'storage': str(instance.disk)})
    return sliver
def image_to_rspec_disk_image(image):
    """Convert a glance image record into an rspec DiskImage element.

    All DiskImage fields are filled from the image name, which is the only
    attribute this code relies on.

    :param image: mapping with at least a 'name' key
    :returns: a populated DiskImage
    """
    img = DiskImage()
    img['name'] = image['name']
    img['description'] = image['name']
    img['os'] = image['name']
    img['version'] = image['name']
    return img
59 def __init__(self, driver):
def get_availability_zones(self):
    """Return the list of zone (dns domain) names nova knows about.

    Falls back to a single pseudo zone named 'cloud' when nova reports
    no domains at all, so callers always get at least one zone.
    """
    zones = self.driver.shell.nova_manager.dns_domains.domains()
    if not zones:
        # no zones configured - advertise one generic cloud node instead
        zones = ['cloud']
    else:
        zones = [zone.name for zone in zones]
    return zones
71 def describe(self, urns, version=None, options={}):
def list_resources(self, version=None, options=None):
    """Return this aggregate's advertisement RSpec as an xml string.

    :param version: requested rspec version (any value accepted by
        VersionManager.get_version)
    :param options: user options forwarded to RSpec (default: empty dict)
    :returns: the advertisement rspec xml
    """
    options = options or {}
    version_manager = VersionManager()
    version = version_manager.get_version(version)
    # advertise with the 'ad' flavor of the requested version
    # (rspec_version was previously computed but never used)
    rspec_version = version_manager._get_version(version.type, version.version, 'ad')
    rspec = RSpec(version=rspec_version, user_options=options)
    nodes = self.get_aggregate_nodes()
    rspec.version.add_nodes(nodes)
    return rspec.toxml()
def describe(self, urns, version=None, options=None):
    """Return a manifest describing the slivers of the requested slice.

    :param urns: list of slice/sliver urns; the first identifies the slice
    :param version: requested rspec version
    :param options: user options forwarded to RSpec (default: empty dict)
    :returns: dict carrying 'geni_urn' and the manifest 'geni_rspec'
    """
    options = options or {}
    version_manager = VersionManager()
    version = version_manager.get_version(version)
    # build a 'manifest'-flavored rspec of the requested version
    # (rspec_version was previously computed but never used)
    rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
    rspec = RSpec(version=rspec_version, user_options=options)
    # the first urn names the slice whose instances we report on
    # (slice_xrn was previously referenced but never defined)
    slice_xrn = urns[0]
    nodes = self.get_slice_nodes(slice_xrn)
    rspec.version.add_nodes(nodes)
    result = {'geni_urn': slice_xrn,
              'geni_rspec': rspec.toxml()}
    return result
def get_slice_nodes(self, slice_xrn):
    """Build one rspec node per nova instance belonging to the slice
    named by *slice_xrn*, including its sliver (flavor), disk image,
    ssh login services and interface addresses.

    NOTE(review): several original lines are missing from this chunk
    (the rspec_nodes accumulator, the per-instance Node() construction,
    the if/else around the component_id fallback, "image = image[0]" and
    the final return) - each gap is flagged inline below.
    """
    # update nova connection
    tenant_name = OSXrn(xrn=slice_xrn, type='slice').get_tenant_name()
    self.driver.shell.nova_manager.connect(tenant=tenant_name)
    # NOTE(review): "rspec_nodes = []" presumably initialized here - missing
    zones = self.get_availability_zones()
    # instances are created under the os slice name derived from the hrn
    name = hrn_to_os_slicename(slice_xrn)
    instances = self.driver.shell.nova_manager.servers.findall(name=name)
    for instance in instances:
        # NOTE(review): "rspec_node = Node()" presumably created here - missing
        #TODO: find a way to look up an instances availability zone in essex
        #if instance.availability_zone:
        #    node_xrn = OSXrn(instance.availability_zone, 'node')
        #    node_xrn = OSXrn('cloud', 'node')
        # prefer the component_id recorded in the instance metadata
        node_xrn = instance.metadata.get('component_id')
        # NOTE(review): the next two assignments were presumably the two
        # arms of an "if not node_xrn: ... else: ..." - branch lines missing
        node_xrn = OSXrn('cloud', type='node')
        node_xrn = OSXrn(xrn=node_xrn, type='node')
        rspec_node['component_id'] = node_xrn.urn
        rspec_node['component_name'] = node_xrn.name
        rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
        # the instance's flavor becomes its sliver description
        flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
        sliver = instance_to_sliver(flavor)
        rspec_node['slivers'] = [sliver]
        image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
        if isinstance(image, list) and len(image) > 0:
            # NOTE(review): "image = image[0]" expected here - missing; as
            # written a list is handed to image_to_rspec_disk_image
            disk_image = image_to_rspec_disk_image(image)
            sliver['disk_image'] = [disk_image]
        rspec_node['services'] = []
        rspec_node['interfaces'] = []
        addresses = instance.addresses
        # HACK: public ips are stored in the list of private, but
        # this seems wrong. Assume pub ip is the last in the list of
        # private ips until openstack bug is fixed.
        if addresses.get('private'):
            login = Login({'authentication': 'ssh-keys',
                           'hostname': addresses.get('private')[-1]['addr'],
                           'port':'22', 'username': 'root'})
            service = Services({'login': login})
            rspec_node['services'].append(service)

        # one rspec interface per private address
        for private_ip in addresses.get('private', []):
            if_xrn = PlXrn(auth=self.driver.hrn,
                           interface='node%s:eth0' % (instance.hostId))
            interface = Interface({'component_id': if_xrn.urn})
            interface['ips'] = [{'address': private_ip['addr'],
                                 #'netmask': private_ip['network'],
                                 'type': private_ip['version']}]
            rspec_node['interfaces'].append(interface)

        # slivers always provide the ssh service
        for public_ip in addresses.get('public', []):
            login = Login({'authentication': 'ssh-keys',
                           'hostname': public_ip['addr'],
                           'port':'22', 'username': 'root'})
            service = Services({'login': login})
            rspec_node['services'].append(service)
        rspec_nodes.append(rspec_node)
        # NOTE(review): "return rspec_nodes" expected after the loop - missing
def get_aggregate_nodes(self):
    """Advertise one rspec node per availability zone, each carrying the
    full list of flavors (as slivers) and the bootable disk images.

    :returns: list of Node elements
    """
    zones = self.get_availability_zones()
    # available sliver/instance/vm types
    instances = self.driver.shell.nova_manager.flavors.list()
    if isinstance(instances, dict):
        instances = instances.values()
    # available disk images; only machine (ami) / ovf containers are bootable
    images = self.driver.shell.image_manager.get_images_detailed()
    disk_images = [image_to_rspec_disk_image(img) for img in images
                   if img['container_format'] in ['ami', 'ovf']]
    # original chunk lost the accumulator, the zone loop and the return;
    # reconstructed here
    rspec_nodes = []
    for zone in zones:
        rspec_node = Node()
        xrn = OSXrn(zone, type='node')
        rspec_node['component_id'] = xrn.urn
        rspec_node['component_name'] = xrn.name
        rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
        rspec_node['exclusive'] = 'false'
        rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
                                        HardwareType({'name': 'pc'})]
        slivers = []
        for instance in instances:
            sliver = instance_to_sliver(instance)
            # every flavor may boot any of the advertised images
            sliver['disk_image'] = disk_images
            slivers.append(sliver)
        rspec_node['slivers'] = slivers
        rspec_nodes.append(rspec_node)
    return rspec_nodes
def create_tenant(self, tenant_name):
    """Return the keystone tenant named *tenant_name*, creating it first
    if it does not exist yet.

    :param tenant_name: tenant name to look up or create
    :returns: the tenant object
    """
    tenants = self.driver.shell.auth_manager.tenants.findall(name=tenant_name)
    if not tenants:
        # description defaults to the tenant name itself
        self.driver.shell.auth_manager.tenants.create(tenant_name, tenant_name)
        tenant = self.driver.shell.auth_manager.tenants.find(name=tenant_name)
    else:
        tenant = tenants[0]
    return tenant
def create_instance_key(self, slice_hrn, user):
    """Ensure a nova keypair exists holding the user's first public key.

    The keypair is named "<slice_leaf>_<user_leaf>". A keypair with the
    same name but a different key is deleted and re-created; a matching
    one is left untouched.

    :param slice_hrn: hrn of the slice the key belongs to
    :param user: dict with 'urn' and a non-empty 'keys' list
    :returns: the keypair name
    """
    slice_name = Xrn(slice_hrn).leaf
    user_name = Xrn(user['urn']).leaf
    key_name = "%s_%s" % (slice_name, user_name)
    pubkey = user['keys'][0]
    key_found = False
    existing_keys = self.driver.shell.nova_manager.keypairs.findall(name=key_name)
    for existing_key in existing_keys:
        if existing_key.public_key != pubkey:
            # stale keypair under the same name - drop it so it can be replaced
            self.driver.shell.nova_manager.keypairs.delete(existing_key)
        elif existing_key.public_key == pubkey:
            key_found = True
    # only (re)create when no identical keypair survived the scan
    if not key_found:
        self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
    return key_name
def create_security_group(self, slicename, fw_rules=None):
    """Create a per-sliver security group carrying *fw_rules*.

    When no rules are requested the shared 'default' group is used and
    nothing is created.

    :param slicename: prefix for the generated group name
    :param fw_rules: optional list of rule dicts (protocol, cidr_ip,
        port_range, icmp_type_code)
    :returns: the name of the group to launch instances with
    """
    # avoid a shared mutable default argument
    fw_rules = fw_rules or []
    # use default group by default
    group_name = 'default'
    if isinstance(fw_rules, list) and fw_rules:
        # Each sliver gets its own security group.
        # Keep security group names unique by appending some random
        # characters.
        random_name = "".join([random.choice(string.letters + string.digits)
                               for i in range(6)])
        group_name = slicename + random_name
        security_group = SecurityGroup(self.driver)
        security_group.create_security_group(group_name)
        for rule in fw_rules:
            security_group.add_rule_to_group(group_name,
                protocol = rule.get('protocol'),
                cidr_ip = rule.get('cidr_ip'),
                port_range = rule.get('port_range'),
                icmp_type_code = rule.get('icmp_type_code'))
    return group_name
def add_rule_to_security_group(self, group_name, **kwds):
    """Add a single firewall rule to an existing security group.

    Recognized keyword args: protocol, cidr_ip, port_range,
    icmp_type_code; any may be omitted (None is forwarded).
    """
    security_group = SecurityGroup(self.driver)
    security_group.add_rule_to_group(group_name=group_name,
                                     protocol=kwds.get('protocol'),
                                     cidr_ip=kwds.get('cidr_ip'),
                                     # port_range was silently dropped before;
                                     # forward it like create_security_group() does
                                     port_range=kwds.get('port_range'),
                                     icmp_type_code=kwds.get('icmp_type_code'))
def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
    """Boot nova servers for every sliver requested in *rspec* under the
    slice's tenant, preinstalling the callers' ssh keys for root.

    NOTE(review): this chunk is missing several original lines (the
    try/metadata initialization, "image = image[0]", most servers.create
    kwargs and the exception-handler body) - gaps are flagged inline.
    """
    #logger.debug('Reserving an instance: image: %s, flavor: ' \
    #            '%s, key: %s, name: %s' % \
    #            (image_id, flavor_id, key_name, slicename))

    # make sure a tenant exists for this slice
    tenant = self.create_tenant(tenant_name)

    # add the sfa admin user to this tenant and update our nova client connection
    # to use these credentials for the rest of this session. This ensures that the instances
    # we create will be assigned to the correct tenant.
    sfa_admin_user = self.driver.shell.auth_manager.users.find(name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
    user_role = self.driver.shell.auth_manager.roles.find(name='user')
    admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
    self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
    self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
    self.driver.shell.nova_manager.connect(tenant=tenant.name)

    # every instance gets the callers' public keys appended for root login
    authorized_keys = "\n".join(pubkeys)
    files = {'/root/.ssh/authorized_keys': authorized_keys}

    requested_instances = defaultdict(list)
    # iterate over clouds/zones/nodes
    for node in rspec.version.get_nodes_with_slivers():
        instances = node.get('slivers', [])
        # NOTE(review): original lines between here and the inner loop missing
        for instance in instances:
            # NOTE(review): a "try:" and "metadata = {}" likely opened here - missing
            flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
            image = instance.get('disk_image')
            if image and isinstance(image, list):
                # NOTE(review): "image = image[0]" expected before this lookup - missing
                image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
            fw_rules = instance.get('fw_rules', [])
            group_name = self.create_security_group(instance_name, fw_rules)
            # NOTE(review): metadata dict is never initialized in this chunk
            metadata['security_groups'] = group_name
            if node.get('component_id'):
                metadata['component_id'] = node['component_id']
            self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
                # NOTE(review): image/key_name/files/meta/name kwargs and the
                # closing parenthesis are missing from this chunk
                security_groups = [group_name],
        # NOTE(review): the matching "try:" for this handler is not visible
        except Exception, err:
            # NOTE(review): handler body (presumably logging) missing
def delete_instances(self, instance_name, tenant_name):
    """Delete every nova server named *instance_name* in *tenant_name*,
    tearing down each instance's private security group first.

    :param instance_name: server name the instances were created under
    :param tenant_name: tenant (slice) the instances belong to
    """
    self.driver.shell.nova_manager.connect(tenant=tenant_name)
    instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
    security_group_manager = SecurityGroup(self.driver)
    for instance in instances:
        # delete this instance's security groups
        security_group = instance.metadata.get('security_groups', '')
        # dont delete the shared default group, and skip instances that
        # recorded no group at all (empty string)
        if security_group and security_group != 'default':
            security_group_manager.delete_security_group(security_group)
        # finally delete the instance itself
        self.driver.shell.nova_manager.servers.delete(instance)
def stop_instances(self, instance_name, tenant_name):
    """Pause (suspend without deleting) every nova server whose name
    matches *instance_name* within the tenant *tenant_name*."""
    # switch the nova connection to the slice's tenant before querying
    self.driver.shell.nova_manager.connect(tenant=tenant_name)
    matching = self.driver.shell.nova_manager.servers.findall(name=instance_name)
    for server in matching:
        self.driver.shell.nova_manager.servers.pause(server)
327 def update_instances(self, project_name):