8 from collections import defaultdict
9 from nova.exception import ImageNotFound
10 from nova.api.ec2.cloud import CloudController
11 from sfa.util.faults import SfaAPIError, InvalidRSpec
12 from sfa.rspecs.rspec import RSpec
13 from sfa.rspecs.elements.hardware_type import HardwareType
14 from sfa.rspecs.elements.node import Node
15 from sfa.rspecs.elements.sliver import Sliver
16 from sfa.rspecs.elements.login import Login
17 from sfa.rspecs.elements.disk_image import DiskImage
18 from sfa.rspecs.elements.services import Services
19 from sfa.rspecs.elements.interface import Interface
20 from sfa.rspecs.elements.fw_rule import FWRule
21 from sfa.util.xrn import Xrn
22 from sfa.planetlab.plxrn import PlXrn
23 from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
24 from sfa.rspecs.version_manager import VersionManager
25 from sfa.openstack.security_group import SecurityGroup
26 from sfa.server.threadmanager import ThreadManager
27 from sfa.util.sfalogging import logger
def pubkeys_to_user_data(pubkeys):
    """Build a boot-time shell script that installs ssh public keys.

    The returned text is suitable for nova's ``user_data`` argument: it
    appends each key to root's authorized_keys file on first boot.

    :param pubkeys: iterable of ssh public key strings
    :returns: the generated shell script as a string
    """
    user_data = "#!/bin/bash\n\n"
    for pubkey in pubkeys:
        # a key must occupy a single line in authorized_keys
        pubkey = pubkey.replace('\n', '')
        user_data += "echo %s >> /root/.ssh/authorized_keys" % pubkey
        user_data += "\n"
    # terminate the file with a blank line
    user_data += "echo >> /root/.ssh/authorized_keys"
    return user_data
def instance_to_sliver(instance, slice_xrn=None):
    """Convert a nova flavor/instance object into an rspec Sliver element.

    :param instance: object exposing ``name``, ``vcpus``, ``ram`` and
        ``disk`` attributes (typically a nova flavor)
    :param slice_xrn: unused here; kept for interface compatibility
    :returns: a populated Sliver element
    """
    sliver = Sliver({'name': instance.name,
                     'type': instance.name,
                     'cpus': str(instance.vcpus),
                     'memory': str(instance.ram),
                     'storage': str(instance.disk)})
    return sliver
def image_to_rspec_disk_image(image):
    """Convert a glance image record into an rspec DiskImage element.

    Only the image name is used; it is copied into every DiskImage field
    since glance records carry no separate description/os/version here.

    :param image: mapping with at least a 'name' key
    :returns: a populated DiskImage element
    """
    img = DiskImage()
    img['name'] = image['name']
    img['description'] = image['name']
    img['os'] = image['name']
    img['version'] = image['name']
    return img
def __init__(self, driver):
    # Keep a handle on the driver; it exposes the OpenStack client shells
    # (nova_manager, image_manager, auth_manager) that every method below
    # reads through self.driver.
    self.driver = driver
def get_rspec(self, slice_xrn=None, version=None, options=None):
    """Return an rspec document for this aggregate.

    Without a slice_xrn an 'ad' (advertisement) rspec describing the
    aggregate's nodes is produced; with one, a 'manifest' rspec of the
    slice's instances.

    :param slice_xrn: slice urn/hrn, or None for an advertisement
    :param version: requested rspec version (resolved via VersionManager)
    :param options: user options forwarded to RSpec (default: empty dict)
    :returns: the rspec serialized to an xml string
    """
    if options is None:  # avoid a shared mutable default argument
        options = {}
    version_manager = VersionManager()
    version = version_manager.get_version(version)
    if not slice_xrn:
        rspec_version = version_manager._get_version(version.type, version.version, 'ad')
        nodes = self.get_aggregate_nodes()
    else:
        rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
        nodes = self.get_slice_nodes(slice_xrn)
    rspec = RSpec(version=rspec_version, user_options=options)
    rspec.version.add_nodes(nodes)
    return rspec.toxml()
def get_availability_zones(self):
    """Return the list of availability zone names known to nova.

    Falls back to a single synthetic 'cloud' zone when nova reports no
    dns domains (essex-era API).

    :returns: list of zone name strings
    """
    zones = self.driver.shell.nova_manager.dns_domains.domains()
    if not zones:
        zones = ['cloud']
    else:
        zones = [zone.name for zone in zones]
    return zones
def instance_to_rspec_node(self, slice_xrn, instance):
    """Convert a running nova server into a manifest rspec Node element.

    :param slice_xrn: xrn of the slice owning the instance
    :param instance: nova server object (metadata, flavor, image, addresses)
    :returns: a populated Node element with slivers, services and interfaces
    """
    # determine the node urn: prefer the component_id stashed in the
    # instance metadata, otherwise fall back to a generic cloud node
    node_xrn = instance.metadata.get('component_id')
    if not node_xrn:
        node_xrn = OSXrn('cloud', type='node')
    else:
        node_xrn = OSXrn(xrn=node_xrn, type='node')

    rspec_node = Node()
    rspec_node['component_id'] = node_xrn.urn
    rspec_node['component_name'] = node_xrn.name
    rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
    if instance.metadata.get('client_id'):
        rspec_node['client_id'] = instance.metadata.get('client_id')

    sliver_xrn = OSXrn(xrn=slice_xrn, type='slice', id=instance.id)
    rspec_node['sliver_id'] = sliver_xrn.get_urn()

    # sliver details come from the instance's flavor
    flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
    sliver = instance_to_sliver(flavor)

    # collect the instance's firewall rules from its security group
    fw_rules = []
    group_name = instance.metadata.get('security_groups')
    if group_name:
        group = self.driver.shell.nova_manager.security_groups.find(name=group_name)
        for rule in group.rules:
            port_range = "%s:%s" % (rule['from_port'], rule['to_port'])
            fw_rule = FWRule({'protocol': rule['ip_protocol'],
                              'port_range': port_range,
                              'cidr_ip': rule['ip_range']['cidr']})
            fw_rules.append(fw_rule)
    sliver['fw_rules'] = fw_rules
    rspec_node['slivers'] = [sliver]

    # disk image: glance may return a list; use the first match
    image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
    if isinstance(image, list) and len(image) > 0:
        image = image[0]
    disk_image = image_to_rspec_disk_image(image)
    sliver['disk_image'] = [disk_image]

    rspec_node['services'] = []
    rspec_node['interfaces'] = []
    addresses = instance.addresses
    # HACK: public ips are stored in the list of private, but
    # this seems wrong. Assume pub ip is the last in the list of
    # private ips until openstack bug is fixed.
    if addresses.get('private'):
        login = Login({'authentication': 'ssh-keys',
                       'hostname': addresses.get('private')[-1]['addr'],
                       'port': '22', 'username': 'root'})
        service = Services({'login': login})
        rspec_node['services'].append(service)

    # one interface element per private ip; enumerate() supplies the eth
    # index (the original referenced an undefined `if_index` — bug fix)
    for if_index, private_ip in enumerate(addresses.get('private', [])):
        if_xrn = PlXrn(auth=self.driver.hrn,
                       interface='node%s' % (instance.hostId))
        if_client_id = Xrn(if_xrn.urn, type='interface', id="eth%s" % if_index).urn
        if_sliver_id = Xrn(rspec_node['sliver_id'], type='slice', id="eth%s" % if_index).urn
        interface = Interface({'component_id': if_xrn.urn,
                               'client_id': if_client_id,
                               'sliver_id': if_sliver_id})
        interface['ips'] = [{'address': private_ip['addr'],
                             #'netmask': private_ip['network'],
                             'type': 'ipv%s' % str(private_ip['version'])}]
        rspec_node['interfaces'].append(interface)

    # slivers always provide the ssh service
    for public_ip in addresses.get('public', []):
        login = Login({'authentication': 'ssh-keys',
                       'hostname': public_ip['addr'],
                       'port': '22', 'username': 'root'})
        service = Services({'login': login})
        rspec_node['services'].append(service)
    return rspec_node
def get_slice_nodes(self, slice_xrn):
    """Return manifest rspec Node elements for every instance of a slice.

    :param slice_xrn: slice urn/hrn
    :returns: list of Node elements (one per nova server in the slice)
    """
    # update nova connection so we query the slice's own tenant
    tenant_name = OSXrn(xrn=slice_xrn, type='slice').get_tenant_name()
    self.driver.shell.nova_manager.connect(tenant=tenant_name)

    # NOTE(review): the zones result is unused; the call is kept only for
    # its nova round-trip side effect — confirm whether it can be dropped
    zones = self.get_availability_zones()
    name = hrn_to_os_slicename(slice_xrn)
    instances = self.driver.shell.nova_manager.servers.findall(name=name)
    rspec_nodes = []
    for instance in instances:
        rspec_nodes.append(self.instance_to_rspec_node(slice_xrn, instance))
    return rspec_nodes
def get_aggregate_nodes(self):
    """Return advertisement rspec Node elements for this aggregate.

    One Node is advertised per availability zone; each node offers every
    flavor as a sliver type, bootable from any of the advertised images.

    :returns: list of Node elements
    """
    zones = self.get_availability_zones()

    # available sliver/instance/vm types
    instances = self.driver.shell.nova_manager.flavors.list()
    if isinstance(instances, dict):
        instances = instances.values()

    # available images: only machine (ami) and ovf containers are advertised
    images = self.driver.shell.image_manager.get_images_detailed()
    disk_images = [image_to_rspec_disk_image(img) for img in images
                   if img['container_format'] in ['ami', 'ovf']]

    rspec_nodes = []
    for zone in zones:
        rspec_node = Node()
        xrn = OSXrn(zone, type='node')
        rspec_node['component_id'] = xrn.urn
        rspec_node['component_name'] = xrn.name
        rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
        rspec_node['exclusive'] = 'false'
        rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
                                        HardwareType({'name': 'pc'})]
        # advertise every flavor as a possible sliver on this zone
        slivers = []
        for instance in instances:
            sliver = instance_to_sliver(instance)
            sliver['disk_image'] = disk_images
            slivers.append(sliver)
        rspec_node['slivers'] = slivers
        rspec_nodes.append(rspec_node)
    return rspec_nodes
def create_tenant(self, tenant_name):
    """Return the keystone tenant with this name, creating it if missing.

    :param tenant_name: name (also used as the description on creation)
    :returns: the tenant object
    """
    tenants = self.driver.shell.auth_manager.tenants.findall(name=tenant_name)
    if not tenants:
        # create then re-fetch so we return the server-side object
        self.driver.shell.auth_manager.tenants.create(tenant_name, tenant_name)
        tenant = self.driver.shell.auth_manager.tenants.find(name=tenant_name)
    else:
        tenant = tenants[0]
    return tenant
def create_instance_key(self, slice_hrn, user):
    """Ensure a nova keypair named '<slice>_<user>' holds the user's key.

    An existing keypair with the same name but different key material is
    deleted; the keypair is (re)created unless a matching one was found.

    :param slice_hrn: hrn of the slice
    :param user: dict with 'urn' and a non-empty 'keys' list
    :returns: the keypair name
    """
    slice_name = Xrn(slice_hrn).leaf
    user_name = Xrn(user['urn']).leaf
    key_name = "%s_%s" % (slice_name, user_name)
    pubkey = user['keys'][0]
    key_found = False
    existing_keys = self.driver.shell.nova_manager.keypairs.findall(name=key_name)
    for existing_key in existing_keys:
        if existing_key.public_key != pubkey:
            # same name, stale key material: remove it
            self.driver.shell.nova_manager.keypairs.delete(existing_key)
        elif existing_key.public_key == pubkey:
            key_found = True

    if not key_found:
        self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
    return key_name
def create_security_group(self, slicename, fw_rules=None):
    """Create a security group implementing the requested firewall rules.

    With no rules the shared 'default' group is used. Otherwise the
    sliver gets its own uniquely-named group seeded with the requested
    rules plus an allow-all ICMP rule.

    :param slicename: prefix for the generated group name
    :param fw_rules: list of dicts with protocol/cidr_ip/port_range/
        icmp_type_code keys (default: no rules)
    :returns: the name of the security group to launch instances with
    """
    if fw_rules is None:  # avoid a shared mutable default argument
        fw_rules = []
    # use default group by default
    group_name = 'default'
    if isinstance(fw_rules, list) and fw_rules:
        # Each sliver gets its own security group.
        # Keep security group names unique by appending some random
        # characters on the end. (ascii_letters instead of the
        # py2-only, locale-dependent string.letters)
        random_name = "".join([random.choice(string.ascii_letters + string.digits)
                               for _ in range(6)])
        group_name = slicename + random_name
        security_group = SecurityGroup(self.driver)
        security_group.create_security_group(group_name)
        for rule in fw_rules:
            security_group.add_rule_to_group(group_name,
                                             protocol=rule.get('protocol'),
                                             cidr_ip=rule.get('cidr_ip'),
                                             port_range=rule.get('port_range'),
                                             icmp_type_code=rule.get('icmp_type_code'))
        # Open ICMP by default
        security_group.add_rule_to_group(group_name,
                                         protocol="icmp",
                                         cidr_ip="0.0.0.0/0",
                                         icmp_type_code="-1:-1")
    return group_name
def add_rule_to_security_group(self, group_name, **kwds):
    """Add a single firewall rule to an existing security group.

    Recognized kwds: protocol, cidr_ip, port_range, icmp_type_code;
    missing ones default to None. (port_range was previously accepted
    but silently dropped, unlike the create_security_group path.)

    :param group_name: name of the target security group
    """
    security_group = SecurityGroup(self.driver)
    security_group.add_rule_to_group(group_name=group_name,
                                     protocol=kwds.get('protocol'),
                                     cidr_ip=kwds.get('cidr_ip'),
                                     port_range=kwds.get('port_range'),
                                     icmp_type_code=kwds.get('icmp_type_code'))
def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
    """Boot one nova server per requested sliver in the rspec.

    :param instance_name: name given to every created server
    :param tenant_name: tenant (slice) the servers belong to
    :param rspec: request rspec (xml string, parsed below)
    :param key_name: nova keypair name to inject
    :param pubkeys: ssh public keys written to root's authorized_keys
    :returns: list of created server objects (failed ones are logged
        and skipped)
    """
    # make sure a tenant exists for this slice
    tenant = self.create_tenant(tenant_name)

    # add the sfa admin user to this tenant and update our nova client
    # connection to use these credentials for the rest of this session.
    # This ensures that the instances we create will be assigned to the
    # correct tenant.
    sfa_admin_user = self.driver.shell.auth_manager.users.find(name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
    user_role = self.driver.shell.auth_manager.roles.find(name='user')
    admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
    self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
    self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
    self.driver.shell.nova_manager.connect(tenant=tenant.name)

    authorized_keys = "\n".join(pubkeys)
    files = {'/root/.ssh/authorized_keys': authorized_keys}

    rspec = RSpec(rspec)
    requested_instances = defaultdict(list)

    # iterate over clouds/zones/nodes
    created_instances = []
    for node in rspec.version.get_nodes_with_slivers():
        instances = node.get('slivers', [])
        if not instances:
            continue
        for instance in instances:
            try:
                metadata = {}
                flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
                image = instance.get('disk_image')
                if image and isinstance(image, list):
                    image = image[0]
                else:
                    raise InvalidRSpec("Must specify a disk_image for each VM")
                image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
                fw_rules = instance.get('fw_rules', [])
                group_name = self.create_security_group(instance_name, fw_rules)
                metadata['security_groups'] = group_name
                if node.get('component_id'):
                    metadata['component_id'] = node['component_id']
                if node.get('client_id'):
                    metadata['client_id'] = node['client_id']
                server = self.driver.shell.nova_manager.servers.create(
                    flavor=flavor_id,
                    image=image_id,
                    key_name=key_name,
                    security_groups=[group_name],
                    files=files,
                    meta=metadata,
                    name=instance_name)
                created_instances.append(server)
            # py3-compatible form of the old `except Exception, err`;
            # best-effort: log and continue with the remaining slivers
            except Exception as err:
                logger.log_exc(err)
    return created_instances
def delete_instances(self, instance_name, tenant_name):
    """Delete every server named instance_name in the given tenant.

    Each server's dedicated security group is removed in a background
    thread once nova has actually deleted the server.

    :returns: 1 (legacy success code)
    """

    def _delete_security_group(instance):
        # wait for nova to finish deleting the instance before removing
        # its security group, but never longer than the timeout
        security_group = instance.metadata.get('security_groups', '')
        if security_group:
            manager = SecurityGroup(self.driver)
            timeout = 10.0  # wait a maximum of 10 seconds before forcing the security group delete
            start_time = time.time()
            instance_deleted = False
            while not instance_deleted and (time.time() - start_time) < timeout:
                inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
                if not inst:
                    instance_deleted = True
                time.sleep(0.5)
            manager.delete_security_group(security_group)

    thread_manager = ThreadManager()
    self.driver.shell.nova_manager.connect(tenant=tenant_name)
    instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
    for instance in instances:
        # destroy the instance
        self.driver.shell.nova_manager.servers.delete(instance)
        # delete this instance's security groups in the background
        thread_manager.run(_delete_security_group, instance)
    return 1
def stop_instances(self, instance_name, tenant_name):
    """Pause every server named instance_name in the given tenant.

    :returns: 1 (legacy success code)
    """
    self.driver.shell.nova_manager.connect(tenant=tenant_name)
    instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
    for instance in instances:
        self.driver.shell.nova_manager.servers.pause(instance)
    return 1
363 def update_instances(self, project_name):