Open ICMP by default, report error if no disk image specified
[sfa.git] / sfa / openstack / osaggregate.py
import os
import socket
import base64
import string
import random
import time
from collections import defaultdict
from nova.exception import ImageNotFound
from nova.api.ec2.cloud import CloudController
from sfa.util.faults import SfaAPIError, InvalidRSpec
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.elements.hardware_type import HardwareType
from sfa.rspecs.elements.node import Node
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.login import Login
from sfa.rspecs.elements.disk_image import DiskImage
from sfa.rspecs.elements.services import Services
from sfa.rspecs.elements.interface import Interface
from sfa.util.xrn import Xrn
from sfa.planetlab.plxrn import PlXrn
from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
from sfa.rspecs.version_manager import VersionManager
from sfa.openstack.security_group import SecurityGroup
from sfa.server.threadmanager import ThreadManager
from sfa.util.sfalogging import logger

def pubkeys_to_user_data(pubkeys):
    # Build a boot-time shell script (instance user-data) that appends each
    # public key, one per line, to root's authorized_keys file.
    user_data = "#!/bin/bash\n\n"
    for pubkey in pubkeys:
        pubkey = pubkey.replace('\n', '')
        user_data += "echo %s >> /root/.ssh/authorized_keys" % pubkey
        user_data += "\n"
        user_data += "echo >> /root/.ssh/authorized_keys"
        user_data += "\n"
    return user_data
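# A minimal usage sketch (hypothetical keys shown) of the user-data script
# produced above; on boot the instance appends each key to
# /root/.ssh/authorized_keys:
#
#   user_data = pubkeys_to_user_data(['ssh-rsa AAAA... alice',
#                                     'ssh-rsa BBBB... bob'])
#   # user_data now reads:
#   #   #!/bin/bash
#   #
#   #   echo ssh-rsa AAAA... alice >> /root/.ssh/authorized_keys
#   #   echo >> /root/.ssh/authorized_keys
#   #   echo ssh-rsa BBBB... bob >> /root/.ssh/authorized_keys
#   #   echo >> /root/.ssh/authorized_keys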

def instance_to_sliver(instance, slice_xrn=None):
    # Map a nova instance (or flavor) record onto an RSpec Sliver element.
    # A sliver id is only attached when the caller supplies the slice xrn.
    sliver_id = None
    if slice_xrn:
        xrn = Xrn(slice_xrn, 'slice')
        sliver_id = xrn.get_sliver_id(instance.project_id, instance.hostname, instance.id)

    sliver = Sliver({'slice_id': sliver_id,
                     'name': instance.name,
                     'type': instance.name,
                     'cpus': str(instance.vcpus),
                     'memory': str(instance.ram),
                     'storage': str(instance.disk)})
    return sliver
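# A rough sketch of the mapping, assuming a flavor-like object (as passed in
# by get_slice_nodes/get_aggregate_nodes below) with hypothetical values:
#
#   flavor = <object with name='m1.small', vcpus=1, ram=2048, disk=20>
#   sliver = instance_to_sliver(flavor)
#   # sliver -> {'slice_id': None, 'name': 'm1.small', 'type': 'm1.small',
#   #            'cpus': '1', 'memory': '2048', 'storage': '20'}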

def image_to_rspec_disk_image(image):
    img = DiskImage()
    img['name'] = image['name']
    img['description'] = image['name']
    img['os'] = image['name']
    img['version'] = image['name']
    return img
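# Sketch of the image mapping, assuming a glance image record (a dict-like
# object exposing at least a 'name' field); every DiskImage field is filled
# from that single name:
#
#   disk_image = image_to_rspec_disk_image({'name': 'ubuntu-12.04-ami'})
#   # disk_image['name'] == disk_image['os'] == disk_image['version']
#   #                    == disk_image['description'] == 'ubuntu-12.04-ami'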

class OSAggregate:

    def __init__(self, driver):
        self.driver = driver

    def get_rspec(self, slice_xrn=None, version=None, options={}):
        # Without a slice xrn, advertise the whole aggregate; with one,
        # return a manifest describing that slice's instances.
        version_manager = VersionManager()
        version = version_manager.get_version(version)
        if not slice_xrn:
            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
            nodes = self.get_aggregate_nodes()
        else:
            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
            nodes = self.get_slice_nodes(slice_xrn)
        rspec = RSpec(version=rspec_version, user_options=options)
        rspec.version.add_nodes(nodes)
        return rspec.toxml()

    def get_availability_zones(self):
        # essex release
        zones = self.driver.shell.nova_manager.dns_domains.domains()

        if not zones:
            zones = ['cloud']
        else:
            zones = [zone.name for zone in zones]
        return zones

    def get_slice_nodes(self, slice_xrn):
        # update nova connection
        tenant_name = OSXrn(xrn=slice_xrn, type='slice').get_tenant_name()
        self.driver.shell.nova_manager.connect(tenant=tenant_name)

        zones = self.get_availability_zones()
        name = hrn_to_os_slicename(slice_xrn)
        instances = self.driver.shell.nova_manager.servers.findall(name=name)
        rspec_nodes = []
        for instance in instances:
            # determine node urn
            node_xrn = instance.metadata.get('component_id')
            if not node_xrn:
                node_xrn = OSXrn('cloud', type='node')
            else:
                node_xrn = OSXrn(xrn=node_xrn, type='node')

            rspec_node = Node()
            rspec_node['component_id'] = node_xrn.urn
            rspec_node['component_name'] = node_xrn.name
            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
            rspec_node['slivers'] = []

            if instance.metadata.get('client_id'):
                rspec_node['client_id'] = instance.metadata.get('client_id')

            flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
            sliver = instance_to_sliver(flavor)
            rspec_node['slivers'].append(sliver)
            image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
            if isinstance(image, list) and len(image) > 0:
                image = image[0]
            disk_image = image_to_rspec_disk_image(image)
            sliver['disk_image'] = [disk_image]

            # build interfaces
            rspec_node['services'] = []
            rspec_node['interfaces'] = []
            addresses = instance.addresses
            # HACK: public ips sometimes show up in the 'private' address list.
            # Until the openstack bug is fixed, assume the public ip is the
            # last address in the private list.
            if addresses.get('private'):
                login = Login({'authentication': 'ssh-keys',
                               'hostname': addresses.get('private')[-1]['addr'],
                               'port': '22', 'username': 'root'})
                service = Services({'login': login})
                rspec_node['services'].append(service)

            for private_ip in addresses.get('private', []):
                if_xrn = PlXrn(auth=self.driver.hrn,
                               interface='node%s:eth0' % (instance.hostId))
                interface = Interface({'component_id': if_xrn.urn})
                interface['ips'] = [{'address': private_ip['addr'],
                                     #'netmask': private_ip['network'],
                                     'type': private_ip['version']}]
                rspec_node['interfaces'].append(interface)

            # slivers always provide the ssh service
            for public_ip in addresses.get('public', []):
                login = Login({'authentication': 'ssh-keys',
                               'hostname': public_ip['addr'],
                               'port': '22', 'username': 'root'})
                service = Services({'login': login})
                rspec_node['services'].append(service)
            rspec_nodes.append(rspec_node)
        return rspec_nodes
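    # For reference, each manifest node built above has roughly this shape
    # (placeholders, not literal values; keys match the assignments in the code):
    #   {'component_id': <node urn from metadata, or the 'cloud' OSXrn>,
    #    'component_name': <node name>,
    #    'component_manager_id': <this aggregate's authority+cm urn>,
    #    'slivers': [<Sliver carrying a one-element 'disk_image' list>],
    #    'services': [<ssh Login per public ip / last private ip>],
    #    'interfaces': [<one Interface per private ip>]}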

    def get_aggregate_nodes(self):
        zones = self.get_availability_zones()
        # available sliver/instance/vm types
        instances = self.driver.shell.nova_manager.flavors.list()
        if isinstance(instances, dict):
            instances = instances.values()
        # available images
        images = self.driver.shell.image_manager.get_images_detailed()
        disk_images = [image_to_rspec_disk_image(img) for img in images if img['container_format'] in ['ami', 'ovf']]
        rspec_nodes = []
        for zone in zones:
            rspec_node = Node()
            xrn = OSXrn(zone, type='node')
            rspec_node['component_id'] = xrn.urn
            rspec_node['component_name'] = xrn.name
            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
            rspec_node['exclusive'] = 'false'
            rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
                                            HardwareType({'name': 'pc'})]
            slivers = []
            for instance in instances:
                sliver = instance_to_sliver(instance)
                sliver['disk_image'] = disk_images
                slivers.append(sliver)

            rspec_node['slivers'] = slivers
            rspec_nodes.append(rspec_node)

        return rspec_nodes

    def create_tenant(self, tenant_name):
        tenants = self.driver.shell.auth_manager.tenants.findall(name=tenant_name)
        if not tenants:
            self.driver.shell.auth_manager.tenants.create(tenant_name, tenant_name)
            tenant = self.driver.shell.auth_manager.tenants.find(name=tenant_name)
        else:
            tenant = tenants[0]
        return tenant

    def create_instance_key(self, slice_hrn, user):
        # Register the user's first public key as a nova keypair named
        # <slice>_<user>, replacing any same-named keypair whose key differs.
        slice_name = Xrn(slice_hrn).leaf
        user_name = Xrn(user['urn']).leaf
        key_name = "%s_%s" % (slice_name, user_name)
        pubkey = user['keys'][0]
        key_found = False
        existing_keys = self.driver.shell.nova_manager.keypairs.findall(name=key_name)
        for existing_key in existing_keys:
            if existing_key.public_key != pubkey:
                self.driver.shell.nova_manager.keypairs.delete(existing_key)
            elif existing_key.public_key == pubkey:
                key_found = True

        if not key_found:
            self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
        return key_name
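    # A usage sketch with hypothetical arguments (called on an OSAggregate
    # instance); the keypair name is just the slice and user leaf names
    # joined by an underscore:
    #
    #   key_name = aggregate.create_instance_key('plc.site.slice1',
    #                  {'urn': 'urn:publicid:IDN+plc:site+user+alice',
    #                   'keys': ['ssh-rsa AAAA... alice']})
    #   # key_name == 'slice1_alice'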

    def create_security_group(self, slicename, fw_rules=[]):
        # use the default security group unless firewall rules were requested
        group_name = 'default'
        if isinstance(fw_rules, list) and fw_rules:
            # Each sliver gets its own security group.
            # Keep security group names unique by appending some random
            # characters to the end.
            random_name = "".join([random.choice(string.letters+string.digits)
                                   for i in xrange(6)])
            group_name = slicename + random_name
            security_group = SecurityGroup(self.driver)
            security_group.create_security_group(group_name)
            for rule in fw_rules:
                security_group.add_rule_to_group(group_name,
                                                 protocol=rule.get('protocol'),
                                                 cidr_ip=rule.get('cidr_ip'),
                                                 port_range=rule.get('port_range'),
                                                 icmp_type_code=rule.get('icmp_type_code'))
            # Open ICMP by default
            security_group.add_rule_to_group(group_name,
                                             protocol="icmp",
                                             cidr_ip="0.0.0.0/0",
                                             icmp_type_code="-1:-1")
        return group_name
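    # Sketch of the fw_rules format this method iterates over (keys taken from
    # the rule.get() calls above; the values shown are hypothetical):
    #
    #   fw_rules = [{'protocol': 'tcp', 'cidr_ip': '0.0.0.0/0',
    #                'port_range': '80:80', 'icmp_type_code': None}]
    #   group_name = aggregate.create_security_group('slice1', fw_rules)
    #   # returns 'slice1' plus six random characters; with no rules the
    #   # method simply returns 'default'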

    def add_rule_to_security_group(self, group_name, **kwds):
        security_group = SecurityGroup(self.driver)
        security_group.add_rule_to_group(group_name=group_name,
                                         protocol=kwds.get('protocol'),
                                         cidr_ip=kwds.get('cidr_ip'),
                                         icmp_type_code=kwds.get('icmp_type_code'))

    def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
        #logger.debug('Reserving an instance: image: %s, flavor: ' \
        #            '%s, key: %s, name: %s' % \
        #            (image_id, flavor_id, key_name, slicename))

        # make sure a tenant exists for this slice
        tenant = self.create_tenant(tenant_name)

        # add the sfa admin user to this tenant and update our nova client connection
        # to use these credentials for the rest of this session. This ensures that the
        # instances we create will be assigned to the correct tenant.
        sfa_admin_user = self.driver.shell.auth_manager.users.find(name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
        user_role = self.driver.shell.auth_manager.roles.find(name='user')
        admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
        self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
        self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
        self.driver.shell.nova_manager.connect(tenant=tenant.name)

        authorized_keys = "\n".join(pubkeys)
        files = {'/root/.ssh/authorized_keys': authorized_keys}
        rspec = RSpec(rspec)
        requested_instances = defaultdict(list)
        # iterate over clouds/zones/nodes
        for node in rspec.version.get_nodes_with_slivers():
            instances = node.get('slivers', [])
            if not instances:
                continue
            for instance in instances:
                try:
                    metadata = {}
                    flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
                    image = instance.get('disk_image')
                    if image and isinstance(image, list):
                        image = image[0]
                    else:
                        raise InvalidRSpec("Must specify a disk_image for each VM")
                    image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
                    fw_rules = instance.get('fw_rules', [])
                    group_name = self.create_security_group(instance_name, fw_rules)
                    metadata['security_groups'] = group_name
                    if node.get('component_id'):
                        metadata['component_id'] = node['component_id']
                    if node.get('client_id'):
                        metadata['client_id'] = node['client_id']
                    self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
                                                                  image=image_id,
                                                                  key_name=key_name,
                                                                  security_groups=[group_name],
                                                                  files=files,
                                                                  meta=metadata,
                                                                  name=instance_name)
                except Exception, err:
                    logger.log_exc(err)

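    # For reference, each requested sliver consumed above needs at least a
    # flavor name and a disk image; a minimal sketch of the expected fields
    # (values hypothetical):
    #
    #   sliver = {'name': 'm1.small',                       # nova flavor name
    #             'disk_image': [{'name': 'ubuntu-12.04'}],  # glance image name
    #             'fw_rules': []}                            # optional firewall rules
    #   # a missing or empty 'disk_image' raises InvalidRSpec, per this commit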

    def delete_instances(self, instance_name, tenant_name):

        def _delete_security_group(instance):
            security_group = instance.metadata.get('security_groups', '')
            if security_group:
                manager = SecurityGroup(self.driver)
                timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
                start_time = time.time()
                instance_deleted = False
                while not instance_deleted and (time.time() - start_time) < timeout:
                    inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
                    if not inst:
                        instance_deleted = True
                    time.sleep(.5)
                manager.delete_security_group(security_group)

        thread_manager = ThreadManager()
        self.driver.shell.nova_manager.connect(tenant=tenant_name)
        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
        for instance in instances:
            # destroy instance
            self.driver.shell.nova_manager.servers.delete(instance)
            # delete this instance's security groups
            thread_manager.run(_delete_security_group, instance)
        return 1

    def stop_instances(self, instance_name, tenant_name):
        self.driver.shell.nova_manager.connect(tenant=tenant_name)
        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
        for instance in instances:
            self.driver.shell.nova_manager.servers.pause(instance)
        return 1

    def update_instances(self, project_name):
        pass