Commit: interface elements in the manifest RSpec should contain a sliver_id attribute
[sfa.git] / sfa/openstack/osaggregate.py
1
2 import os
3 import socket
4 import base64
5 import string
6 import random
7 import time
8 from collections import defaultdict
9 from nova.exception import ImageNotFound
10 from nova.api.ec2.cloud import CloudController
11 from sfa.util.faults import SfaAPIError, InvalidRSpec
12 from sfa.rspecs.rspec import RSpec
13 from sfa.rspecs.elements.hardware_type import HardwareType
14 from sfa.rspecs.elements.node import Node
15 from sfa.rspecs.elements.sliver import Sliver
16 from sfa.rspecs.elements.login import Login
17 from sfa.rspecs.elements.disk_image import DiskImage
18 from sfa.rspecs.elements.services import Services
19 from sfa.rspecs.elements.interface import Interface
20 from sfa.util.xrn import Xrn
21 from sfa.planetlab.plxrn import PlXrn 
22 from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
23 from sfa.rspecs.version_manager import VersionManager
24 from sfa.openstack.security_group import SecurityGroup
25 from sfa.server.threadmanager import ThreadManager
26 from sfa.util.sfalogging import logger
27
def pubkeys_to_user_data(pubkeys):
    """Build a bash user-data script that appends each public key in
    *pubkeys* (newlines stripped) to root's authorized_keys, followed by
    a blank separator line per key."""
    script_lines = ["#!/bin/bash\n"]
    for key in pubkeys:
        key = key.replace('\n', '')
        script_lines.append("echo %s >> /root/.ssh/authorized_keys" % key)
        script_lines.append("echo >> /root/.ssh/authorized_keys")
    return "\n".join(script_lines) + "\n"
37
def instance_to_sliver(instance, slice_xrn=None):
    """Convert a nova flavor-like record into an RSpec Sliver element.

    Only the flavor-style attributes (name, vcpus, ram, disk) are read
    from *instance*.  *slice_xrn* is accepted for backward compatibility
    but is currently unused.  (Removed a dead ``sliver_id = None`` local
    that was assigned and never read.)
    """
    # 'type' deliberately mirrors 'name': flavors are advertised as
    # sliver types elsewhere in this module (see get_aggregate_nodes).
    sliver = Sliver({'name': instance.name,
                     'type': instance.name,
                     'cpus': str(instance.vcpus),
                     'memory': str(instance.ram),
                     'storage': str(instance.disk)})
    return sliver
46
def image_to_rspec_disk_image(image):
    """Convert a glance image record into an RSpec DiskImage element.

    Only ``image['name']`` is available here, so it is used to populate
    every DiskImage field (name, description, os, version) alike.
    """
    disk_image = DiskImage()
    for field in ('name', 'description', 'os', 'version'):
        disk_image[field] = image['name']
    return disk_image
54     
class OSAggregate:
    """Translate between SFA RSpecs and OpenStack resources.

    Advertisement RSpecs are built from the cloud's availability zones,
    flavors and images; manifest RSpecs from a slice's running
    instances.  Provisioning creates tenants, keypairs, security groups
    and nova instances.  All OpenStack access goes through
    ``self.driver.shell`` (nova_manager / image_manager / auth_manager).
    """

    def __init__(self, driver):
        self.driver = driver

    def get_rspec(self, slice_xrn=None, version=None, options=None):
        """Return an RSpec XML string: an advertisement when *slice_xrn*
        is None, otherwise a manifest for that slice.

        (``options`` default changed from a shared mutable ``{}`` to
        None; behavior is unchanged.)
        """
        if options is None:
            options = {}
        version_manager = VersionManager()
        version = version_manager.get_version(version)
        if not slice_xrn:
            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
            nodes = self.get_aggregate_nodes()
        else:
            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
            nodes = self.get_slice_nodes(slice_xrn)
        rspec = RSpec(version=rspec_version, user_options=options)
        rspec.version.add_nodes(nodes)
        return rspec.toxml()

    def get_availability_zones(self):
        """Return the list of availability zone names, defaulting to
        ['cloud'] when the deployment reports none."""
        # essex release
        zones = self.driver.shell.nova_manager.dns_domains.domains()
        if not zones:
            zones = ['cloud']
        else:
            zones = [zone.name for zone in zones]
        return zones

    def get_slice_nodes(self, slice_xrn):
        """Build manifest RSpec Node elements for every nova instance
        belonging to *slice_xrn*, including sliver, interface and ssh
        login service elements."""
        # point the nova connection at this slice's tenant
        tenant_name = OSXrn(xrn=slice_xrn, type='slice').get_tenant_name()
        self.driver.shell.nova_manager.connect(tenant=tenant_name)

        name = hrn_to_os_slicename(slice_xrn)
        instances = self.driver.shell.nova_manager.servers.findall(name=name)
        rspec_nodes = []
        for instance in instances:
            # determine node urn: prefer the component_id recorded in the
            # instance metadata at creation time (see run_instances),
            # else fall back to a generic 'cloud' node
            node_xrn = instance.metadata.get('component_id')
            if not node_xrn:
                node_xrn = OSXrn('cloud', type='node')
            else:
                node_xrn = OSXrn(xrn=node_xrn, type='node')

            rspec_node = Node()
            rspec_node['component_id'] = node_xrn.urn
            rspec_node['component_name'] = node_xrn.name
            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
            rspec_node['slivers'] = []

            if instance.metadata.get('client_id'):
                rspec_node['client_id'] = instance.metadata.get('client_id')

            # the sliver element mirrors the instance's flavor
            flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
            sliver = instance_to_sliver(flavor)
            rspec_node['slivers'].append(sliver)
            sliver_xrn = OSXrn(xrn=slice_xrn, type='slice', id=instance.id)
            rspec_node['sliver_id'] = sliver_xrn.get_urn()
            image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
            if isinstance(image, list) and len(image) > 0:
                image = image[0]
            disk_image = image_to_rspec_disk_image(image)
            sliver['disk_image'] = [disk_image]

            # build interfaces
            rspec_node['services'] = []
            rspec_node['interfaces'] = []
            addresses = instance.addresses
            # HACK: public ips are stored in the list of private, but
            # this seems wrong. Assume pub ip is the last in the list of
            # private ips until openstack bug is fixed.
            if addresses.get('private'):
                login = Login({'authentication': 'ssh-keys',
                               'hostname': addresses.get('private')[-1]['addr'],
                               'port': '22', 'username': 'root'})
                service = Services({'login': login})
                rspec_node['services'].append(service)

            # BUGFIX: if_index was previously used here without ever
            # being defined (NameError as soon as a slice instance had a
            # private address). Enumerate the private ips so every
            # interface element gets a unique ethN client_id/sliver_id.
            for if_index, private_ip in enumerate(addresses.get('private', [])):
                if_xrn = PlXrn(auth=self.driver.hrn,
                               interface='node%s' % (instance.hostId))
                if_client_id = Xrn(if_xrn.urn, type='interface', id="eth%s" % if_index).urn
                if_sliver_id = Xrn(rspec_node['sliver_id'], type='slice', id="eth%s" % if_index).urn
                interface = Interface({'component_id': if_xrn.urn,
                                       'client_id': if_client_id,
                                       'sliver_id': if_sliver_id})
                interface['ips'] = [{'address': private_ip['addr'],
                                     #'netmask': private_ip['network'],
                                     'type': 'ipv%s' % str(private_ip['version'])}]
                rspec_node['interfaces'].append(interface)

            # slivers always provide the ssh service
            for public_ip in addresses.get('public', []):
                login = Login({'authentication': 'ssh-keys',
                               'hostname': public_ip['addr'],
                               'port': '22', 'username': 'root'})
                service = Services({'login': login})
                rspec_node['services'].append(service)
            rspec_nodes.append(rspec_node)
        return rspec_nodes

    def get_aggregate_nodes(self):
        """Build advertisement RSpec Node elements: one node per
        availability zone, each advertising every flavor as a sliver
        type and every ami/ovf image as a disk_image."""
        zones = self.get_availability_zones()
        # available sliver/instance/vm types
        instances = self.driver.shell.nova_manager.flavors.list()
        if isinstance(instances, dict):
            instances = instances.values()
        # available images; only machine images are advertised
        images = self.driver.shell.image_manager.get_images_detailed()
        disk_images = [image_to_rspec_disk_image(img) for img in images
                       if img['container_format'] in ['ami', 'ovf']]
        rspec_nodes = []
        for zone in zones:
            rspec_node = Node()
            xrn = OSXrn(zone, type='node')
            rspec_node['component_id'] = xrn.urn
            rspec_node['component_name'] = xrn.name
            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
            rspec_node['exclusive'] = 'false'
            rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
                                            HardwareType({'name': 'pc'})]
            slivers = []
            for instance in instances:
                sliver = instance_to_sliver(instance)
                sliver['disk_image'] = disk_images
                slivers.append(sliver)

            rspec_node['slivers'] = slivers
            rspec_nodes.append(rspec_node)

        return rspec_nodes

    def create_tenant(self, tenant_name):
        """Return the keystone tenant named *tenant_name*, creating it
        (with the name doubling as description) if it does not exist."""
        tenants = self.driver.shell.auth_manager.tenants.findall(name=tenant_name)
        if not tenants:
            self.driver.shell.auth_manager.tenants.create(tenant_name, tenant_name)
            tenant = self.driver.shell.auth_manager.tenants.find(name=tenant_name)
        else:
            tenant = tenants[0]
        return tenant

    def create_instance_key(self, slice_hrn, user):
        """Ensure a nova keypair named '<slice>_<user>' exists holding the
        user's current public key, replacing any stale keypair of the
        same name.  Returns the keypair name.

        NOTE(review): assumes user['keys'] is non-empty — confirm callers
        guarantee at least one key.
        """
        slice_name = Xrn(slice_hrn).leaf
        user_name = Xrn(user['urn']).leaf
        key_name = "%s_%s" % (slice_name, user_name)
        pubkey = user['keys'][0]
        key_found = False
        existing_keys = self.driver.shell.nova_manager.keypairs.findall(name=key_name)
        for existing_key in existing_keys:
            if existing_key.public_key != pubkey:
                # same name, different key material: drop the stale one
                self.driver.shell.nova_manager.keypairs.delete(existing_key)
            elif existing_key.public_key == pubkey:
                key_found = True

        if not key_found:
            self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
        return key_name

    def create_security_group(self, slicename, fw_rules=None):
        """Create a per-sliver security group from *fw_rules* and return
        its name; fall back to the shared 'default' group when no rules
        are requested.

        (``fw_rules`` default changed from a shared mutable ``[]`` to
        None; the isinstance/truthiness check below treats both the
        same.  ``string.letters`` replaced with the locale-independent
        ``string.ascii_letters``.)
        """
        # use default group by default
        group_name = 'default'
        if isinstance(fw_rules, list) and fw_rules:
            # Each sliver gets its own security group.
            # Keep security group names unique by appending some random
            # characters on the end.
            random_name = "".join([random.choice(string.ascii_letters + string.digits)
                                   for i in xrange(6)])
            group_name = slicename + random_name
            security_group = SecurityGroup(self.driver)
            security_group.create_security_group(group_name)
            for rule in fw_rules:
                security_group.add_rule_to_group(group_name,
                                                 protocol=rule.get('protocol'),
                                                 cidr_ip=rule.get('cidr_ip'),
                                                 port_range=rule.get('port_range'),
                                                 icmp_type_code=rule.get('icmp_type_code'))
            # Open ICMP by default
            security_group.add_rule_to_group(group_name,
                                             protocol="icmp",
                                             cidr_ip="0.0.0.0/0",
                                             icmp_type_code="-1:-1")
        return group_name

    def add_rule_to_security_group(self, group_name, **kwds):
        """Add a single firewall rule to an existing security group.

        BUGFIX: a caller-supplied ``port_range`` kwarg was previously
        dropped on the floor; it is now forwarded, matching
        create_security_group() above.
        """
        security_group = SecurityGroup(self.driver)
        security_group.add_rule_to_group(group_name=group_name,
                                         protocol=kwds.get('protocol'),
                                         cidr_ip=kwds.get('cidr_ip'),
                                         port_range=kwds.get('port_range'),
                                         icmp_type_code=kwds.get('icmp_type_code'))

    def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
        """Boot one nova instance per requested sliver in *rspec*.

        Ensures the slice's tenant exists, grants the SFA admin user the
        'admin' and 'user' roles on it, reconnects nova under that
        tenant, then creates each instance with the users' public keys
        injected into /root/.ssh/authorized_keys.  Per-sliver failures
        are logged and skipped so one bad sliver does not abort the
        rest.
        """
        # make sure a tenant exists for this slice
        tenant = self.create_tenant(tenant_name)

        # add the sfa admin user to this tenant and update our nova client
        # connection to use these credentials for the rest of this session.
        # This ensures that the instances we create will be assigned to the
        # correct tenant.
        sfa_admin_user = self.driver.shell.auth_manager.users.find(name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
        user_role = self.driver.shell.auth_manager.roles.find(name='user')
        admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
        self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
        self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
        self.driver.shell.nova_manager.connect(tenant=tenant.name)

        authorized_keys = "\n".join(pubkeys)
        files = {'/root/.ssh/authorized_keys': authorized_keys}
        rspec = RSpec(rspec)
        # iterate over clouds/zones/nodes
        for node in rspec.version.get_nodes_with_slivers():
            instances = node.get('slivers', [])
            if not instances:
                continue
            for instance in instances:
                try:
                    metadata = {}
                    flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
                    image = instance.get('disk_image')
                    if image and isinstance(image, list):
                        image = image[0]
                    else:
                        raise InvalidRSpec("Must specify a disk_image for each VM")
                    image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
                    fw_rules = instance.get('fw_rules', [])
                    group_name = self.create_security_group(instance_name, fw_rules)
                    # record the group so delete_instances() can clean it up
                    metadata['security_groups'] = group_name
                    if node.get('component_id'):
                        metadata['component_id'] = node['component_id']
                    if node.get('client_id'):
                        metadata['client_id'] = node['client_id']
                    self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
                                                                  image=image_id,
                                                                  key_name=key_name,
                                                                  security_groups=[group_name],
                                                                  files=files,
                                                                  meta=metadata,
                                                                  name=instance_name)
                except Exception as err:
                    logger.log_exc(err)

    def delete_instances(self, instance_name, tenant_name):
        """Delete every instance named *instance_name* in *tenant_name*,
        and remove each instance's per-sliver security group in a
        background thread.  Returns True."""

        def _delete_security_group(instance):
            # Per-sliver groups are recorded in the instance metadata by
            # run_instances(); the shared 'default' group has no entry.
            security_group = instance.metadata.get('security_groups', '')
            if security_group:
                manager = SecurityGroup(self.driver)
                # wait a maximum of 10 seconds for nova to finish deleting
                # the instance, then force the security group delete anyway
                timeout = 10.0
                start_time = time.time()
                instance_deleted = False
                while not instance_deleted and (time.time() - start_time) < timeout:
                    inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
                    if not inst:
                        instance_deleted = True
                    time.sleep(.5)
                manager.delete_security_group(security_group)

        thread_manager = ThreadManager()
        self.driver.shell.nova_manager.connect(tenant=tenant_name)
        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
        for instance in instances:
            # destroy instance
            self.driver.shell.nova_manager.servers.delete(instance)
            # delete this instance's security groups in the background
            thread_manager.run(_delete_security_group, instance)
        return True

    def stop_instances(self, instance_name, tenant_name):
        """Pause (not terminate) every instance named *instance_name* in
        *tenant_name*.  Returns 1 (kept for caller compatibility)."""
        self.driver.shell.nova_manager.connect(tenant=tenant_name)
        instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
        for instance in instances:
            self.driver.shell.nova_manager.servers.pause(instance)
        return 1

    def update_instances(self, project_name):
        """Not implemented: in-place update of a project's instances."""
        pass