X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fopenstack%2Fosaggregate.py;h=def366ffd20c6bcc636de0e7b2198dac03e05124;hb=1db1a879ffde8991aa95dd80142d555551655e88;hp=e7486e82bbeda38534cd7dd915632aca9d33c6fb;hpb=78467ecd9b02ec60d9572178995ed347fc917f59;p=sfa.git diff --git a/sfa/openstack/osaggregate.py b/sfa/openstack/osaggregate.py index e7486e82..def366ff 100644 --- a/sfa/openstack/osaggregate.py +++ b/sfa/openstack/osaggregate.py @@ -1,3 +1,10 @@ + +import os +import socket +import base64 +import string +import random +from collections import defaultdict from nova.exception import ImageNotFound from nova.api.ec2.cloud import CloudController from sfa.util.faults import SfaAPIError @@ -8,19 +15,14 @@ from sfa.rspecs.elements.sliver import Sliver from sfa.rspecs.elements.login import Login from sfa.rspecs.elements.disk_image import DiskImage from sfa.rspecs.elements.services import Services +from sfa.rspecs.elements.interface import Interface from sfa.util.xrn import Xrn +from sfa.util.plxrn import PlXrn, hrn_to_pl_slicename from sfa.util.osxrn import OSXrn from sfa.rspecs.version_manager import VersionManager - - -def disk_image_to_rspec_object(image): - img = DiskImage() - img['name'] = image['name'] - img['description'] = image['name'] - img['os'] = image['name'] - img['version'] = image['name'] - return img - +from sfa.openstack.image import ImageManager +from sfa.openstack.security_group import SecurityGroup +from sfa.util.sfalogging import logger def instance_to_sliver(instance, slice_xrn=None): # should include? 
@@ -48,52 +50,25 @@ def instance_to_sliver(instance, slice_xrn=None): sliver = Sliver({'slice_id': sliver_id, 'name': name, - 'type': 'plos-' + type, + 'type': type, 'tags': []}) return sliver - + + +def ec2_id(id=None, type=None): + ec2_id = None + if type == 'ovf': + type = 'ami' + if id and type: + ec2_id = CloudController.image_ec2_id(id, type) + return ec2_id + class OSAggregate: def __init__(self, driver): self.driver = driver - def get_machine_image_details(self, image): - """ - Returns a dict that contains the ami, aki and ari details for the specified - ami image. - """ - disk_image = {} - if image['container_format'] == 'ami': - disk_image['ami'] = image - disk_image['aki'] = self.driver.shell.image_manager.show(image['kernel_id']) - disk_image['ari'] = self.driver.shell.image_manager.show(image['ramdisk_id']) - return disk_image - - def get_disk_image(self, id=None, name=None): - """ - Look up a image bundle using the specifeid id or name - """ - disk_image = None - try: - if id: - image = self.driver.shell.image_manager.show(image_id) - elif name: - image = self.driver.shell.image_manager.show_by_name(image_name) - if image['container_format'] == 'ami': - disk_image = self.get_machine_image_details(image) - except ImageNotFound: - pass - return disk_image - - def get_available_disk_images(self): - # get image records - disk_images = [] - for image in self.driver.shell.image_manager.detail(): - if image['container_format'] == 'ami': - disk_images.append(self.get_machine_image_details(image)) - return disk_images - def get_rspec(self, slice_xrn=None, version=None, options={}): version_manager = VersionManager() version = version_manager.get_version(version) @@ -107,37 +82,69 @@ class OSAggregate: rspec.version.add_nodes(nodes) return rspec.toxml() + def get_availability_zones(self): + zones = self.driver.shell.db.zone_get_all() + if not zones: + zones = ['cloud'] + else: + zones = [zone.name for zone in zones] + def get_slice_nodes(self, slice_xrn): - 
name = OSXrn(xrn = slice_xrn).name + image_manager = ImageManager(self.driver) + + zones = self.get_availability_zones() + name = hrn_to_pl_slicename(slice_xrn) instances = self.driver.shell.db.instance_get_all_by_project(name) rspec_nodes = [] for instance in instances: rspec_node = Node() - xrn = OSXrn(instance.hostname, 'node') - rspec_node['component_id'] = xrn.urn - rspec_node['component_name'] = xrn.name + interfaces = [] + for fixed_ip in instance.fixed_ips: + if_xrn = PlXrn(auth=self.driver.hrn, + interface='node%s:eth0' % (instance.hostname)) + interface = Interface({'component_id': if_xrn.urn}) + interface['ips'] = [{'address': fixed_ip['address'], + 'netmask': fixed_ip['network'].netmask, + 'type': 'ipv4'}] + interfaces.append(interface) + if instance.availability_zone: + node_xrn = OSXrn(instance.availability_zone, 'node') + else: + node_xrn = OSXrn('cloud', 'node') + + rspec_node['component_id'] = node_xrn.urn + rspec_node['component_name'] = node_xrn.name rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn() sliver = instance_to_sliver(instance) - disk_image = self.get_disk_image(instance.image_ref) - sliver['disk_images'] = [disk_image_to_rspec_object(disk_image)] + disk_image = image_manager.get_disk_image(instance.image_ref) + sliver['disk_image'] = [disk_image.to_rspec_object()] rspec_node['slivers'] = [sliver] + rspec_node['interfaces'] = interfaces + # slivers always provide the ssh service + hostname = None + for interface in interfaces: + if 'ips' in interface and interface['ips'] and \ + isinstance(interface['ips'], list): + if interface['ips'][0].get('address'): + hostname = interface['ips'][0].get('address') + break + login = Login({'authentication': 'ssh-keys', + 'hostname': hostname, + 'port':'22', 'username': 'root'}) + service = Services({'login': login}) + rspec_node['services'] = [service] rspec_nodes.append(rspec_node) return rspec_nodes def get_aggregate_nodes(self): - - zones = 
self.driver.shell.db.zone_get_all() - if not zones: - zones = ['cloud'] - else: - zones = [zone.name for zone in zones] - + zones = self.get_availability_zones() # available sliver/instance/vm types instances = self.driver.shell.db.instance_type_get_all().values() # available images - disk_images = self.get_available_disk_images() - disk_image_objects = [disk_image_to_rspec_object(image) \ - for image in disk_image] + image_manager = ImageManager(self.driver) + disk_images = image_manager.get_available_disk_images() + disk_image_objects = [image.to_rspec_object() \ + for image in disk_images] rspec_nodes = [] for zone in zones: rspec_node = Node() @@ -151,7 +158,7 @@ class OSAggregate: slivers = [] for instance in instances: sliver = instance_to_sliver(instance) - sliver['disk_images'] = disk_image_objects + sliver['disk_image'] = disk_image_objects slivers.append(sliver) rspec_node['slivers'] = slivers @@ -162,27 +169,10 @@ class OSAggregate: def create_project(self, slicename, users, options={}): """ - Create the slice if it doesn't alredy exist - """ - import nova.exception.ProjectNotFound - try: - slice = self.driver.shell.auth_manager.get_project(slicename) - except nova.exception.ProjectNotFound: - # convert urns to user names - usernames = [Xrn(user['urn']).get_leaf() for user in users] - # assume that the first user is the project manager - proj_manager = usernames[0] - self.driver.shell.auth_manager.create_project(slicename, proj_manager) - - def create_project_users(self, slicename, users, options={}): - """ - Add requested users to the specified slice. + Create the slice if it doesn't alredy exist. Create user + accounts that don't already exist """ - - # There doesn't seem to be an effcient way to - # look up all the users of a project, so lets not - # attempt to remove stale users . 
For now lets just - # ensure that the specified users exist + from nova.exception import ProjectNotFound, UserNotFound for user in users: username = Xrn(user['urn']).get_leaf() try: @@ -190,7 +180,13 @@ class OSAggregate: except nova.exception.UserNotFound: self.driver.shell.auth_manager.create_user(username) self.verify_user_keys(username, user['keys'], options) - + + try: + slice = self.driver.shell.auth_manager.get_project(slicename) + except ProjectNotFound: + # assume that the first user is the project manager + proj_manager = Xrn(users[0]['urn']).get_leaf() + self.driver.shell.auth_manager.create_project(slicename, proj_manager) def verify_user_keys(self, username, keys, options={}): """ @@ -207,7 +203,7 @@ class OSAggregate: key = {} key['user_id'] = username key['name'] = username - key['public'] = public_key + key['public_key'] = public_key self.driver.shell.db.key_pair_create(key) # remove old keys @@ -215,10 +211,39 @@ class OSAggregate: for key in existing_keys: if key.public_key in removed_pub_keys: self.driver.shell.db.key_pair_destroy(username, key.name) - + + + def create_security_group(self, slicename, fw_rules=[]): + # use default group by default + group_name = 'default' + if isinstance(fw_rules, list) and fw_rules: + # Each sliver get's its own security group. + # Keep security group names unique by appending some random + # characters on end. 
+ random_name = "".join([random.choice(string.letters+string.digits) + for i in xrange(6)]) + group_name = slicename + random_name + security_group = SecurityGroup(self.driver) + security_group.create_security_group(group_name) + for rule in fw_rules: + security_group.add_rule_to_group(group_name, + protocol = rule.get('protocol'), + cidr_ip = rule.get('cidr_ip'), + port_range = rule.get('port_range'), + icmp_type_code = rule.get('icmp_type_code')) + return group_name + + def add_rule_to_security_group(self, group_name, **kwds): + security_group = SecurityGroup(self.driver) + security_group.add_rule_to_group(group_name=group_name, + protocol=kwds.get('protocol'), + cidr_ip =kwds.get('cidr_ip'), + icmp_type_code = kwds.get('icmp_type_code')) + + def reserve_instance(self, image_id, kernel_id, ramdisk_id, \ - instance_type, key_name, user_data): - conn = self.driver.euca_shell + instance_type, key_name, user_data, group_name): + conn = self.driver.euca_shell.get_euca_connection() logger.info('Reserving an instance: image: %s, kernel: ' \ '%s, ramdisk: %s, type: %s, key: %s' % \ (image_id, kernel_id, ramdisk_id, @@ -229,25 +254,36 @@ class OSAggregate: ramdisk_id=ramdisk_id, instance_type=instance_type, key_name=key_name, - user_data = user_data) - except EC2ResponseError, ec2RespError: - logger.log_exc(ec2RespError) + user_data = user_data, + security_groups=[group_name]) + #placement=zone, + #min_count=min_count, + #max_count=max_count, + + except Exception, err: + logger.log_exc(err) + def run_instances(self, slicename, rspec, keyname, pubkeys): """ - Create the instances thats requested in the rspec + Create the security groups and instances. """ # the default image to use for instnaces that dont # explicitly request an image. # Just choose the first available image for now. 
- available_images = self.get_available_disk_images() - default_image = self.get_disk_images()[0] - default_ami_id = CloudController.image_ec2_id(default_image['ami']['id']) - default_aki_id = CloudController.image_ec2_id(default_image['aki']['id']) - default_ari_id = CloudController.image_ec2_id(default_image['ari']['id']) + image_manager = ImageManager(self.driver) + available_images = image_manager.get_available_disk_images() + default_image_id = None + default_aki_id = None + default_ari_id = None + default_image = available_images[0] + default_image_id = ec2_id(default_image.id, default_image.container_format) + default_aki_id = ec2_id(default_image.kernel_id, 'aki') + default_ari_id = ec2_id(default_image.ramdisk_id, 'ari') # get requested slivers rspec = RSpec(rspec) + user_data = "\n".join(pubkeys) requested_instances = defaultdict(list) # iterate over clouds/zones/nodes for node in rspec.version.get_nodes_with_slivers(): @@ -255,17 +291,47 @@ class OSAggregate: if isinstance(instance_types, list): # iterate over sliver/instance types for instance_type in instance_types: - ami_id = default_ami_id + fw_rules = instance_type.get('fw_rules', []) + group_name = self.create_security_group(slicename, fw_rules) + ami_id = default_image_id aki_id = default_aki_id ari_id = default_ari_id - req_image = instance_type.get('disk_images') + req_image = instance_type.get('disk_image') if req_image and isinstance(req_image, list): req_image_name = req_image[0]['name'] - disk_image = self.get_disk_image(name=req_image_name) + disk_image = image_manager.get_disk_image(name=req_image_name) if disk_image: - ami_id = CloudController.image_ec2_id(disk_image['ami']['id']) - aki_id = CloudController.image_ec2_id(disk_image['aki']['id']) - ari_id = CloudController.image_ec2_id(disk_image['ari']['id']) + ami_id = ec2_id(disk_image.id, disk_image.container_format) + aki_id = ec2_id(disk_image.kernel_id, 'aki') + ari_id = ec2_id(disk_image.ramdisk_id, 'ari') # start the instance - 
self.reserve_instance(ami_id, aki_id, ari_id, \
-                        instance_type['name'], keyname, pubkeys)
+                    self.reserve_instance(image_id=ami_id,
+                                          kernel_id=aki_id,
+                                          ramdisk_id=ari_id,
+                                          instance_type=instance_type['name'],
+                                          key_name=keyname,
+                                          user_data=user_data,
+                                          group_name=group_name)
+
+
+    def delete_instances(self, project_name):
+        instances = self.driver.shell.db.instance_get_all_by_project(project_name)
+        security_group_manager = SecurityGroup(self.driver)
+        for instance in instances:
+            # delete this instance's security groups
+            for security_group in instance.security_groups:
+                # don't delete the default security group
+                if security_group.name != 'default':
+                    security_group_manager.delete_security_group(security_group.name)
+            # destroy instance
+            self.driver.shell.db.instance_destroy(instance.id)
+        return 1
+
+    def stop_instances(self, project_name):
+        instances = self.driver.shell.db.instance_get_all_by_project(project_name)
+        for instance in instances:
+            self.driver.shell.db.instance_stop(instance.id)
+        return 1
+
+    def update_instances(self, project_name):
+        pass