-        # There doesn't seem to be an efficient way to
-        # look up all the users of a project, so let's not
-        # attempt to remove stale users. For now let's just
-        # ensure that the specified users exist.
- for user in users:
- username = Xrn(user['urn']).get_leaf()
- try:
- self.driver.shell.auth_manager.get_user(username)
- except nova.exception.UserNotFound:
- self.driver.shell.auth_manager.create_user(username)
- self.verify_user_keys(username, user['keys'], options)
-
- def verify_user_keys(self, username, keys, options={}):
- """
- Add requested keys.
- """
- append = options.get('append', True)
- existing_keys = self.driver.shell.db.key_pair_get_all_by_user(username)
- existing_pub_keys = [key.public_key for key in existing_keys]
- removed_pub_keys = set(existing_pub_keys).difference(keys)
- added_pub_keys = set(keys).difference(existing_pub_keys)
-
- # add new keys
- for public_key in added_pub_keys:
- key = {}
- key['user_id'] = username
- key['name'] = username
- key['public'] = public_key
- self.driver.shell.db.key_pair_create(key)
-
- # remove old keys
- if not append:
- for key in existing_keys:
- if key.public_key in removed_pub_keys:
- self.driver.shell.db.key_pair_destroy(username, key.name)
-
- def verify_instances(self, slicename, rspec):
-        rspec = RSpec(rspec)
- nodes = rspec.version.get_nodes_with_slivers()
-        old_instances = self.driver.shell.db.instance_get_all_by_project(slicename)
- for node in nodes:
- for slivers in node.get('slivers', []):
- pass
- # get instance type
- # get image
- # start instance
+
+    def create_security_group(self, slicename, fw_rules=None):
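+        """
+        Create a security group for a sliver and return the group name.
+
+        fw_rules is expected to be a list of rule dicts carrying the keys
+        consumed below: 'protocol', 'cidr_ip', 'port_range' and
+        'icmp_type_code'. A sketch of a call (the exact port_range format
+        is an assumption, not confirmed by this code):
+
+            self.create_security_group('slice1',
+                [{'protocol': 'tcp', 'cidr_ip': '0.0.0.0/0',
+                  'port_range': '22:22'}])
+
+        When fw_rules is empty the shared nova 'default' group is reused.
+        """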
+        if fw_rules is None:
+            fw_rules = []
+        # fall back to nova's shared 'default' group
+        group_name = 'default'
+        if isinstance(fw_rules, list) and fw_rules:
+            # Each sliver gets its own security group.
+            # Keep security group names unique by appending a few random
+            # characters to the end.
+            random_name = "".join([random.choice(string.letters + string.digits)
+                                   for i in xrange(6)])
+            group_name = slicename + random_name
+ security_group = SecurityGroup(self.driver)
+ security_group.create_security_group(group_name)
+ for rule in fw_rules:
+                security_group.add_rule_to_group(group_name,
+                                                 protocol=rule.get('protocol'),
+                                                 cidr_ip=rule.get('cidr_ip'),
+                                                 port_range=rule.get('port_range'),
+                                                 icmp_type_code=rule.get('icmp_type_code'))
+            # Open ICMP by default
+            security_group.add_rule_to_group(group_name,
+                                             protocol="icmp",
+                                             cidr_ip="0.0.0.0/0",
+                                             icmp_type_code="-1:-1")
+ return group_name
+
+ def add_rule_to_security_group(self, group_name, **kwds):
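+        """
+        Add a single rule to an existing security group. Recognized
+        keyword arguments mirror the ones consumed below: 'protocol',
+        'cidr_ip', 'port_range' and 'icmp_type_code'.
+        """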
+ security_group = SecurityGroup(self.driver)
+        security_group.add_rule_to_group(group_name=group_name,
+                                         protocol=kwds.get('protocol'),
+                                         cidr_ip=kwds.get('cidr_ip'),
+                                         port_range=kwds.get('port_range'),
+                                         icmp_type_code=kwds.get('icmp_type_code'))
+
+ def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
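+        """
+        Boot one instance per sliver described in this rspec under the
+        given tenant, injecting pubkeys as root's authorized_keys, and
+        return the list of servers created. Failures are logged and
+        skipped so one bad sliver does not abort the rest.
+        """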
+        logger.debug('Reserving instances: name: %s, tenant: %s, key: %s' %
+                     (instance_name, tenant_name, key_name))
+
+ # make sure a tenant exists for this slice
+ tenant = self.create_tenant(tenant_name)
+
+        # Add the sfa admin user to this tenant and update our nova client
+        # connection to use these credentials for the rest of this session.
+        # This ensures that the instances we create are assigned to the
+        # correct tenant.
+        sfa_admin_user = self.driver.shell.auth_manager.users.find(
+            name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
+ user_role = self.driver.shell.auth_manager.roles.find(name='user')
+ admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
+ self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
+ self.driver.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
+ self.driver.shell.nova_manager.connect(tenant=tenant.name)
+
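+        # inject the slice users' public keys into root's authorized_keys
+        # file on each instance via nova's file-injection mechanism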
+ authorized_keys = "\n".join(pubkeys)
+ files = {'/root/.ssh/authorized_keys': authorized_keys}
+ rspec = RSpec(rspec)
+
+ # iterate over clouds/zones/nodes
+ slivers = []
+ for node in rspec.version.get_nodes_with_slivers():
+ instances = node.get('slivers', [])
+ if not instances:
+ continue
+ for instance in instances:
+ try:
+ metadata = {}
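+                    # resolve the flavor and image objects by name;
+                    # novaclient's find() raises NotFound if nothing matches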
+ flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance['name'])
+ image = instance.get('disk_image')
+ if image and isinstance(image, list):
+ image = image[0]
+ else:
+ raise InvalidRSpec("Must specify a disk_image for each VM")
+ image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
+ fw_rules = instance.get('fw_rules', [])
+ group_name = self.create_security_group(instance_name, fw_rules)
+ metadata['security_groups'] = group_name
+ if node.get('component_id'):
+ metadata['component_id'] = node['component_id']
+ if node.get('client_id'):
+ metadata['client_id'] = node['client_id']
+                    server = self.driver.shell.nova_manager.servers.create(
+                        flavor=flavor_id,
+                        image=image_id,
+                        key_name=key_name,
+                        security_groups=[group_name],
+                        files=files,
+                        meta=metadata,
+                        name=instance_name)
+ slivers.append(server)
+ except Exception, err:
+ logger.log_exc(err)
+
+ return slivers
+
+ def delete_instance(self, instance):
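+        """
+        Delete every instance matching this instance's name and id in its
+        tenant. Each instance's security group is removed by a background
+        thread that waits (up to a timeout) for the instance itself to
+        disappear first.
+        """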
+
+ def _delete_security_group(inst):
+ security_group = inst.metadata.get('security_groups', '')
+ if security_group:
+ manager = SecurityGroup(self.driver)
+ timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
+ start_time = time.time()
+ instance_deleted = False
+                while not instance_deleted and (time.time() - start_time) < timeout:
+ tmp_inst = self.driver.shell.nova_manager.servers.findall(id=inst.id)
+ if not tmp_inst:
+ instance_deleted = True
+ time.sleep(.5)
+ manager.delete_security_group(security_group)
+
+ thread_manager = ThreadManager()
+ tenant = self.driver.shell.auth_manager.tenants.find(id=instance.tenant_id)
+ self.driver.shell.nova_manager.connect(tenant=tenant.name)
+ args = {'name': instance.name,
+ 'id': instance.id}
+ instances = self.driver.shell.nova_manager.servers.findall(**args)
+ for instance in instances:
+ # destroy instance
+ self.driver.shell.nova_manager.servers.delete(instance)
+            # delete this instance's security groups
+ thread_manager.run(_delete_security_group, instance)
+ return 1
+
+ def stop_instances(self, instance_name, tenant_name, id=None):
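+        """
+        Pause all instances matching instance_name (and id, if given) in
+        the named tenant.
+        """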
+ self.driver.shell.nova_manager.connect(tenant=tenant_name)
+ args = {'name': instance_name}
+ if id:
+ args['id'] = id
+ instances = self.driver.shell.nova_manager.servers.findall(**args)
+ for instance in instances:
+ self.driver.shell.nova_manager.servers.pause(instance)
+ return 1
+
+ def start_instances(self, instance_name, tenant_name, id=None):
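+        """
+        Resume all paused instances matching instance_name (and id, if
+        given) in the named tenant.
+        """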
+ self.driver.shell.nova_manager.connect(tenant=tenant_name)
+ args = {'name': instance_name}
+ if id:
+ args['id'] = id
+ instances = self.driver.shell.nova_manager.servers.findall(**args)
+ for instance in instances:
+ self.driver.shell.nova_manager.servers.resume(instance)
+ return 1
+
+    def restart_instances(self, instance_name, tenant_name, id=None):
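+        """
+        Restart matching instances by pausing and then resuming them.
+        """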
+ self.stop_instances(instance_name, tenant_name, id)
+ self.start_instances(instance_name, tenant_name, id)
+ return 1
+
+ def update_instances(self, project_name):
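+        """
+        Placeholder: updating a project's instances is not implemented yet.
+        """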
+ pass