def __init__ (self, config):
Driver.__init__(self, config)
- self.shell = Shell(config)
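+ # Shell exposes the keystone (auth_manager) and nova (nova_manager) client connections used below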
+ self.shell = Shell(config=config)
self.cache = None
if config.SFA_AGGREGATE_CACHING:
if NovaDriver.cache is None:
for researcher in researchers:
name = Xrn(researcher).get_leaf()
user = self.shell.auth_manager.users.find(name=name)
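+ # grant the researcher the 'Member' and 'user' roles in the slice's tenant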
+ self.shell.auth_manager.roles.add_user_role(user, 'Member', tenant)
self.shell.auth_manager.roles.add_user_role(user, 'user', tenant)
+
pis = sfa_record.get('pis', [])
for pi in pis:
return slices
# get data from db
- projs = self.shell.auth_manager.get_projects()
- slice_urns = [OSXrn(proj.name, 'slice').urn for proj in projs]
-
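+ # derive slice urns from the names of all nova instances, skipping duplicates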
+ instance_urns = []
+ instances = self.shell.nova_manager.servers.findall()
+ for instance in instances:
+ urn = OSXrn(instance.name, type='slice').urn
+ if urn not in instance_urns:
+     instance_urns.append(urn)
+
# cache the result
if self.cache:
logger.debug ("NovaDriver.list_slices stores value in cache")
- self.cache.add('slices', slice_urns)
+ self.cache.add('slices', instance_urns)
- return slice_urns
+ return instance_urns
# first 2 args are None in case of resource discovery
def list_resources (self, slice_urn, slice_hrn, creds, options):
def sliver_status (self, slice_urn, slice_hrn):
# find out where this slice is currently running
project_name = hrn_to_os_slicename(slice_hrn)
- project = self.shell.auth_manager.get_project(project_name)
- instances = self.shell.db.instance_get_all_by_project(project_name)
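+ # instances belonging to this slice are located by matching the slice's openstack name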
+ instances = self.shell.nova_manager.servers.findall(name=project_name)
if len(instances) == 0:
raise SliverDoesNotExist("You have not allocated any slivers here")
if instances:
top_level_status = 'ready'
result['geni_urn'] = slice_urn
- result['plos_login'] = 'root'
+ result['plos_login'] = 'root'
+ # do we need real dates here?
result['plos_expires'] = None
+ result['geni_expires'] = None
resources = []
for instance in instances:
res = {}
# instances are reached by ip address, not hostname; report the ip
# so users know where to ssh.
- res['plos_hostname'] = instance.hostname
- res['plos_created_at'] = datetime_to_string(utcparse(instance.created_at))
- res['plos_boot_state'] = instance.vm_state
- res['plos_sliver_type'] = instance.instance_type.name
- sliver_id = Xrn(slice_urn).get_sliver_id(instance.project_id, \
- instance.hostname, instance.id)
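+ # fill in the sliver details from the nova server record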
+ res['geni_expires'] = None
+ #res['plos_hostname'] = instance.hostname
+ res['plos_created_at'] = datetime_to_string(utcparse(instance.created))
+ res['plos_boot_state'] = instance.status
+ res['plos_sliver_type'] = self.shell.nova_manager.flavors.find(id=instance.flavor['id']).name
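+ # the sliver id combines the slice urn and the nova instance id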
+ sliver_id = Xrn(slice_urn).get_sliver_id(instance.id)
res['geni_urn'] = sliver_id
- if instance.vm_state == 'running':
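+ # nova reports a running instance with status ACTIVE; map that to 'ready'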
+ if instance.status.lower() == 'active':
res['boot_state'] = 'ready'
res['geni_status'] = 'ready'
else:
aggregate = OSAggregate(self)
rspec = RSpec(rspec_string)
instance_name = hrn_to_os_slicename(slice_hrn)
+
+ # make sure a tenant exists for this slice
+ tenant = aggregate.create_tenant(slice_hrn)
+
+ # add the sfa admin user to this tenant and update our nova client connection
+ # to use these credentials for the rest of this session. This ensures that the
+ # instances we create will be assigned to the correct tenant.
+ sfa_admin_user = self.shell.auth_manager.users.find(name=self.shell.auth_manager.opts['OS_USERNAME'])
+ user_role = self.shell.auth_manager.roles.find(name='user')
+ admin_role = self.shell.auth_manager.roles.find(name='admin')
+ self.shell.auth_manager.roles.add_user_role(sfa_admin_user, admin_role, tenant)
+ self.shell.auth_manager.roles.add_user_role(sfa_admin_user, user_role, tenant)
+ self.shell.nova_manager.connect(tenant=tenant.name)
# assume first user is the caller and use their context
# for the ec2/euca api connection. Also, use the first users