def testbed_name (self): return "openstack"
def aggregate_version(self):
    """Return driver-specific fields for GetVersion.

    Returns an empty dict: the advertised RSpec versions
    ('geni_request_rspec_versions' / 'geni_ad_rspec_versions') are
    presumably filled in by the generic caller now — TODO confirm, as an
    older revision built them here from VersionManager.
    """
    return {}
def list_slices(self, creds, options):
    """Return the slice URNs known to this aggregate.

    Maps every nova server instance name to a slice URN, skipping
    duplicate instance names.

    :param creds: caller credentials (unused here)
    :param options: call options (unused here)
    :returns: list of slice URN strings
    """
    # get data from nova; dedupe on the raw instance *name*, since that is
    # what repeats across servers (the original compared names against the
    # already-built URN list, so the duplicate check could never match)
    seen_names = set()
    instance_urns = []
    for instance in self.shell.nova_manager.servers.findall():
        if instance.name not in seen_names:
            seen_names.add(instance.name)
            instance_urns.append(OSXrn(instance.name, type='slice').urn)
    return instance_urns
def list_resources(self, creds, version, options):
    """Return the advertisement RSpec for this aggregate.

    Delegates entirely to OSAggregate.list_resources; the old
    slice-specific parameters and the advertisement cache were removed
    from this code path.

    :param creds: caller credentials (unused here)
    :param version: requested RSpec version object
    :param options: call options forwarded to the aggregate
    :returns: RSpec string produced by the aggregate
    """
    aggregate = OSAggregate(self)
    rspec = aggregate.list_resources(version=version, options=options)
    return rspec
def describe(self, creds, urns, version, options):
    """Describe the slivers named by *urns* (GENI AM API v3 Describe).

    :param creds: caller credentials (unused here)
    :param urns: list of slice/sliver URNs to describe
    :param version: requested RSpec version object
    :param options: call options forwarded to the aggregate
    :returns: whatever OSAggregate.describe returns for these URNs
    """
    aggregate = OSAggregate(self)
    return aggregate.describe(urns, version=version, options=options)
def sliver_status(self, slice_urn, slice_hrn):
    """Report the status of the slivers belonging to *slice_hrn*.

    NOTE(review): this method arrived as unresolved patch residue — the
    guard around the raise, and the per-instance loop header, were
    missing entirely.  The reconstruction below restores a coherent
    control flow; the exact per-instance fields (plos_* keys, the
    'ready' branch condition) should be confirmed against the upstream
    driver.

    :raises SliverDoesNotExist: when no instances exist for this slice
    :returns: dict with 'geni_urn', 'plos_login' and 'geni_resources'
    """
    # update nova connection to operate inside this slice's tenant
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    self.shell.nova_manager.connect(tenant=tenant_name)

    # find out where this slice is currently running
    project_name = hrn_to_os_slicename(slice_hrn)
    instances = self.shell.nova_manager.servers.findall(name=project_name)
    if not instances:
        raise SliverDoesNotExist("You have not allocated any slivers here")

    result = {}
    result['geni_urn'] = slice_urn
    result['plos_login'] = 'root'
    # do we need real dates here?

    resources = []
    for instance in instances:
        res = {}
        # assumes instance.status follows nova's convention where
        # 'ACTIVE' means the server is up — TODO confirm
        if instance.status.lower() == 'active':
            res['boot_state'] = 'ready'
            res['geni_status'] = 'ready'
        else:
            res['boot_state'] = 'unknown'
            res['geni_status'] = 'unknown'
        res['geni_allocation_status'] = 'geni_provisioned'
        resources.append(res)

    result['geni_resources'] = resources
    return result
def create_sliver(self, slice_urn, slice_hrn, creds, rspec_string, users, options):
    """Instantiate the slivers requested by *rspec_string* for this slice.

    Collects the users' public keys, boots the requested instances in
    the slice's tenant, and returns a manifest describing the result.

    :param slice_urn: URN of the target slice
    :param slice_hrn: HRN of the target slice
    :param creds: caller credentials (unused here)
    :param rspec_string: request RSpec as a string
    :param users: list of user records, each with a 'keys' list
    :param options: call options (unused here)
    :returns: manifest from OSAggregate.describe
    """
    aggregate = OSAggregate(self)

    # assume first user is the caller and use their context
    # for the ec2/euca api connection. Also, use the first users
    # key as the project key.
    # NOTE(review): both names below were used without being defined in
    # the patched original; key_name=None defers key selection to the
    # aggregate — TODO confirm against OSAggregate.run_instances.
    key_name = None
    pubkeys = []
    for user in users:
        pubkeys.extend(user['keys'])

    rspec = RSpec(rspec_string)
    instance_name = hrn_to_os_slicename(slice_hrn)
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    aggregate.run_instances(instance_name, tenant_name, rspec_string, key_name, pubkeys)

    return aggregate.describe(slice_xrn=slice_urn, version=rspec.version)
def delete_sliver(self, slice_urn, slice_hrn, creds, options):
    """Delete every instance belonging to this slice.

    :param slice_urn: URN of the target slice (unused here)
    :param slice_hrn: HRN of the target slice
    :param creds: caller credentials (unused here)
    :param options: call options (unused here)
    :returns: result of OSAggregate.delete_instances
    """
    aggregate = OSAggregate(self)
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    project_name = hrn_to_os_slicename(slice_hrn)
    return aggregate.delete_instances(project_name, tenant_name)
def update_sliver(self, slice_urn, slice_hrn, rspec, creds, options):
    """Update the instances of this slice to match *rspec*.

    :param slice_urn: URN of the target slice (unused here)
    :param slice_hrn: HRN of the target slice
    :param rspec: requested RSpec (unused here)
    :param creds: caller credentials (unused here)
    :param options: call options (unused here)
    :returns: result of OSAggregate.update_instances
    """
    name = hrn_to_os_slicename(slice_hrn)
    # NOTE(review): tenant_name is computed but never forwarded; sibling
    # methods (delete_sliver, stop_slice) pass it to the aggregate —
    # confirm whether update_instances should take it too.
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    aggregate = OSAggregate(self)
    # the original had an unreachable 'return 1' after this return; dropped
    return aggregate.update_instances(name)
def stop_slice(self, slice_urn, slice_hrn, creds):
    """Stop (but do not delete) this slice's instances.

    :param slice_urn: URN of the target slice; its name selects the instances
    :param slice_hrn: HRN of the target slice; selects the tenant
    :param creds: caller credentials (unused here)
    :returns: result of OSAggregate.stop_instances
    """
    tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
    name = OSXrn(xrn=slice_urn).name
    aggregate = OSAggregate(self)
    return aggregate.stop_instances(name, tenant_name)
def reset_slice(self, slice_urn, slice_hrn, creds):
    """Resetting a slice is not supported by this aggregate; always raises."""
    raise SfaNotImplemented("reset_slice not available at this interface")