import time
import datetime

from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
    RecordNotFound, SfaNotImplemented, SliverDoesNotExist, \
    SfaInvalidArgument

from sfa.util.sfalogging import logger
from sfa.util.defaultdict import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
from sfa.util.cache import Cache
from sfa.util.osxrn import OSXrn

from sfa.trust.credential import Credential
# used to be used in get_ticket
#from sfa.trust.sfaticket import SfaTicket

from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec

# the driver interface, mostly provides default behaviours
from sfa.managers.driver import Driver
from sfa.openstack.nova_shell import NovaShell
from sfa.openstack.euca_shell import EucaShell
from sfa.openstack.osaggregate import OSAggregate
from sfa.planetlab.plslices import PlSlices
# can be sent as-is; it takes care of authentication
# from the global config
#
class NovaDriver(Driver):
    """SFA driver backed by OpenStack Nova.

    Translates SFA registry and aggregate calls into Nova auth-manager,
    db and euca (EC2 API) operations.
    """

    # the cache instance is a class member so it survives across incoming requests
    cache = None

    def __init__(self, config):
        """Build the Nova and Euca shells and (optionally) the shared cache.

        config -- SFA configuration object; SFA_AGGREGATE_CACHING enables
                  the class-level aggregate cache.
        """
        Driver.__init__(self, config)
        self.shell = NovaShell(config)
        self.euca_shell = EucaShell(config)
        self.cache = None
        if config.SFA_AGGREGATE_CACHING:
            # lazily create the single class-wide cache on first use
            if NovaDriver.cache is None:
                NovaDriver.cache = Cache()
            self.cache = NovaDriver.cache
########################################
########## registry oriented
##########
def register (self, sfa_record, hrn, pub_key):
type = sfa_record['type']
- pl_record = self.sfa_fields_to_pl_fields(type, hrn, sfa_record)
-
+
+ #pl_record = self.sfa_fields_to_pl_fields(type dd , hrn, sfa_record)
+
if type == 'slice':
- acceptable_fields=['url', 'instantiation', 'name', 'description']
# add slice description, name, researchers, PI
- pass
+ name = Xrn(hrn).get_leaf()
+ researchers = sfa_record.get('researchers', [])
+ pis = sfa_record.get('pis', [])
+ project_manager = None
+ description = sfa_record.get('description', None)
+ if pis:
+ project_manager = Xrn(pis[0], 'user').get_leaf()
+ elif researchers:
+ project_manager = Xrn(researchers[0], 'user').get_leaf()
+ if not project_manager:
+ err_string = "Cannot create a project without a project manager. " + \
+ "Please specify at least one PI or researcher for project: " + \
+ name
+ raise SfaInvalidArgument(err_string)
+
+ users = [Xrn(user, 'user').get_leaf() for user in \
+ pis + researchers]
+ self.shell.auth_manager.create_project(name, project_manager, description, users)
elif type == 'user':
# add person roles, projects and keys
- pass
- return pointer
+ name = Xrn(hrn).get_leaf()
+ self.shell.auth_manager.create_user(name)
+ projects = sfa_records.get('slices', [])
+ for project in projects:
+ project_name = Xrn(project).get_leaf()
+ self.shell.auth_manager.add_to_project(name, project_name)
+ keys = sfa_records.get('keys', [])
+ for key in keys:
+ key_dict = {
+ 'user_id': name,
+ 'name': name,
+ 'public': key,
+ }
+ self.shell.db.key_pair_create(key_dict)
+
+ return name
##########
# xxx actually old_sfa_record comes filled with plc stuff as well in the original code
def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
- pointer = old_sfa_record['pointer']
- type = old_sfa_record['type']
-
+ type = new_sfa_record['type']
+
# new_key implemented for users only
if new_key and type not in [ 'user' ]:
raise UnknownSfaType(type)
elif type == "slice":
- # can update description, researchers and PI
- pass
+ # can update project manager and description
+ name = Xrn(hrn).get_leaf()
+ researchers = sfa_record.get('researchers', [])
+ pis = sfa_record.get('pis', [])
+ project_manager = None
+ description = sfa_record.get('description', None)
+ if pis:
+ project_manager = Xrn(pis[0], 'user').get_leaf()
+ elif researchers:
+ project_manager = Xrn(researchers[0], 'user').get_leaf()
+ self.shell.auth_manager.modify_project(name, project_manager, description)
+
elif type == "user":
- # can update slices, keys and roles
+ # can techinally update access_key and secret_key,
+ # but that is not in our scope, so we do nothing.
pass
return True
type=sfa_record['type']
name = Xrn(sfa_record['hrn']).get_leaf()
if type == 'user':
- if self.shell.user_get(name):
- self.shell.user_delete(name)
+ if self.shell.auth_manager.get_user(name):
+ self.shell.auth_manager.delete_user(name)
elif type == 'slice':
- if self.shell.project_get(name):
- self.shell.project_delete(name)
+ if self.shell.auth_manager.get_project(name):
+ self.shell.auth_manager.delete_project(name)
return True
name = Xrn(record['hrn']).get_leaf()
os_record = None
if record['type'] == 'user':
- os_record = self.shell.user_get(name)
+ os_record = self.shell.auth_manager.get_user(name)
+ projects = self.shell.db.project_get_by_user(name)
record['slices'] = [self.hrn + "." + proj.name for \
- proj in os_record.projects]
- record['roles'] = [role for role in os_record.roles]
- keys = self.shell.key_pair_get_all_by_user(name)
+ proj in projects]
+ record['roles'] = self.shell.db.user_get_roles(name)
+ keys = self.shell.db.key_pair_get_all_by_user(name)
record['keys'] = [key.public_key for key in keys]
elif record['type'] == 'slice':
- os_record = self.shell.project_get(name)
+ os_record = self.shell.auth_manager.get_project(name)
record['description'] = os_record.description
- record['PI'] = self.hrn + "." + os_record.project_manager
+ record['PI'] = [self.hrn + "." + os_record.project_manager.name]
record['geni_creator'] = record['PI']
- record['researcher'] = [self.hrn + "." + user.name for \
- user in os_record.members]
+ record['researcher'] = [self.hrn + "." + user for \
+ user in os_record.member_ids]
else:
continue
record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
record['geni_certificate'] = record['gid']
record['name'] = os_record.name
- if os_record.created_at is not None:
- record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
- if os_record.updated_at is not None:
- record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))
+ #if os_record.created_at is not None:
+ # record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
+ #if os_record.updated_at is not None:
+ # record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))
return records
return slices
# get data from db
- slices = self.shell.project_get_all()
- slice_urns = [OSXrn(name, 'slice').urn for name in slice]
+ projs = self.shell.auth_manager.get_projects()
+ slice_urns = [OSXrn(proj.name, 'slice').urn for proj in projs]
# cache the result
if self.cache:
def sliver_status (self, slice_urn, slice_hrn):
# find out where this slice is currently running
- slicename = hrn_to_pl_slicename(slice_hrn)
-
- slices = self.shell.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
- if len(slices) == 0:
- raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
- slice = slices[0]
-
- # report about the local nodes only
- nodes = self.shell.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
- ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
-
- if len(nodes) == 0:
+ project_name = Xrn(slice_urn).get_leaf()
+ project = self.shell.auth_manager.get_project(project_name)
+ instances = self.shell.db.instance_get_all_by_project(project_name)
+ if len(instances) == 0:
raise SliverDoesNotExist("You have not allocated any slivers here")
-
- site_ids = [node['site_id'] for node in nodes]
-
+
result = {}
top_level_status = 'unknown'
- if nodes:
+ if instances:
top_level_status = 'ready'
result['geni_urn'] = slice_urn
- result['pl_login'] = slice['name']
- result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
+ result['plos_login'] = 'root'
+ result['plos_expires'] = None
resources = []
- for node in nodes:
+ for instance in instances:
res = {}
- res['pl_hostname'] = node['hostname']
- res['pl_boot_state'] = node['boot_state']
- res['pl_last_contact'] = node['last_contact']
- if node['last_contact'] is not None:
-
- res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
- sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id'])
+ # instances are accessed by ip, not hostname. We need to report the ip
+ # somewhere so users know where to ssh to.
+ res['plos_hostname'] = instance.hostname
+ res['plos_created_at'] = datetime_to_string(utcparse(instance.created_at))
+ res['plos_boot_state'] = instance.vm_state
+ res['plos_sliver_type'] = instance.instance_type.name
+ sliver_id = Xrn(slice_urn).get_sliver_id(instance.project_id, \
+ instance.hostname, instance.id)
res['geni_urn'] = sliver_id
- if node['boot_state'] == 'boot':
- res['geni_status'] = 'ready'
+
+ if instance.vm_state == 'running':
+ res['boot_state'] = 'ready';
else:
- res['geni_status'] = 'failed'
- top_level_status = 'failed'
-
- res['geni_error'] = ''
-
+ res['boot_state'] = 'unknown'
resources.append(res)
result['geni_status'] = top_level_status
def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+ project_name = get_leaf(slice_hrn)
aggregate = OSAggregate(self)
- slicename = get_leaf(slice_hrn)
-
# parse rspec
rspec = RSpec(rspec_string)
- requested_attributes = rspec.version.get_slice_attributes()
-
- # ensure slice record exists
- slice = aggregate.verify_slice(slicename, options=options)
+
+ # ensure project and users exist in local db
+ aggregate.create_project(project_name, users, options=options)
+
+ # collect publick keys
+ pubkeys = []
+ project_key = None
+ for user in users:
+ pubkeys.extend(user['keys'])
+ # assume first user is the caller and use their context
+ # for the ec2/euca api connection. Also, use the first users
+ # key as the project key.
+ if not project_key:
+ username = Xrn(user['urn']).get_leaf()
+ user_keys = self.shell.db.key_pair_get_all_by_user(username)
+ if user_keys:
+ project_key = user_keys[0].name
+
# ensure person records exists
- persons = aggregate.verify_slice_users(slicename, users, options=options)
- # add/remove slice from nodes
- slices.verify_instances(slicename, rspec)
+ self.euca_shell.init_context(project_name)
+ aggregate.run_instances(project_name, rspec_string, project_key, pubkeys)
return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+ # we need to do this using the context of one of the slice users
+ project_name = Xrn(slice_urn).get_leaf()
+ self.euca_shell.init_context(project_name)
name = OSXrn(xrn=slice_urn).name
- slice = self.shell.project_get(name)
- if not slice:
- return 1
-
- self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
- instances = self.shell.instance_get_all_by_project(name)
- for instance in instances:
- self.shell.instance_destroy(instance.instance_id)
- return 1
+ aggregate = OSAggregate(self)
+ return aggregate.delete_instances(name)
+
+ def update_sliver(self, slice_urn, slice_hrn, rspec, creds, options):
+ name = OSXrn(xrn=slice_urn).name
+ aggregate = OSAggregate(self)
+ return aggregate.update_instances(name)
def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
return True
def stop_slice (self, slice_urn, slice_hrn, creds):
name = OSXrn(xrn=slice_urn).name
- slice = self.shell.project_get(name)
- if not slice:
- return 1
+ aggregate = OSAggregate(self)
+ return aggregate.stop_instances(name)
- self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
- instances = self.shell.instance_get_all_by_project(name)
- for instance in instances:
- self.shell.instance_stop(instance.instance_id)
- return 1
-
def reset_slice (self, slice_urn, slice_hrn, creds):
raise SfaNotImplemented ("reset_slice not available at this interface")