from sfa.trust.credential import Credential
from sfa.trust.gid import GID
-from sfa.storage.record import SfaRecord
+from sfa.storage.record import Record
def determine_sfa_filekind(fn):
elif kind=="credential":
cred = Credential(filename = filename)
print '--------------------',filename,'IS A',kind
- cred.dump(dump_parents = options.dump_parents)
+ cred.dump(dump_parents = options.dump_parents, show_xml=options.show_xml)
if options.extract_gids:
print '--------------------',filename,'embedded GIDS'
extract_gids(cred, extract_parents = options.dump_parents)
parser.add_option("-g", "--extract-gids", action="store_true", dest="extract_gids", default=False, help="Extract GIDs from credentials")
parser.add_option("-p", "--dump-parents", action="store_true", dest="dump_parents", default=False, help="Show parents")
parser.add_option("-e", "--extensions", action="store_true", dest="show_extensions", default="False", help="Show certificate extensions")
- parser.add_option("-v", "--verbose", action='count', dest='verbose', default=0)
+ parser.add_option("-v", "--verbose", action='count', dest='verbose', default=0, help="More and more verbose")
+ parser.add_option("-x", "--xml", action='store_true', dest='show_xml', default=False, help="dumps xml tree (cred. only)")
(options, args) = parser.parse_args()
logger.setLevelFromOptVerbose(options.verbose)
</variablelist>
</category>
+ <!-- ======================================== -->
+ <category id="sfa_nitos">
+ <name></name>
+ <description>The settings that tell this SFA instance how to interact with the NITOS testbed.</description>
+
+ <variablelist>
+ <variable id="url" type="string">
+ <name>XMLRPC URL</name>
+ <value>http://195.251.17.239:8080/RPC2</value>
+ <description>URL for the NITOS Scheduler xmlrpc API</description>
+ </variable>
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+
</variables>
<comps>
"SFA_AGGREGATE_HOST",
"SFA_SM_HOST",
"SFA_DB_HOST",
- "SFA_PLC_URL",
- "SFA_PLC_USER",
- "SFA_PLC_PASSWORD",
]
+flavour_xml_section_hash = { \
+ 'pl':'sfa_plc',
+ 'openstack':'sfa_nova',
+ 'fd':'sfa_federica',
+ 'nitos':'sfa_nitos'
+ }
configuration={ \
'name':'sfa',
'service':"sfa",
(service,service))
elif command in "uU":
global usual_variables
+ global flavour_xml_section_hash
try:
for varname in usual_variables:
(category,variable) = cdef.locate_varname(varname)
if not (category is None and variable is None):
prompt_variable(cdef, cread, cwrite, category, variable, False)
+
+ # set the driver variable according to the already set flavour
+ generic_flavour = cwrite.items('sfa')[0][1]
+ for section in cdef.sections():
+ if generic_flavour in flavour_xml_section_hash and flavour_xml_section_hash[generic_flavour] == section:
+ for item in cdef.items(section):
+ category = section
+ variable = item[0]
+ prompt_variable(cdef, cread, cwrite, category, variable, False)
+ break
+
except Exception, inst:
if (str(inst) != 'BailOut'):
raise
'sfa/rspecs/versions',
'sfa/client',
'sfa/planetlab',
+ 'sfa/nitos',
'sfa/openstack',
'sfa/federica',
'sfatables',
%define name sfa
%define version 2.1
-%define taglevel 13
+%define taglevel 16
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
Group: Applications/System
Requires: sfa
+%package nitos
+Summary: the SFA layer around NITOS
+Group: Applications/System
+Requires: sfa
+
%package sfatables
Summary: sfatables policy tool for SFA
Group: Applications/System
%description federica
The SFA driver for FEDERICA.
+%description nitos
+The SFA driver for NITOS.
+
%description sfatables
sfatables is a tool for defining access and admission control policies
in an SFA network, in much the same way as iptables is for ip
%files federica
%{python_sitelib}/sfa/federica
+%files nitos
+%{python_sitelib}/sfa/nitos
+
%files sfatables
/etc/sfatables/*
%{_bindir}/sfatables
[ "$1" -ge "1" ] && service sfa-cm restart || :
%changelog
+* Mon Oct 01 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-16
+- various tweaks for the nitos driver
+
+* Wed Sep 26 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-15
+- first stab at a driver for the NITOS/OMF testbed (sep. pkg)
+- deeper cleanup of the data-dependencies between SFA and the testbed
+- in particular, sfi create issues Resolve(details=False)
+- for that purpose, Resolve exposes reg-* keys for SFA builtins
+- which in turn allows sfi list to show PIs, slice members and keys
+- NOTE: sfa-config-tty is known to be broken w/ less frequently used func's
+- Shows stacktrace when startup fails (DB conn, wrong flavour, etc..)
+
+* Mon Sep 17 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-14
+- configurable data-dir (/var/lib/sfa)
+- no more dependent on myplc-config
+- some support for hrns with _ instead of \.
+- fix for PL importing in presence of gpg keys
+- DeleteSliver returns True instead of 1 in case of success
+- Various improvements on the openstack/nova side
+- new package sfa-nitos
+
* Wed Jul 11 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-13
- bugfix that prevented to call 'sfi create' - (was broken in sfa-2.1-12)
- sfi to remove expired credentials
-import sys
+###
+#
+# Thierry - 2012 sept 21
+#
+# it seems terribly wrong that the client should decide to use PG- or PL- related code
+# esp. in a context where we're trying to have more and more kinds of testbeds involved
+#
+# also, the 'users' field that CreateSliver is expecting (the key point here is to get this right)
+# is specified to have at least a urn and a list of keys, both of these being supported natively
+# in the sfa db
+# So long story short, it seems to me that we should have a common code that fills in 'urn' and 'keys'
+# and then code that tentatively tries to add as much extra info that we can get on these users
+#
+# the fact e.g. that PlanetLab insists on getting a first_name and last_name is not
+# exactly consistent with the GENI spec. of CreateSliver
+#
+# builds the 'users' argument for GENI/PG-style CreateSliver:
+# per the GENI spec each user needs at least a urn and a list of keys;
+# both are now taken from the SFA-builtin 'reg-*' fields that Resolve
+# exposes natively, instead of the former testbed-derived fields
def pg_users_arg(records):
    users = []
    for record in records:
        if record['type'] != 'user':
            continue
-        user = {'urn': record['geni_urn'],
-                'keys': record['keys']}
+        user = {'urn': record['reg-urn'],
+                'keys': record['reg-keys'],
+                }
        users.append(user)
    return users
-def sfa_users_arg(records, slice_record):
+# builds the 'users' argument for SFA-style CreateSliver:
+# minimally urn + keys + the enclosing slice record, plus whatever
+# testbed-specific extras (planetlab, nitos) the resolved record
+# happens to expose — missing fields are simply skipped
+def sfa_users_arg (records, slice_record):
     users = []
-    print>>sys.stderr, " \r\n \r\n \t CLIENT_HELPER.PY sfa_users_arg slice_record %s \r\n records %s"%(slice_record,records)
     for record in records:
         if record['type'] != 'user':
             continue
-        user = {'urn': record['geni_urn'],
-                'keys': record['keys'],
-                'email': record['email'], # needed for MyPLC
-                'person_id': record['record_id'],
-                'hrn': record['hrn'],
-                'type': record['type'],
-                'authority' : record['authority'],
-                'gid' : record['gid'],
-                'first_name': record['first_name'], # needed for MyPLC
-                'last_name': record['last_name'], # needed for MyPLC
-                'slice_record': slice_record, # needed for legacy refresh peer
-                'key_ids': record['key_ids'] # needed for legacy refresh peer
-                }
-        users.append(user)
-        print>>sys.stderr, " \r\n \r\n \t CLIENT_HELPER.PY sfa_users_arg user %s",user
-    return users
+        user = {'urn': record['reg-urn'],
+                'keys': record['reg-keys'],
+                'slice_record': slice_record,
+                }
+        # fill as much stuff as possible from planetlab or similar
+        # note that reg-email is not yet available
+        pl_fields = ['email', 'person_id', 'first_name', 'last_name', 'key_ids']
+        nitos_fields = [ 'email', 'user_id' ]
+        extra_fields = list ( set(pl_fields).union(set(nitos_fields)))
+        # try to fill all these in
+        for field in extra_fields:
+            if record.has_key(field): user[field]=record[field]
+        users.append(user)
+
+    return users
def sfa_to_pg_users_arg(users):
choices=('text', 'xml', 'simple'), help='display record in different formats')
def show(self, xrn, type=None, format=None, outfile=None):
"""Display details for a registered object"""
- records = self.api.manager.Resolve(self.api, xrn, type, True)
+ records = self.api.manager.Resolve(self.api, xrn, type, details=True)
for record in records:
sfa_record = Record(dict=record)
sfa_record.dump(format)
importer.run()
def sync_db(self):
- """Initiailize or upgrade the db"""
+ """Initialize or upgrade the db"""
from sfa.storage.dbschema import DBSchema
dbschema=DBSchema()
dbschema.init_or_upgrade
f.close()
return
+# used in sfi list
+# renders a list of resolved records on the terminal, grouped and
+# sorted by type; a type-specific renderer (terminal_render_<type>)
+# is picked when one is defined below, default renderer otherwise
+def terminal_render (records,options):
+    # sort records by type
+    grouped_by_type={}
+    for record in records:
+        type=record['type']
+        if type not in grouped_by_type: grouped_by_type[type]=[]
+        grouped_by_type[type].append(record)
+    group_types=grouped_by_type.keys()
+    group_types.sort()
+    for type in group_types:
+        group=grouped_by_type[type]
+#        print 20 * '-', type
+        # e.g. terminal_render_user for type 'user'
+        try: renderer=eval('terminal_render_'+type)
+        except: renderer=terminal_render_default
+        for record in group: renderer(record,options)
+
+# grammar helper: e.g. "No key" / "1 key" / "4 keys"
+def render_plural (how_many, name,names=None):
+    if not names: names="%ss"%name
+    if how_many<=0: return "No %s"%name
+    elif how_many==1: return "1 %s"%name
+    else: return "%d %s"%(how_many,names)
+
+def terminal_render_default (record,options):
+    print "%s (%s)" % (record['hrn'], record['type'])
+def terminal_render_user (record, options):
+    print "%s (User)"%record['hrn'],
+    if record.get('reg-pi-authorities',None): print " [PI at %s]"%(" and ".join(record['reg-pi-authorities'])),
+    if record.get('reg-slices',None): print " [IN slices %s]"%(" and ".join(record['reg-slices'])),
+    user_keys=record.get('reg-keys',[])
+    if not options.verbose:
+        print " [has %s]"%(render_plural(len(user_keys),"key"))
+    else:
+        # -v: show the actual ssh keys, one per line
+        print ""
+        for key in user_keys: print 8*' ',key.strip("\n")
+
+def terminal_render_slice (record, options):
+    print "%s (Slice)"%record['hrn'],
+    if record.get('reg-researchers',None): print " [USERS %s]"%(" and ".join(record['reg-researchers'])),
+#    print record.keys()
+    print ""
+def terminal_render_authority (record, options):
+    print "%s (Authority)"%record['hrn'],
+    if record.get('reg-pis',None): print " [PIS %s]"%(" and ".join(record['reg-pis'])),
+    print ""
+def terminal_render_node (record, options):
+    print "%s (Node)"%record['hrn']
+
+
# minimally check a key argument
def check_ssh_key (key):
good_ssh_key = r'^.*(?:ssh-dss|ssh-rsa)[ ]+[A-Za-z0-9+/=]+(?: .*)?$'
if command == 'list':
parser.add_option("-r", "--recursive", dest="recursive", action='store_true',
help="list all child records", default=False)
+ parser.add_option("-v", "--verbose", dest="verbose", action='store_true',
+ help="gives details, like user keys", default=False)
if command in ("delegate"):
parser.add_option("-u", "--user",
action="store_true", dest="delegate_user", default=False,
# Main: parse arguments and dispatch to command
#
def dispatch(self, command, command_options, command_args):
- return getattr(self, command)(command_options, command_args)
+ method=getattr(self, command,None)
+ if not method:
+ print "Unknown command %s"%command
+ return
+ return method(command_options, command_args)
def main(self):
self.sfi_parser = self.create_parser()
try:
self.dispatch(command, command_options, command_args)
- except KeyError:
- self.logger.critical ("Unknown command %s"%command)
+ except:
+ self.logger.log_exc ("sfi command %s failed"%command)
sys.exit(1)
return
raise Exception, "Not enough parameters for the 'list' command"
# filter on person, slice, site, node, etc.
- # THis really should be in the self.filter_records funct def comment...
+ # This really should be in the self.filter_records funct def comment...
list = filter_records(options.type, list)
- for record in list:
- print "%s (%s)" % (record['hrn'], record['type'])
+ terminal_render (list, options)
if options.file:
save_records_to_file(options.file, list, options.fileformat)
return
self.print_help()
sys.exit(1)
hrn = args[0]
- record_dicts = self.registry().Resolve(hrn, self.my_credential_string)
+ # explicitly require Resolve to run in details mode
+ record_dicts = self.registry().Resolve(hrn, self.my_credential_string, {'details':True})
record_dicts = filter_records(options.type, record_dicts)
if not record_dicts:
self.logger.error("No record of type %s"% options.type)
# keys: [<ssh key A>, <ssh key B>]
# }]
users = []
+ # xxx Thierry 2012 sept. 21
+ # contrary to what I was first thinking, calling Resolve with details=False does not yet work properly here
+ # I am turning details=True on again on a - hopefully - temporary basis, just to get this whole thing to work again
slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string])
- if slice_records and 'researcher' in slice_records[0] and slice_records[0]['researcher']!=[]:
+ # slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string], {'details':True})
+ if slice_records and 'reg-researchers' in slice_records[0] and slice_records[0]['reg-researchers']:
slice_record = slice_records[0]
- user_hrns = slice_record['researcher']
+ user_hrns = slice_record['reg-researchers']
user_urns = [hrn_to_urn(hrn, 'user') for hrn in user_hrns]
user_records = self.registry().Resolve(user_urns, [self.my_credential_string])
--- /dev/null
+from sfa.generic import Generic
+
+# the 'nitos' flavour: binds the generic SFA framework to the
+# NITOS-specific importer and driver, while reusing the standard
+# api class and the standard server-side managers
+class nitos (Generic):
+
+    # the importer class
+    def importer_class (self):
+        import sfa.importer.nitosimporter
+        return sfa.importer.nitosimporter.NitosImporter
+
+    # use the standard api class
+    def api_class (self):
+        import sfa.server.sfaapi
+        return sfa.server.sfaapi.SfaApi
+
+    # the manager classes for the server-side services
+    def registry_manager_class (self) :
+        import sfa.managers.registry_manager
+        return sfa.managers.registry_manager.RegistryManager
+    def slicemgr_manager_class (self) :
+        import sfa.managers.slice_manager
+        return sfa.managers.slice_manager.SliceManager
+    def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        import sfa.nitos.nitosdriver
+        return sfa.nitos.nitosdriver.NitosDriver
+
+    # for the component mode, to be run on board planetlab nodes
+    # component mode is not supported for this flavour, hence the Nones
+    # manager class
+    def component_manager_class (self):
+        return None
+    # driver_class
+    def component_driver_class (self):
+        return None
--- /dev/null
+
+import os
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+
+from sfa.trust.gid import create_uuid
+from sfa.trust.certificate import convert_public_key, Keypair
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
+
+from sfa.nitos.nitosshell import NitosShell
+from sfa.nitos.nitosxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_nitos_slicename, username_to_hrn
+
+# compute the hrn for a NITOS site under the local interface
+def _get_site_hrn(interface_hrn, site):
+    hrn = ".".join([interface_hrn, site['name']])
+    return hrn
+
+
+class NitosImporter:
+    """Import the NITOS testbed contents (site, nodes, users, slices)
+    into the SFA registry database, creating records and gids as needed
+    and deleting local records that are no longer backed by the testbed
+    ('stale' records)."""
+
+    def __init__ (self, auth_hierarchy, logger):
+        self.auth_hierarchy = auth_hierarchy
+        self.logger=logger
+
+    def add_options (self, parser):
+        # we don't have any options for now
+        pass
+
+    # hrn hash is initialized from current db
+    # remember just-created records as we go
+    # xxx might make sense to add a UNIQUE constraint in the db itself
+    def remember_record_by_hrn (self, record):
+        tuple = (record.type, record.hrn)
+        if tuple in self.records_by_type_hrn:
+            self.logger.warning ("NitosImporter.remember_record_by_hrn: duplicate (%s,%s)"%tuple)
+            return
+        self.records_by_type_hrn [ tuple ] = record
+
+    # ditto for pointer hash
+    def remember_record_by_pointer (self, record):
+        if record.pointer == -1:
+            self.logger.warning ("NitosImporter.remember_record_by_pointer: pointer is void")
+            return
+        tuple = (record.type, record.pointer)
+        if tuple in self.records_by_type_pointer:
+            self.logger.warning ("NitosImporter.remember_record_by_pointer: duplicate (%s,%s)"%tuple)
+            return
+        self.records_by_type_pointer [ ( record.type, record.pointer,) ] = record
+
+    def remember_record (self, record):
+        self.remember_record_by_hrn (record)
+        self.remember_record_by_pointer (record)
+
+    def locate_by_type_hrn (self, type, hrn):
+        return self.records_by_type_hrn.get ( (type, hrn), None)
+
+    def locate_by_type_pointer (self, type, pointer):
+        return self.records_by_type_pointer.get ( (type, pointer), None)
+
+    # a convenience/helper function to see if a record is already known
+    # a former, broken, attempt (in 2.1-9) had been made
+    # to try and use 'pointer' as a first, most significant attempt
+    # the idea being to preserve stuff as much as possible, and thus
+    # to avoid creating a new gid in the case of a simple hrn rename
+    # however this of course doesn't work as the gid depends on the hrn...
+    #def locate (self, type, hrn=None, pointer=-1):
+    #    if pointer!=-1:
+    #        attempt = self.locate_by_type_pointer (type, pointer)
+    #        if attempt : return attempt
+    #    if hrn is not None:
+    #        attempt = self.locate_by_type_hrn (type, hrn,)
+    #        if attempt : return attempt
+    #    return None
+
+    # this makes the run method a bit abstruse - out of the way
+
+    def run (self, options):
+        config = Config ()
+        interface_hrn = config.SFA_INTERFACE_HRN
+        root_auth = config.SFA_REGISTRY_ROOT_AUTH
+        shell = NitosShell (config)
+
+        ######## retrieve all existing SFA objects
+        all_records = dbsession.query(RegRecord).all()
+
+        # create hash by (type,hrn)
+        # we essentially use this to know if a given record is already known to SFA
+        self.records_by_type_hrn = \
+            dict ( [ ( (record.type, record.hrn) , record ) for record in all_records ] )
+        # create hash by (type,pointer)
+        self.records_by_type_pointer = \
+            dict ( [ ( (record.type, record.pointer) , record ) for record in all_records
+                     if record.pointer != -1] )
+
+        # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+        for record in all_records: record.stale=True
+
+        ######## retrieve NITOS data
+        # Get site info
+        # retrieve only required stuff
+        site = shell.getTestbedInfo()
+        sites = [site]
+        # create a hash of sites by login_base
+#        # sites_by_login_base = dict ( [ ( site['login_base'], site ) for site in sites ] )
+        # Get all NITOS users
+        users = shell.getUsers()
+        # create a hash of users by user_id
+        users_by_id = dict ( [ ( user['user_id'], user) for user in users ] )
+        # Get all NITOS public keys
+        # accumulate key ids for keys retrieval
+#        key_ids = []
+#        for person in persons:
+#            key_ids.extend(person['key_ids'])
+#        keys = shell.GetKeys( {'peer_id': None, 'key_id': key_ids,
+#                               'key_type': 'ssh'} )
+#        # create a hash of keys by key_id
+#        keys_by_id = dict ( [ ( key['key_id'], key ) for key in keys ] )
+        # create a dict user_id -> [ (nitos)keys ]
+        keys_by_user_id = dict ( [ ( user['user_id'], user['keys']) for user in users ] )
+        # Get all nitos nodes
+        nodes = shell.getNodes({}, [])
+        # create hash by node_id
+        nodes_by_id = dict ( [ (node['node_id'], node) for node in nodes ] )
+        # Get all nitos slices
+        slices = shell.getSlices({}, [])
+        # create hash by slice_id
+        slices_by_id = dict ( [ (slice['slice_id'], slice) for slice in slices ] )
+
+
+        # start importing
+        for site in sites:
+        #for i in [0]:
+            site_hrn = _get_site_hrn(interface_hrn, site)
+            # import if hrn is not in list of existing hrns or if the hrn exists
+            # but it's not a site record
+            site_record=self.locate_by_type_hrn ('authority', site_hrn)
+            if not site_record:
+                try:
+                    urn = hrn_to_urn(site_hrn, 'authority')
+                    if not self.auth_hierarchy.auth_exists(urn):
+                        self.auth_hierarchy.create_auth(urn)
+                    auth_info = self.auth_hierarchy.get_auth_info(urn)
+                    site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+                                               pointer=0,
+                                               authority=get_authority(site_hrn))
+                    site_record.just_created()
+                    dbsession.add(site_record)
+                    dbsession.commit()
+                    self.logger.info("NitosImporter: imported authority (site) : %s" % site_record)
+                    self.remember_record (site_record)
+                except:
+                    # if the site import fails then there is no point in trying to import the
+                    # site's child records (node, slices, persons), so skip them.
+                    self.logger.log_exc("NitosImporter: failed to import site. Skipping child records")
+                    continue
+            else:
+                # xxx update the record ...
+                pass
+            site_record.stale=False
+
+            # import node records
+            for node in nodes:
+                site_auth = get_authority(site_hrn)
+                site_name = site['name']
+                node_hrn = hostname_to_hrn(site_auth, site_name, node['hostname'])
+                # xxx this sounds suspicious
+                if len(node_hrn) > 64: node_hrn = node_hrn[:64]
+                node_record = self.locate_by_type_hrn ( 'node', node_hrn )
+                if not node_record:
+                    try:
+                        pkey = Keypair(create=True)
+                        urn = hrn_to_urn(node_hrn, 'node')
+                        node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                        node_record = RegNode (hrn=node_hrn, gid=node_gid,
+                                               pointer =node['node_id'],
+                                               authority=get_authority(node_hrn))
+                        node_record.just_created()
+                        dbsession.add(node_record)
+                        dbsession.commit()
+                        self.logger.info("NitosImporter: imported node: %s" % node_record)
+                        self.remember_record (node_record)
+                    except:
+                        self.logger.log_exc("NitosImporter: failed to import node")
+                else:
+                    # xxx update the record ...
+                    pass
+
+                node_record.stale=False
+
+
+            # import users
+            for user in users:
+                user_hrn = username_to_hrn(interface_hrn, site['name'], user['username'])
+                # xxx suspicious again
+                if len(user_hrn) > 64: user_hrn = user_hrn[:64]
+                user_urn = hrn_to_urn(user_hrn, 'user')
+
+                user_record = self.locate_by_type_hrn ( 'user', user_hrn)
+
+                # return a tuple pubkey (a nitos key object) and pkey (a Keypair object)
+                def init_user_key (user):
+                    pubkey = None
+                    pkey = None
+                    if user['keys']:
+                        # pick the first key in the set that converts cleanly
+                        for key in user['keys']:
+                            pubkey = key
+                            try:
+                                pkey = convert_public_key(pubkey)
+                                break
+                            except:
+                                continue
+                        if not pkey:
+                            self.logger.warn('NitosImporter: unable to convert public key for %s' % user_hrn)
+                            pkey = Keypair(create=True)
+                    else:
+                        # the user has no keys. Creating a random keypair for the user's gid
+                        self.logger.warn("NitosImporter: user %s does not have a NITOS public key"%user_hrn)
+                        pkey = Keypair(create=True)
+                    return (pubkey, pkey)
+
+                # new user
+                try:
+                    if not user_record:
+                        (pubkey,pkey) = init_user_key (user)
+                        user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
+                        user_gid.set_email(user['email'])
+                        user_record = RegUser (hrn=user_hrn, gid=user_gid,
+                                               pointer=user['user_id'],
+                                               authority=get_authority(user_hrn),
+                                               email=user['email'])
+                        if pubkey:
+                            user_record.reg_keys=[RegKey (pubkey)]
+                        else:
+                            self.logger.warning("No key found for user %s"%user_record)
+                        user_record.just_created()
+                        dbsession.add (user_record)
+                        dbsession.commit()
+                        self.logger.info("NitosImporter: imported user: %s" % user_record)
+                        self.remember_record ( user_record )
+                    else:
+                        # update the record ?
+                        # if user's primary key has changed then we need to update the
+                        # user's gid by forcing an update here
+                        sfa_keys = user_record.reg_keys
+                        def key_in_list (key,sfa_keys):
+                            for reg_key in sfa_keys:
+                                if reg_key.key==key: return True
+                            return False
+                        # is there a new key in NITOS ?
+                        new_keys=False
+                        for key in user['keys']:
+                            if not key_in_list (key,sfa_keys):
+                                new_keys = True
+                        if new_keys:
+                            (pubkey,pkey) = init_user_key (user)
+                            user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
+                            if not pubkey:
+                                user_record.reg_keys=[]
+                            else:
+                                user_record.reg_keys=[ RegKey (pubkey)]
+                            self.logger.info("NitosImporter: updated user: %s" % user_record)
+                        user_record.email = user['email']
+                        dbsession.commit()
+                    user_record.stale=False
+                except:
+                    self.logger.log_exc("NitosImporter: failed to import user %s %s"%(user['user_id'],user['email']))
+
+
+            # import slices
+            for slice in slices:
+                slice_hrn = slicename_to_hrn(interface_hrn, site['name'], slice['slice_name'])
+                slice_record = self.locate_by_type_hrn ('slice', slice_hrn)
+                if not slice_record:
+                    try:
+                        pkey = Keypair(create=True)
+                        urn = hrn_to_urn(slice_hrn, 'slice')
+                        slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+                        slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid,
+                                                 pointer=slice['slice_id'],
+                                                 authority=get_authority(slice_hrn))
+                        slice_record.just_created()
+                        dbsession.add(slice_record)
+                        dbsession.commit()
+                        self.logger.info("NitosImporter: imported slice: %s" % slice_record)
+                        self.remember_record ( slice_record )
+                    except:
+                        self.logger.log_exc("NitosImporter: failed to import slice")
+                else:
+                    # xxx update the record ...
+                    self.logger.warning ("Slice update not yet implemented")
+                    pass
+                # record current users affiliated with the slice
+                slice_record.reg_researchers = \
+                    [ self.locate_by_type_pointer ('user',int(user_id)) for user_id in slice['user_ids'] ]
+                dbsession.commit()
+                slice_record.stale=False
+
+
+        ### remove stale records
+        # special records must be preserved
+        system_hrns = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+        for record in all_records:
+            if record.hrn in system_hrns:
+                record.stale=False
+            if record.peer_authority:
+                record.stale=False
+
+        for record in all_records:
+            try: stale=record.stale
+            except:
+                stale=True
+                self.logger.warning("stale not found with %s"%record)
+            if stale:
+                self.logger.info("NitosImporter: deleting stale record: %s" % record)
+                dbsession.delete(record)
+                dbsession.commit()
+
+
+
+if __name__ == "__main__":
+    from sfa.util.sfalogging import logger
+    nitosimporter = NitosImporter("pla.nitos", logger)
+    nitosimporter.run(None)
slice_urn=xrn.get_urn()
slice_hrn=xrn.get_hrn()
+ # xxx sounds like GetTicket is dead, but if that gets resurrected we might wish
+ # to pass 'users' over to the driver as well
return self.driver.get_ticket (slice_urn, slice_hrn, creds, rspec, options)
# the following is used in Resolve (registry) when run in full mode
# after looking up the sfa db, we wish to be able to display
# testbed-specific info as well
- # this at minima should fill in the 'researcher' field for slice records
- # as this information is then used to compute rights
- # roadmap: there is an intention to redesign the SFA database so as to clear up
- # this constraint, based on the principle that SFA should not rely on the
- # testbed database to perform such a core operation (i.e. getting rights right)
+ # based on the principle that SFA should not rely on the testbed database
+ # to perform such a core operation (i.e. getting rights right)
+ # this is no longer in use when performing other SFA operations
def augment_records_with_testbed_info (self, sfa_records):
print >>sys.stderr, " \r\n \r\n DRIVER.PY augment_records_with_testbed_info sfa_records ",sfa_records
return sfa_records
# to the actual method calls anyway
self.manager = manager(config)
else:
+ # that's what happens when there's something wrong with the db
+ # or any bad stuff of that kind at startup time
+ logger.log_exc("Failed to create a manager, startup sequence is broken")
raise SfaAPIError,"Argument to ManagerWrapper must be a module or class"
self.interface = interface
from sfa.trust.certificate import Certificate, Keypair, convert_public_key
from sfa.trust.gid import create_uuid
-from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey
+from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey, \
+ augment_with_sfa_builtins
from sfa.storage.alchemy import dbsession
+### the types that we need to exclude from sqlobjects before being able to dump
+# them on the xmlrpc wire
+from sqlalchemy.orm.collections import InstrumentedList
class RegistryManager:
if not record:
raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
- # xxx for the record only
- # used to call this, which was wrong, now all needed data is natively is our DB
- # self.driver.augment_records_with_testbed_info (record.__dict__)
- # likewise, we deprecate is_enabled which was not really useful
- # if not self.driver.is_enabled (record.__dict__): ...
- # xxx for the record only
-
# get the callers gid
# if caller_xrn is not specified assume the caller is the record
# object itself.
return new_cred.save_to_string(save_parents=True)
- def Resolve(self, api, xrns, type=None, full=True):
+ # the default for full, which means 'dig into the testbed as well', should be false
+ def Resolve(self, api, xrns, type=None, details=False):
if not isinstance(xrns, types.ListType):
# try to infer type if not set and we get a single input
credential = api.getCredential()
interface = api.registries[registry_hrn]
server_proxy = api.server_proxy(interface, credential)
+ # should propagate the details flag but that's not supported in the xmlrpc interface yet
+ #peer_records = server_proxy.Resolve(xrns, credential,type, details=details)
peer_records = server_proxy.Resolve(xrns, credential,type)
# pass foreign records as-is
# previous code used to read
if type:
local_records = local_records.filter_by(type=type)
local_records=local_records.all()
- logger.info("Resolve: local_records=%s (type=%s)"%(local_records,type))
+
+ for local_record in local_records:
+ augment_with_sfa_builtins (local_record)
+
+ logger.info("Resolve, (details=%s,type=%s) local_records=%s "%(details,type,local_records))
local_dicts = [ record.__dict__ for record in local_records ]
- if full:
- # in full mode we get as much info as we can, which involves contacting the
+ if details:
+ # in details mode we get as much info as we can, which involves contacting the
# testbed for getting implementation details about the record
self.driver.augment_records_with_testbed_info(local_dicts)
- #logger.debug("Resolve: local_dicts =%s "%(local_dicts))
# also we fill the 'url' field for known authorities
# used to be in the driver code, sounds like a poorman thing though
def solve_neighbour_url (record):
- logger.debug("\r\n \t\t solve_neighbour_url: record = %s "%(record))
if not record.type.startswith('authority'): return
hrn=record.hrn
for neighbour_dict in [ api.aggregates, api.registries ]:
record.url=neighbour_dict[hrn].get_url()
return
for record in local_records: solve_neighbour_url (record)
- #logger.debug("\solve_neighbour_url = OK ")
# convert local record objects to dicts for xmlrpc
# xxx somehow here calling dict(record) issues a weird error
# however record.todict() seems to work fine
# records.extend( [ dict(record) for record in local_records ] )
- records.extend( [ record.todict() for record in local_records ] )
- #logger.debug("\RESOLVE = records %s " %(records))
+ records.extend( [ record.todict(exclude_types=[InstrumentedList]) for record in local_records ] )
+
if not records:
raise RecordNotFound(str(hrns))
records = dbsession.query(RegRecord).filter(RegRecord.hrn.startswith(hrn))
else:
records = dbsession.query(RegRecord).filter_by(authority=hrn)
- record_dicts=[ record.todict() for record in records ]
+ # so that sfi list can show more than plain names...
+ for record in records: augment_with_sfa_builtins (record)
+ record_dicts=[ record.todict(exclude_types=[InstrumentedList]) for record in records ]
return record_dicts
####################
# utility for handling relationships among the SFA objects
- # given that the SFA db does not handle this sort of relationsships
- # it will rely on side-effects in the testbed to keep this persistent
# subject_record describes the subject of the relationships
# ref_record contains the target values for the various relationships we need to manage
- # (to begin with, this is just the slice x person relationship)
+ # (to begin with, this is just the slice x person (researcher) and authority x person (pi) relationships)
def update_driver_relations (self, subject_obj, ref_obj):
type=subject_obj.type
#for (k,v) in subject_obj.__dict__.items(): print k,'=',v
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
# resolve the record
- records = self.api.manager.Resolve(self.api, xrns, full = False)
+ records = self.api.manager.Resolve(self.api, xrns, details = False)
if not records:
raise RecordNotFound(xrns)
# another registry if needed
# I wonder if this is truly the intention, or shouldn't we instead
# only look in the local db ?
- records = self.api.manager.Resolve(self.api, xrn, type)
+ records = self.api.manager.Resolve(self.api, xrn, type, details=False)
if not records:
raise RecordNotFound(hrn)
interfaces = ['registry']
+ # should we not accept an optional 'details' argument ?
accepts = [
Mixed(Parameter(str, "Human readable name (hrn or urn)"),
Parameter(list, "List of Human readable names ([hrn])")),
Mixed(Parameter(str, "Credential string"),
- Parameter(list, "List of credentials)"))
+ Parameter(list, "List of credentials)")),
+ Parameter(dict, "options"),
]
# xxx used to be [SfaRecord]
returns = [Parameter(dict, "registry record")]
- def call(self, xrns, creds):
+ def call(self, xrns, creds, options={}):
+ # use details=False by default, only when explicitly specified do we want
+ # to mess with the testbed details
+ if 'details' in options: details=options['details']
+ else: details=False
type = None
if not isinstance(xrns, types.ListType):
type = Xrn(xrns).get_type()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrns, self.name))
# send the call to the right manager
- return self.api.manager.Resolve(self.api, xrns, type)
+ return self.api.manager.Resolve(self.api, xrns, type, details=details)
--- /dev/null
+#!/usr/bin/python
+from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn
+from sfa.util.sfatime import utcparse, datetime_to_string
+from sfa.util.sfalogging import logger
+
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.link import Link
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.position_3d import Position3D
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.granularity import Granularity
+from sfa.rspecs.elements.channel import Channel
+from sfa.rspecs.version_manager import VersionManager
+
+from sfa.nitos.nitosxrn import NitosXrn, hostname_to_urn, hrn_to_nitos_slicename, slicename_to_hrn
+from sfa.planetlab.vlink import get_tc_rate
+from sfa.planetlab.topology import Topology
+
+import time
+
+class NitosAggregate:
+    """Builds RSpec documents (advertisement or manifest) for the NITOS
+    testbed, using the xmlrpc shell exposed by the NitosDriver."""
+
+    def __init__(self, driver):
+        # driver is a NitosDriver; gives access to .shell, .hrn and .testbedInfo
+        self.driver = driver
+
+
+    def get_slice_and_slivers(self, slice_xrn):
+        """
+        Returns a dict of slivers keyed on the sliver's node_id
+        """
+        slivers = {}
+        slice = None
+        if not slice_xrn:
+            return (slice, slivers)
+        slice_urn = hrn_to_urn(slice_xrn, 'slice')
+        slice_hrn, _ = urn_to_hrn(slice_xrn)
+        slice_name = hrn_to_nitos_slicename(slice_hrn)
+        slices = self.driver.shell.getSlices({'slice_name': slice_name}, [])
+        # filter results: do not trust the server-side filter, match the
+        # exact slice_name locally
+        for slc in slices:
+            if slc['slice_name'] == slice_name:
+                slice = slc
+                break
+
+        if not slice:
+            return (slice, slivers)
+
+        reserved_nodes = self.driver.shell.getReservedNodes({'slice_id': slice['slice_id']}, [])
+        reserved_node_ids = []
+        # filter on the slice
+        for node in reserved_nodes:
+            if node['slice_id'] == slice['slice_id']:
+                reserved_node_ids.append(node['node_id'])
+        #get all the nodes
+        all_nodes = self.driver.shell.getNodes({}, [])
+
+        # a 'sliver' here is simply the full node record of a reserved node
+        for node in all_nodes:
+            if node['node_id'] in reserved_node_ids:
+                slivers[node['node_id']] = node
+
+        return (slice, slivers)
+
+
+
+    def get_nodes(self, slice_xrn, slice=None,slivers={}, options={}):
+        """Return the list of rspec Node elements: restricted to the slice's
+        slivers when slice_xrn is given, all testbed nodes otherwise.
+
+        NOTE(review): mutable default arguments (slivers={}, options={}) are
+        shared across calls -- harmless only as long as callers never mutate
+        them.
+        """
+        # if we are dealing with a slice that has no node just return
+        # an empty list
+        if slice_xrn:
+            if not slice or not slivers:
+                return []
+            else:
+                nodes = [slivers[sliver] for sliver in slivers]
+        else:
+            nodes = self.driver.shell.getNodes({}, [])
+
+        # get the granularity in second for the reservation system
+        grain = self.driver.testbedInfo['grain']
+        #grain = 1800
+
+
+        rspec_nodes = []
+        for node in nodes:
+            rspec_node = Node()
+            site_name = self.driver.testbedInfo['name']
+            rspec_node['component_id'] = hostname_to_urn(self.driver.hrn, site_name, node['hostname'])
+            rspec_node['component_name'] = node['hostname']
+            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+            rspec_node['authority_id'] = hrn_to_urn(NitosXrn.site_hrn(self.driver.hrn, site_name), 'authority+sa')
+            # do not include boot state (<available> element) in the manifest rspec
+            #if not slice:
+            #    rspec_node['boot_state'] = node['boot_state']
+            # NITOS nodes are reserved exclusively
+            rspec_node['exclusive'] = 'true'
+            # site location
+            longitude = self.driver.testbedInfo['longitude']
+            latitude = self.driver.testbedInfo['latitude']
+            if longitude and latitude:
+                location = Location({'longitude': longitude, 'latitude': latitude, 'country': 'unknown'})
+                rspec_node['location'] = location
+            # 3D position
+            position_3d = Position3D({'x': node['position']['X'], 'y': node['position']['Y'], 'z': node['position']['Z']})
+            #position_3d = Position3D({'x': 1, 'y': 2, 'z': 3})
+            rspec_node['position_3d'] = position_3d
+            # Granularity
+            granularity = Granularity({'grain': grain})
+            rspec_node['granularity'] = granularity
+
+            # HardwareType
+            rspec_node['hardware_type'] = node['node_type']
+            #rspec_node['hardware_type'] = "orbit"
+
+            #slivers
+            if node['node_id'] in slivers:
+                # add sliver info
+                sliver = slivers[node['node_id']]
+                # NOTE(review): sliver_id is set to the bare node_id, not a
+                # sliver urn -- verify against the rspec consumers
+                rspec_node['sliver_id'] = sliver['node_id']
+                rspec_node['client_id'] = node['hostname']
+                rspec_node['slivers'] = [sliver]
+
+
+            rspec_nodes.append(rspec_node)
+        return rspec_nodes
+
+    def get_leases_and_channels(self, slice=None, slice_xrn=None, options={}):
+        """Return (rspec_leases, rspec_channels), testbed-wide or restricted
+        to 'slice' when one is given."""
+        slices = self.driver.shell.getSlices({}, [])
+        nodes = self.driver.shell.getNodes({}, [])
+        leases = self.driver.shell.getReservedNodes({}, [])
+        channels = self.driver.shell.getChannels({}, [])
+        reserved_channels = self.driver.shell.getReservedChannels()
+        grain = self.driver.testbedInfo['grain']
+
+        if slice_xrn and not slice:
+            return ([], [])
+
+        if slice:
+            # iterate over copies so removing from the original lists while
+            # looping is safe
+            all_leases = []
+            all_leases.extend(leases)
+            all_reserved_channels = []
+            all_reserved_channels.extend(reserved_channels)
+            for lease in all_leases:
+                if lease['slice_id'] != slice['slice_id']:
+                    leases.remove(lease)
+            for channel in all_reserved_channels:
+                if channel['slice_id'] != slice['slice_id']:
+                    reserved_channels.remove(channel)
+
+        rspec_channels = []
+        for channel in reserved_channels:
+
+            rspec_channel = {}
+            # retrieve channel number
+            # NOTE(review): if no entry matches, channel_number keeps its
+            # value from the previous iteration (unbound on the first one)
+            for chl in channels:
+                if chl['channel_id'] == channel['channel_id']:
+                    channel_number = chl['channel']
+                    break
+
+            rspec_channel['channel_num'] = channel_number
+            rspec_channel['start_time'] = channel['start_time']
+            # duration is expressed in number of grains
+            rspec_channel['duration'] = (int(channel['end_time']) - int(channel['start_time'])) / int(grain)
+
+            # retrieve slicename
+            for slc in slices:
+                if slc['slice_id'] == channel['slice_id']:
+                    slicename = slc['slice_name']
+                    break
+
+            if slice_xrn:
+                slice_urn = slice_xrn
+                # NOTE(review): urn_to_hrn returns a (hrn, type) tuple, so
+                # slice_hrn is a tuple here; it is unused on this branch
+                slice_hrn = urn_to_hrn(slice_urn)
+            else:
+                slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
+                slice_urn = hrn_to_urn(slice_hrn, 'slice')
+
+            rspec_channel['slice_id'] = slice_urn
+            rspec_channels.append(rspec_channel)
+
+
+        rspec_leases = []
+        for lease in leases:
+
+            rspec_lease = Lease()
+
+            rspec_lease['lease_id'] = lease['reservation_id']
+            # retrieve node name
+            # NOTE(review): nodename may be stale/unbound if no node matches
+            for node in nodes:
+                if node['node_id'] == lease['node_id']:
+                    nodename = node['hostname']
+                    break
+
+            rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, self.driver.testbedInfo['name'], nodename)
+            # retrieve slicename
+            for slc in slices:
+                if slc['slice_id'] == lease['slice_id']:
+                    slicename = slc['slice_name']
+                    break
+
+            if slice_xrn:
+                slice_urn = slice_xrn
+                slice_hrn = urn_to_hrn(slice_urn)
+            else:
+                slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
+                slice_urn = hrn_to_urn(slice_hrn, 'slice')
+
+            rspec_lease['slice_id'] = slice_urn
+            rspec_lease['start_time'] = lease['start_time']
+            rspec_lease['duration'] = (int(lease['end_time']) - int(lease['start_time'])) / int(grain)
+            rspec_leases.append(rspec_lease)
+
+        return (rspec_leases, rspec_channels)
+
+
+    def get_channels(self, slice=None, options={}):
+        """Return rspec Channel elements: all channels for an advertisement,
+        or only the slice's reserved channels when 'slice' is given."""
+        all_channels = self.driver.shell.getChannels({}, [])
+        channels = []
+        if slice:
+            reserved_channels = self.driver.shell.getReservedChannels()
+            reserved_channel_ids = []
+            for channel in reserved_channels:
+                if channel['slice_id'] == slice['slice_id']:
+                    reserved_channel_ids.append(channel['channel_id'])
+
+            for channel in all_channels:
+                if channel['channel_id'] in reserved_channel_ids:
+                    channels.append(channel)
+        else:
+            channels = all_channels
+
+        rspec_channels = []
+        for channel in channels:
+            rspec_channel = Channel()
+            rspec_channel['channel_num'] = channel['channel']
+            rspec_channel['frequency'] = channel['frequency']
+            rspec_channel['standard'] = channel['modulation']
+            rspec_channels.append(rspec_channel)
+        return rspec_channels
+
+
+
+    def get_rspec(self, slice_xrn=None, version = None, options={}):
+        """Build and return the rspec xml string: an advertisement when no
+        slice_xrn is given, otherwise a manifest for that slice."""
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+
+        if not slice_xrn:
+            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+        else:
+            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+
+        slice, slivers = self.get_slice_and_slivers(slice_xrn)
+
+        rspec = RSpec(version=rspec_version, user_options=options)
+
+        if slice and 'expires' in slice:
+            rspec.xml.set('expires', datetime_to_string(utcparse(slice['expires'])))
+
+        # skip nodes/channels when the caller asked for leases only
+        if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
+            nodes = self.get_nodes(slice_xrn, slice, slivers, options)
+            rspec.version.add_nodes(nodes)
+            # add sliver defaults
+            default_sliver = slivers.get(None, [])
+            if default_sliver:
+                default_sliver_attribs = default_sliver.get('tags', [])
+                for attrib in default_sliver_attribs:
+                    logger.info(attrib)
+                    rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
+            # add wifi channels
+            channels = self.get_channels(slice, options)
+            rspec.version.add_channels(channels)
+
+        # skip leases when the caller asked for resources only
+        if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
+            leases, channels = self.get_leases_and_channels(slice, slice_xrn)
+            rspec.version.add_leases(leases, channels)
+
+        return rspec.toxml()
+
+
--- /dev/null
+import time
+import datetime
+#
+from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
+ RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+
+from sfa.util.sfalogging import logger
+from sfa.util.defaultdict import defaultdict
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
+from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_hrn
+from sfa.util.cache import Cache
+
+# one would think the driver should not need to mess with the SFA db, but..
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
+
+# used to be used in get_ticket
+#from sfa.trust.sfaticket import SfaTicket
+
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+# the driver interface, mostly provides default behaviours
+from sfa.managers.driver import Driver
+
+from sfa.nitos.nitosshell import NitosShell
+from sfa.nitos.nitosaggregate import NitosAggregate
+from sfa.nitos.nitosslices import NitosSlices
+
+from sfa.nitos.nitosxrn import NitosXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_nitos_slicename, xrn_to_hostname
+
+def list_to_dict(recs, key):
+    """
+    convert a list of dictionaries into a dictionary keyed on the
+    specified dictionary key
+    """
+    # later records win when several share the same key value
+    return dict ( [ (rec[key],rec) for rec in recs ] )
+
+#
+# NitosShell is just an xmlrpc serverproxy where methods
+# can be sent as-is; it takes care of authentication
+# from the global config
+#
+class NitosDriver (Driver):
+
+ # the cache instance is a class member so it survives across incoming requests
+ cache = None
+
+    def __init__ (self, config):
+        """Set up the xmlrpc shell and fetch the static testbed description
+        (name, grain, location...) once at startup."""
+        Driver.__init__ (self, config)
+        self.shell = NitosShell (config)
+        self.cache=None
+        self.testbedInfo = self.shell.getTestbedInfo()
+# un-comment below lines to enable caching
+#        if config.SFA_AGGREGATE_CACHING:
+#            if NitosDriver.cache is None:
+#                NitosDriver.cache = Cache()
+#            self.cache = NitosDriver.cache
+
+ ###########################################
+ ########## utility methods for NITOS driver
+ ###########################################
+
+
+    def filter_nitos_results (self, listo, filters_dict):
+        """
+        the Nitos scheduler API does not provide result filtering so we do it here
+        """
+        # iterate over a copy so removing from listo while looping is safe;
+        # note that listo is filtered IN PLACE and also returned
+        mylist = []
+        mylist.extend(listo)
+        for dicto in mylist:
+            # 'filter' shadows the builtin; only local to this loop
+            for filter in filters_dict:
+                if filter not in dicto or dicto[filter] != filters_dict[filter]:
+                    listo.remove(dicto)
+                    break
+        return listo
+
+    def convert_id (self, list_of_dict):
+        """
+        convert object ids retrieved in string format to int format
+        """
+        # mutates the dicts in place and returns the same list
+        for dicto in list_of_dict:
+            for key in dicto:
+                if key in ['node_id', 'slice_id', 'user_id', 'channel_id', 'reservation_id'] and isinstance(dicto[key], str):
+                    dicto[key] = int(dicto[key])
+                elif key in ['user_ids']:
+                    user_ids2 = []
+                    for user_id in dicto['user_ids']:
+                        user_ids2.append(int(user_id))
+                    dicto['user_ids'] = user_ids2
+        return list_of_dict
+
+
+
+ ########################################
+ ########## registry oriented
+ ########################################
+
+    def augment_records_with_testbed_info (self, sfa_records):
+        # registry hook: delegate to fill_record_info for the NITOS details
+        return self.fill_record_info (sfa_records)
+
+ ##########
+    def register (self, sfa_record, hrn, pub_key):
+        """Create the NITOS-side object for a new registry record and return
+        its NITOS id (the record's 'pointer'); -1 for authorities.
+
+        NOTE(review): when no existing object matches, slice_id / user_id /
+        node_id are read before assignment, raising NameError instead of
+        taking the 'add' branch -- confirm and initialize them to None.
+        NOTE(review): nitos_record['name'] / ['email'] are read below, but
+        sfa_fields_to_nitos_fields only sets 'slice_name' / 'node_name' --
+        verify the expected key names.
+        """
+        type = sfa_record['type']
+        nitos_record = self.sfa_fields_to_nitos_fields(type, hrn, sfa_record)
+
+        if type == 'authority':
+            # authorities have no NITOS counterpart
+            pointer = -1
+
+        elif type == 'slice':
+            slices = self.shell.getSlices()
+            # filter slices
+            for slice in slices:
+                if slice['slice_name'] == nitos_record['name']:
+                    slice_id = slice['slice_id']
+                    break
+
+            if not slice_id:
+                pointer = self.shell.addSlice({'slice_name' : nitos_record['name']})
+            else:
+                pointer = slice_id
+
+        elif type == 'user':
+            users = self.shell.getUsers()
+            # filter users
+            for user in users:
+                if user['user_name'] == nitos_record['name']:
+                    user_id = user['user_id']
+                    break
+            if not user_id:
+                pointer = self.shell.addUser({'username' : nitos_record['name'], 'email' : nitos_record['email']})
+            else:
+                pointer = user_id
+
+
+            # Add the user's key
+            if pub_key:
+                self.shell.addUserKey({'user_id' : pointer,'key' : pub_key})
+
+        elif type == 'node':
+            # NOTE(review): GetNodes (capital G) differs from the getNodes
+            # spelling used everywhere else -- confirm the shell accepts it
+            nodes = self.shell.GetNodes({}, [])
+            # filter nodes
+            for node in nodes:
+                if node['hostname'] == nitos_record['name']:
+                    node_id = node['node_id']
+                    break
+
+            if not node_id:
+                pointer = self.shell.addNode(nitos_record)
+            else:
+                pointer = node_id
+
+        return pointer
+
+ ##########
+    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
+        """Propagate a registry Update to the NITOS side; always returns True."""
+        pointer = old_sfa_record['pointer']
+        type = old_sfa_record['type']
+        # NOTE(review): new_nitos_record is computed but never used below
+        # (the node branch passes new_sfa_record instead) -- confirm intent
+        new_nitos_record = self.sfa_fields_to_nitos_fields(type, hrn, new_sfa_record)
+
+        # new_key implemented for users only
+        if new_key and type not in [ 'user' ]:
+            raise UnknownSfaType(type)
+
+        if type == "slice":
+            if 'name' in new_sfa_record:
+                self.shell.updateSlice({'slice_id': pointer, 'fields': {'slice_name': new_sfa_record['name']}})
+
+        elif type == "user":
+            update_fields = {}
+            if 'name' in new_sfa_record:
+                update_fields['username'] = new_sfa_record['name']
+            if 'email' in new_sfa_record:
+                update_fields['email'] = new_sfa_record['email']
+
+            self.shell.updateUser({'user_id': pointer, 'fields': update_fields})
+
+            if new_key:
+                # needs to be improved
+                self.shell.addUserKey({'user_id': pointer, 'key': new_key})
+
+        elif type == "node":
+            self.shell.updateNode({'node_id': pointer, 'fields': new_sfa_record})
+
+        return True
+
+
+ ##########
+    def remove (self, sfa_record):
+        """Delete the NITOS object a registry record points to; returns True.
+        Unknown types (e.g. authority) are silently ignored."""
+        type=sfa_record['type']
+        pointer=sfa_record['pointer']
+        if type == 'user':
+            self.shell.deleteUser({'user_id': pointer})
+        elif type == 'slice':
+            self.shell.deleteSlice({'slice_id': pointer})
+        elif type == 'node':
+            self.shell.deleteNode({'node_id': pointer})
+
+        return True
+
+
+
+
+
+ ##
+ # Convert SFA fields to NITOS fields for use when registering or updating
+ # registry record in the NITOS Scheduler database
+ #
+
+    def sfa_fields_to_nitos_fields(self, type, hrn, sfa_record):
+        """Map an SFA record to the field names the NITOS API expects;
+        returns an empty dict for types other than slice/node.
+
+        NOTE(review): register() later reads nitos_record['name'] and
+        ['email'], which this never sets -- verify the expected keys.
+        """
+        nitos_record = {}
+
+        if type == "slice":
+            nitos_record["slice_name"] = hrn_to_nitos_slicename(hrn)
+        elif type == "node":
+            if "hostname" not in sfa_record:
+                raise MissingSfaInfo("hostname")
+            nitos_record["node_name"] = sfa_record["hostname"]
+
+        return nitos_record
+
+ ####################
+    def fill_record_info(self, records):
+        """
+        Given a (list of) SFA record, fill in the NITOS specific
+        and SFA specific fields in the record.
+        """
+        # normalize to a list; records are mutated in place and returned
+        if not isinstance(records, list):
+            records = [records]
+
+        self.fill_record_nitos_info(records)
+        self.fill_record_hrns(records)
+        self.fill_record_sfa_info(records)
+        return records
+
+    def fill_record_nitos_info(self, records):
+        """
+        Fill in the nitos specific fields of a SFA record. This
+        involves calling the appropriate NITOS API method to retrieve the
+        database record for the object.
+
+        @param record: record to fill in field (in/out param)
+        """
+
+        # get ids by type
+        # key_ids is declared but unused below
+        node_ids, slice_ids = [], []
+        user_ids, key_ids = [], []
+        type_map = {'node': node_ids, 'slice': slice_ids, 'user': user_ids}
+
+        for record in records:
+            for type in type_map:
+                if type == record['type']:
+                    type_map[type].append(record['pointer'])
+
+        # get nitos records
+        nodes, slices, users, keys = {}, {}, {}, {}
+        if node_ids:
+            all_nodes = self.convert_id(self.shell.getNodes({}, []))
+            node_list = [node for node in all_nodes if node['node_id'] in node_ids]
+            nodes = list_to_dict(node_list, 'node_id')
+        if slice_ids:
+            all_slices = self.convert_id(self.shell.getSlices({}, []))
+            slice_list = [slice for slice in all_slices if slice['slice_id'] in slice_ids]
+            slices = list_to_dict(slice_list, 'slice_id')
+        if user_ids:
+            all_users = self.convert_id(self.shell.getUsers())
+            user_list = [user for user in all_users if user['user_id'] in user_ids]
+            users = list_to_dict(user_list, 'user_id')
+
+        nitos_records = {'node': nodes, 'slice': slices, 'user': users}
+
+
+        # fill record info
+        for record in records:
+            # pointer == -1 marks records with no NITOS counterpart
+            if record['pointer'] == -1:
+                continue
+
+            for type in nitos_records:
+                if record['type'] == type:
+                    if record['pointer'] in nitos_records[type]:
+                        record.update(nitos_records[type][record['pointer']])
+                    break
+            # fill in key info
+            # assumes the NITOS user dict carries a 'keys' entry -- TODO confirm
+            if record['type'] == 'user':
+                if record['pointer'] in nitos_records['user']:
+                    record['keys'] = nitos_records['user'][record['pointer']]['keys']
+
+        return records
+
+
+    def fill_record_hrns(self, records):
+        """
+        convert nitos ids to hrns
+        """
+
+
+        # get ids
+        slice_ids, user_ids, node_ids = [], [], []
+        for record in records:
+            if 'user_ids' in record:
+                user_ids.extend(record['user_ids'])
+            if 'slice_ids' in record:
+                slice_ids.extend(record['slice_ids'])
+            if 'node_ids' in record:
+                node_ids.extend(record['node_ids'])
+
+        # get nitos records
+        slices, users, nodes = {}, {}, {}
+        if node_ids:
+            all_nodes = self.convert_id(self.shell.getNodes({}, []))
+            node_list = [node for node in all_nodes if node['node_id'] in node_ids]
+            nodes = list_to_dict(node_list, 'node_id')
+        if slice_ids:
+            all_slices = self.convert_id(self.shell.getSlices({}, []))
+            slice_list = [slice for slice in all_slices if slice['slice_id'] in slice_ids]
+            slices = list_to_dict(slice_list, 'slice_id')
+        if user_ids:
+            all_users = self.convert_id(self.shell.getUsers())
+            user_list = [user for user in all_users if user['user_id'] in user_ids]
+            users = list_to_dict(user_list, 'user_id')
+
+
+        # convert ids to hrns
+        for record in records:
+            # get all relevant data
+            type = record['type']
+            pointer = record['pointer']
+            auth_hrn = self.hrn
+            testbed_name = self.testbedInfo['name']
+            if pointer == -1:
+                continue
+            if 'user_ids' in record:
+                usernames = [users[user_id]['username'] for user_id in record['user_ids'] \
+                             if user_id in users]
+                user_hrns = [".".join([auth_hrn, testbed_name, username]) for username in usernames]
+                record['users'] = user_hrns
+            if 'slice_ids' in record:
+                slicenames = [slices[slice_id]['slice_name'] for slice_id in record['slice_ids'] \
+                              if slice_id in slices]
+                # NOTE(review): slicename_to_hrn is called with 2 args here
+                # but with 3 (auth, testbed, name) elsewhere -- confirm arity
+                slice_hrns = [slicename_to_hrn(auth_hrn, slicename) for slicename in slicenames]
+                record['slices'] = slice_hrns
+            if 'node_ids' in record:
+                hostnames = [nodes[node_id]['hostname'] for node_id in record['node_ids'] \
+                             if node_id in nodes]
+                # NOTE(review): login_base is not defined anywhere in this
+                # method -- this branch would raise NameError; presumably
+                # testbed_name was intended
+                node_hrns = [hostname_to_hrn(auth_hrn, login_base, hostname) for hostname in hostnames]
+                record['nodes'] = node_hrns
+
+            if 'expires' in record:
+                date = utcparse(record['expires'])
+                datestring = datetime_to_string(date)
+                record['expires'] = datestring
+
+        return records
+
+    def fill_record_sfa_info(self, records):
+        """Fill in SFA-level fields (geni_urn, researcher, email...) by
+        cross-referencing NITOS user ids with the local registry db."""
+
+        def startswith(prefix, values):
+            return [value for value in values if value.startswith(prefix)]
+
+        # get user ids
+        user_ids = []
+        for record in records:
+            user_ids.extend(record.get("user_ids", []))
+
+        # get the registry records
+        user_list, users = [], {}
+        user_list = dbsession.query(RegRecord).filter(RegRecord.pointer.in_(user_ids)).all()
+        # create a hrns keyed on the sfa record's pointer.
+        # Its possible for multiple records to have the same pointer so
+        # the dict's value will be a list of hrns.
+        users = defaultdict(list)
+        for user in user_list:
+            users[user.pointer].append(user)
+
+        # get the nitos records
+        nitos_user_list, nitos_users = [], {}
+        nitos_all_users = self.convert_id(self.shell.getUsers())
+        nitos_user_list = [user for user in nitos_all_users if user['user_id'] in user_ids]
+        nitos_users = list_to_dict(nitos_user_list, 'user_id')
+
+
+        # fill sfa info
+        for record in records:
+            if record['pointer'] == -1:
+                continue
+
+            sfa_info = {}
+            type = record['type']
+            logger.info("fill_record_sfa_info - incoming record typed %s"%type)
+            if (type == "slice"):
+                # all slice users are researchers
+                record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
+                record['researcher'] = []
+                for user_id in record.get('user_ids', []):
+                    hrns = [user.hrn for user in users[user_id]]
+                    record['researcher'].extend(hrns)
+
+            elif (type == "node"):
+                sfa_info['dns'] = record.get("hostname", "")
+                # xxx TODO: URI, LatLong, IP, DNS
+
+            elif (type == "user"):
+                logger.info('setting user.email')
+                sfa_info['email'] = record.get("email", "")
+                sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
+                sfa_info['geni_certificate'] = record['gid']
+                # xxx TODO: PostalAddress, Phone
+            record.update(sfa_info)
+
+ ####################
+    def update_relation (self, subject_type, target_type, relation_name, subject_id, target_ids):
+        """Sync the slice->researcher membership on the NITOS side: add the
+        users newly listed in target_ids, remove those no longer listed.
+        Any other (subject, target, relation) combination is just logged."""
+        if subject_type =='slice' and target_type == 'user' and relation_name == 'researcher':
+            subject=self.shell.getSlices ({'slice_id': subject_id}, [])[0]
+            current_target_ids = subject['user_ids']
+            # set difference in both directions gives the adds and the removes
+            add_target_ids = list ( set (target_ids).difference(current_target_ids))
+            del_target_ids = list ( set (current_target_ids).difference(target_ids))
+            logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
+            for target_id in add_target_ids:
+                self.shell.addUserToSlice ({'user_id': target_id, 'slice_id': subject_id})
+                logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
+            for target_id in del_target_ids:
+                logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
+                self.shell.deleteUserFromSlice ({'user_id': target_id, 'slice_id': subject_id})
+        else:
+            logger.info('unexpected relation %s to maintain, %s -> %s'%(relation_name,subject_type,target_type))
+
+
+ ########################################
+ ########## aggregate oriented
+ ########################################
+
+    # testbed identifier advertised through GetVersion
+    def testbed_name (self): return "nitos"
+
+ # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+    def aggregate_version (self):
+        """Return the rspec versions this aggregate can advertise and accept,
+        as required by the GetVersion API call."""
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        # '*' means the version works as both an ad and a request format
+        for rspec_version in version_manager.versions:
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict())
+        return {
+            'testbed':self.testbed_name(),
+            'geni_request_rspec_versions': request_rspec_versions,
+            'geni_ad_rspec_versions': ad_rspec_versions,
+            }
+
+    def list_slices (self, creds, options):
+        """Return the urns of all NITOS slices, going through the (optional)
+        aggregate cache."""
+        # look in cache first
+        if self.cache:
+            slices = self.cache.get('slices')
+            if slices:
+                logger.debug("NitosDriver.list_slices returns from cache")
+                return slices
+
+        # get data from db
+        slices = self.shell.getSlices({}, [])
+        testbed_name = self.testbedInfo['name']
+        slice_hrns = [slicename_to_hrn(self.hrn, testbed_name, slice['slice_name']) for slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+
+        # cache the result
+        if self.cache:
+            logger.debug ("NitosDriver.list_slices stores value in cache")
+            self.cache.add('slices', slice_urns)
+
+        return slice_urns
+
+ # first 2 args are None in case of resource discovery
+    def list_resources (self, slice_urn, slice_hrn, creds, options):
+        """Return the rspec for this aggregate: an advertisement when
+        slice_urn/slice_hrn are None, a manifest otherwise. Advertisements
+        may be served from cache unless options['cached'] is False."""
+        cached_requested = options.get('cached', True)
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        #rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+        # rspec's return format for nitos aggregate is version NITOS 1
+        rspec_version = version_manager.get_version('NITOS 1')
+        version_string = "rspec_%s" % (rspec_version)
+
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_"+options.get('info', 'default')
+
+        # Adding the list_leases option to the caching key
+        if options.get('list_leases'):
+            version_string = version_string + "_"+options.get('list_leases', 'default')
+
+        # Adding geni_available to caching key
+        if options.get('geni_available'):
+            version_string = version_string + "_" + str(options.get('geni_available'))
+
+        # look in cache first; manifests (slice_hrn set) are never cached
+        if cached_requested and self.cache and not slice_hrn:
+            rspec = self.cache.get(version_string)
+            if rspec:
+                logger.debug("NitosDriver.ListResources: returning cached advertisement")
+                return rspec
+
+        #panos: passing user-defined options
+        #print "manager options = ",options
+        aggregate = NitosAggregate(self)
+        rspec =  aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
+                                     options=options)
+
+        # cache the result
+        if self.cache and not slice_hrn:
+            logger.debug("NitosDriver.ListResources: stores advertisement in cache")
+            self.cache.add(version_string, rspec)
+
+        return rspec
+
+    def sliver_status (self, slice_urn, slice_hrn):
+        """Return the geni sliver status structure for the slice's reserved
+        nodes; raises SliverDoesNotExist when nothing is allocated.
+
+        NOTE(review): if no slice matches slicename, user_slice is read
+        before assignment and this raises NameError instead of
+        SliverDoesNotExist -- confirm and initialize user_slice = None.
+        """
+        # find out where this slice is currently running
+        slicename = hrn_to_nitos_slicename(slice_hrn)
+
+        slices = self.shell.getSlices({}, [])
+        # filter slicename
+        if len(slices) == 0:
+            raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
+
+        for slice in slices:
+            if slice['slice_name'] == slicename:
+                user_slice = slice
+                break
+
+        if not user_slice:
+            raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
+
+        # report about the reserved nodes only
+        reserved_nodes = self.shell.getReservedNodes({}, [])
+        nodes = self.shell.getNodes({}, [])
+
+        # 'slice' below still points at the matched slice thanks to the break
+        slice_reserved_nodes = []
+        for r_node in reserved_nodes:
+            if r_node['slice_id'] == slice['slice_id']:
+                for node in nodes:
+                    if node['node_id'] == r_node['node_id']:
+                        slice_reserved_nodes.append(node)
+
+
+
+
+        if len(slice_reserved_nodes) == 0:
+            raise SliverDoesNotExist("You have not allocated any slivers here")
+
+##### continue from here
+        # get login info: collect the ssh keys of every user in the slice
+        user = {}
+        keys = []
+        if slice['user_ids']:
+            users = self.shell.getUsers()
+            # filter users on slice['user_ids']
+            for usr in users:
+                if usr['user_id'] in slice['user_ids']:
+                    keys.extend(usr['keys'])
+
+
+        user.update({'urn': slice_urn,
+                     'login': slice['slice_name'],
+                     'protocol': ['ssh'],
+                     'port': ['22'],
+                     'keys': keys})
+
+
+        result = {}
+        top_level_status = 'unknown'
+        if slice_reserved_nodes:
+            top_level_status = 'ready'
+        result['geni_urn'] = slice_urn
+        result['nitos_gateway_login'] = slice['slice_name']
+        #result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
+        #result['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
+
+        resources = []
+        for node in slice_reserved_nodes:
+            res = {}
+            res['nitos_hostname'] = node['hostname']
+            sliver_id = Xrn(slice_urn, type='slice', id=node['node_id']).urn
+            res['geni_urn'] = sliver_id
+            res['geni_status'] = 'ready'
+            res['geni_error'] = ''
+            res['users'] = [user]
+
+            resources.append(res)
+
+        result['geni_status'] = top_level_status
+        result['geni_resources'] = resources
+
+        return result
+
+    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+        """Allocate the leases (nodes and channels) requested by the rspec
+        for the slice, creating the slice/user objects as needed, and return
+        the resulting manifest rspec."""
+        aggregate = NitosAggregate(self)
+        slices = NitosSlices(self)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+        slice_record=None
+        if users:
+            # by convention the first user carries the slice record
+            slice_record = users[0].get('slice_record', {})
+
+        # parse rspec
+        rspec = RSpec(rspec_string, version='NITOS 1')
+
+        # ensure slice record exists
+        slice = slices.verify_slice(slice_hrn, slice_record, sfa_peer, options=options)
+        # ensure user records exists
+        users = slices.verify_users(slice_hrn, slice, users, sfa_peer, options=options)
+
+        # add/remove leases (nodes and channels)
+        # a lease in Nitos RSpec case is a reservation of nodes and channels grouped by (slice,timeslot)
+        rspec_requested_nodes, rspec_requested_channels = rspec.version.get_leases()
+
+        nodes = slices.verify_slice_leases_nodes(slice, rspec_requested_nodes)
+        channels = slices.verify_slice_leases_channels(slice, rspec_requested_channels)
+
+        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+
+    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+        """Release every node and channel reservation held by the slice;
+        best-effort, always returns 1."""
+        slicename = hrn_to_nitos_slicename(slice_hrn)
+        slices = self.filter_nitos_results(self.shell.getSlices({}, []), {'slice_name': slicename})
+        if not slices:
+            return 1
+        slice = slices[0]
+
+        slice_reserved_nodes = self.filter_nitos_results(self.shell.getReservedNodes({}, []), {'slice_id': slice['slice_id'] })
+        slice_reserved_channels = self.filter_nitos_results(self.shell.getReservedChannels(), {'slice_id': slice['slice_id'] })
+
+        slice_reserved_nodes_ids = [node['reservation_id'] for node in slice_reserved_nodes]
+        slice_reserved_channels_ids = [channel['reservation_id'] for channel in slice_reserved_channels]
+
+        # release all reserved nodes and channels for that slice
+        # deliberately best-effort: failures from the scheduler are swallowed
+        try:
+            released_nodes = self.shell.releaseNodes({'reservation_ids': slice_reserved_nodes_ids})
+            released_channels = self.shell.releaseChannels({'reservation_ids': slice_reserved_channels_ids})
+        except:
+            pass
+        return 1
+
+    def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
+        """Push a new expiration time for the slice; returns True on success.
+
+        NOTE(review): GetSlices/UpdateSlice (CamelCase) and the 'slicename'
+        filter key are inconsistent with the getSlices/'slice_name' spelling
+        used elsewhere in this driver -- verify against the NITOS shell.
+        """
+        slicename = hrn_to_nitos_slicename(slice_hrn)
+        slices = self.shell.GetSlices({'slicename': slicename}, ['slice_id'])
+        if not slices:
+            raise RecordNotFound(slice_hrn)
+        slice = slices[0]
+        requested_time = utcparse(expiration_time)
+        record = {'expires': int(datetime_to_epoch(requested_time))}
+        # any failure from the scheduler is reported as a plain False
+        try:
+            self.shell.UpdateSlice(slice['slice_id'], record)
+
+            return True
+        except:
+            return False
+
+
+    # xxx this code is quite old and has not run for ages
+    # it is obviously totally broken and needs a rewrite
+    def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
+        # tickets are unsupported by the NITOS driver; unconditional failure
+        raise SfaNotImplemented,"NitosDriver.get_ticket needs a rewrite"
+# please keep this code for future reference
+# slices = PlSlices(self)
+# peer = slices.get_peer(slice_hrn)
+# sfa_peer = slices.get_sfa_peer(slice_hrn)
+#
+# # get the slice record
+# credential = api.getCredential()
+# interface = api.registries[api.hrn]
+# registry = api.server_proxy(interface, credential)
+# records = registry.Resolve(xrn, credential)
+#
+# # make sure we get a local slice record
+# record = None
+# for tmp_record in records:
+# if tmp_record['type'] == 'slice' and \
+# not tmp_record['peer_authority']:
+# #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
+# slice_record = SliceRecord(dict=tmp_record)
+# if not record:
+# raise RecordNotFound(slice_hrn)
+#
+# # similar to CreateSliver, we must verify that the required records exist
+# # at this aggregate before we can issue a ticket
+# # parse rspec
+# rspec = RSpec(rspec_string)
+# requested_attributes = rspec.version.get_slice_attributes()
+#
+# # ensure site record exists
+# site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
+# # ensure slice record exists
+# slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
+# # ensure person records exists
+# # xxx users is undefined in this context
+# persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
+# # ensure slice attributes exists
+# slices.verify_slice_attributes(slice, requested_attributes)
+#
+# # get sliver info
+# slivers = slices.get_slivers(slice_hrn)
+#
+# if not slivers:
+# raise SliverDoesNotExist(slice_hrn)
+#
+# # get initscripts
+# initscripts = []
+# data = {
+# 'timestamp': int(time.time()),
+# 'initscripts': initscripts,
+# 'slivers': slivers
+# }
+#
+# # create the ticket
+# object_gid = record.get_gid_object()
+# new_ticket = SfaTicket(subject = object_gid.get_subject())
+# new_ticket.set_gid_caller(api.auth.client_gid)
+# new_ticket.set_gid_object(object_gid)
+# new_ticket.set_issuer(key=api.key, subject=self.hrn)
+# new_ticket.set_pubkey(object_gid.get_pubkey())
+# new_ticket.set_attributes(data)
+# new_ticket.set_rspec(rspec)
+# #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+# new_ticket.encode()
+# new_ticket.sign()
+#
+# return new_ticket.save_to_string(save_parents=True)
--- /dev/null
+import sys
+import xmlrpclib
+import socket
+from urlparse import urlparse
+
+from sfa.util.sfalogging import logger
+
+class NitosShell:
+    """
+    A simple xmlrpc shell to a NITOS Scheduler instance
+    This class can receive all NITOS API calls to the underlying testbed
+    For safety this is limited to a set of hard-coded calls
+    """
+
+    # whitelist of scheduler methods that may be proxied; __getattr__
+    # below rejects anything else
+    # NOTE(review): 'deleteUserFromSLice' looks like a typo for
+    # 'deleteUserFromSlice' -- confirm against the scheduler API before
+    # changing it, since it must match the remote method name
+    direct_calls = ['getNodes','getChannels','getSlices','getUsers','getReservedNodes',
+                    'getReservedChannels','getTestbedInfo',
+                    'reserveNodes','reserveChannels','addSlice','addUser','addUserToSlice',
+                    'addUserKey','addNode', 'addChannel',
+                    'updateReservedNodes','updateReservedChannels','updateSlice','updateUser',
+                    'updateNode', 'updateChannel',
+                    'deleteNode','deleteChannel','deleteSlice','deleteUser', 'deleteUserFromSLice',
+                    'deleteKey', 'releaseNodes', 'releaseChannels'
+                   ]
+
+    # the xmlrpc endpoint comes from the SFA_NITOS_URL config variable
+    def __init__ ( self, config ) :
+        url = config.SFA_NITOS_URL
+        self.proxy = xmlrpclib.Server(url, verbose = False, allow_none = True)
+
+    def __getattr__(self, name):
+        # return a closure that forwards the call to the scheduler under
+        # the 'scheduler.server.' namespace, but only for whitelisted names
+        def func(*args, **kwds):
+            actual_name=None
+            if name in NitosShell.direct_calls: actual_name=name
+            if not actual_name:
+                raise Exception, "Illegal method call %s for NITOS driver"%(name)
+            actual_name = "scheduler.server." + actual_name
+            result=getattr(self.proxy, actual_name)(*args, **kwds)
+            logger.debug('NitosShell %s (%s) returned ... '%(name,actual_name))
+            return result
+        return func
--- /dev/null
+from types import StringTypes
+from collections import defaultdict
+
+from sfa.util.sfatime import utcparse, datetime_to_epoch
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
+
+from sfa.rspecs.rspec import RSpec
+
+from sfa.nitos.nitosxrn import NitosXrn, hrn_to_nitos_slicename, xrn_to_hostname
+
+MAXINT = 2L**31-1
+
+class NitosSlices:
+    """Helpers used by the NITOS driver to reconcile the slice state
+    requested in an rspec (node/channel leases, users) with what the
+    NITOS scheduler currently knows."""
+
+    def __init__(self, driver):
+        # driver: the NitosDriver instance; provides the xmlrpc shell,
+        # filter_nitos_results() and testbedInfo
+        self.driver = driver
+
+    def get_sfa_peer(self, xrn):
+        """Return the site authority hrn for xrn, or None when this SFA
+        instance is itself the authority."""
+        hrn, type = urn_to_hrn(xrn)
+
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.driver.hrn:
+            sfa_peer = site_authority
+
+        return sfa_peer
+
+    def verify_slice_leases_nodes(self, slice, rspec_requested_nodes):
+        """Make the scheduler's node reservations for *slice* match the
+        rspec request: release reservations that are no longer wanted and
+        book the missing ones.  Returns the list of reservations added."""
+        nodes = self.driver.shell.getNodes({}, [])
+
+        # translate each requested rspec node into scheduler terms
+        requested_nodes = []
+        for node in rspec_requested_nodes:
+            requested_node = {}
+            nitos_nodes = []
+            nitos_nodes.extend(nodes)
+            # skip leases that belong to another slice
+            slice_name = hrn_to_nitos_slicename(node['slice_id'])
+            if slice_name != slice['slice_name']:
+                continue
+            hostname = xrn_to_hostname(node['component_id'])
+            nitos_node = self.driver.filter_nitos_results(nitos_nodes, {'hostname': hostname})
+            # skip nodes the testbed does not know about
+            if not nitos_node:
+                continue
+            nitos_node = nitos_node[0]
+            # fill the requested node with nitos ids
+            requested_node['slice_id'] = slice['slice_id']
+            requested_node['node_id'] = nitos_node['node_id']
+            requested_node['start_time'] = node['start_time']
+            # the rspec expresses duration in grains; the scheduler
+            # expects an absolute end time
+            requested_node['end_time'] = str(int(node['duration']) * int(self.driver.testbedInfo['grain']) + int(node['start_time']))
+            requested_nodes.append(requested_node)
+
+        # get actual nodes reservation data for the slice
+        reserved_nodes = self.driver.filter_nitos_results(self.driver.shell.getReservedNodes({}, []), {'slice_id': slice['slice_id']})
+
+        reserved_nodes_by_id = {}
+        for node in reserved_nodes:
+            reserved_nodes_by_id[node['reservation_id']] = {'slice_id': node['slice_id'],
+                                                            'node_id': node['node_id'],
+                                                            'start_time': node['start_time'],
+                                                            'end_time': node['end_time']}
+
+        # split current reservations into kept/deleted; whatever is left
+        # in requested_nodes afterwards has to be added
+        added_nodes = []
+        kept_nodes_id = []
+        deleted_nodes_id = []
+        for reservation_id in reserved_nodes_by_id:
+            if reserved_nodes_by_id[reservation_id] not in requested_nodes:
+                deleted_nodes_id.append(reservation_id)
+            else:
+                kept_nodes_id.append(reservation_id)
+                requested_nodes.remove(reserved_nodes_by_id[reservation_id])
+        added_nodes = requested_nodes
+
+        try:
+            deleted = self.driver.shell.releaseNodes({'reservation_ids': deleted_nodes_id})
+            for node in added_nodes:
+                added = self.driver.shell.reserveNodes({'slice_id': slice['slice_id'], 'start_time': node['start_time'], 'end_time': node['end_time'], 'nodes': [node['node_id']]})
+        except:
+            # best effort: log and carry on
+            logger.log_exc('Failed to add/remove slice leases nodes')
+
+        return added_nodes
+
+    def verify_slice_leases_channels(self, slice, rspec_requested_channels):
+        """Same as verify_slice_leases_nodes, for wireless channels.
+        Returns the list of channel reservations added."""
+        channels = self.driver.shell.getChannels({}, [])
+
+        requested_channels = []
+        for channel in rspec_requested_channels:
+            requested_channel = {}
+            nitos_channels = []
+            nitos_channels.extend(channels)
+            # skip leases that belong to another slice
+            slice_name = hrn_to_nitos_slicename(channel['slice_id'])
+            if slice_name != slice['slice_name']:
+                continue
+            channel_num = channel['channel_num']
+            nitos_channel = self.driver.filter_nitos_results(nitos_channels, {'channel': channel_num})
+            # skip channels the testbed does not know about (indexing [0]
+            # blindly would raise IndexError; mirrors the nodes variant)
+            if not nitos_channel:
+                continue
+            nitos_channel = nitos_channel[0]
+            # fill the requested channel with nitos ids
+            requested_channel['slice_id'] = slice['slice_id']
+            requested_channel['channel_id'] = nitos_channel['channel_id']
+            requested_channel['start_time'] = channel['start_time']
+            requested_channel['end_time'] = str(int(channel['duration']) * int(self.driver.testbedInfo['grain']) + int(channel['start_time']))
+            requested_channels.append(requested_channel)
+
+        # get actual channel reservation data for the slice
+        reserved_channels = self.driver.filter_nitos_results(self.driver.shell.getReservedChannels(), {'slice_id': slice['slice_id']})
+
+        reserved_channels_by_id = {}
+        for channel in reserved_channels:
+            reserved_channels_by_id[channel['reservation_id']] = {'slice_id': channel['slice_id'],
+                                                                  'channel_id': channel['channel_id'],
+                                                                  'start_time': channel['start_time'],
+                                                                  'end_time': channel['end_time']}
+
+        added_channels = []
+        kept_channels_id = []
+        deleted_channels_id = []
+        for reservation_id in reserved_channels_by_id:
+            if reserved_channels_by_id[reservation_id] not in requested_channels:
+                deleted_channels_id.append(reservation_id)
+            else:
+                kept_channels_id.append(reservation_id)
+                requested_channels.remove(reserved_channels_by_id[reservation_id])
+        added_channels = requested_channels
+
+        try:
+            deleted = self.driver.shell.releaseChannels({'reservation_ids': deleted_channels_id})
+            for channel in added_channels:
+                added = self.driver.shell.reserveChannels({'slice_id': slice['slice_id'], 'start_time': channel['start_time'], 'end_time': channel['end_time'], 'channels': [channel['channel_id']]})
+        except:
+            logger.log_exc('Failed to add/remove slice leases channels')
+
+        return added_channels
+
+    def free_egre_key(self):
+        """Return the first unused EGRE key (1..255) as a string.
+        NOTE(review): dead PLC leftover -- 'GetSliceTags' is not in the
+        NITOS shell whitelist, so this raises if ever called."""
+        used = set()
+        for tag in self.driver.shell.GetSliceTags({'tagname': 'egre_key'}):
+            used.add(int(tag['value']))
+
+        for i in range(1, 256):
+            if i not in used:
+                key = i
+                break
+        else:
+            raise KeyError("No more EGRE keys available")
+
+        return str(key)
+
+    def verify_slice(self, slice_hrn, slice_record, sfa_peer, options={}):
+        """Return the NITOS slice for slice_hrn, creating it first when
+        the scheduler does not know it yet."""
+        slicename = hrn_to_nitos_slicename(slice_hrn)
+        slices = self.driver.shell.getSlices({}, [])
+        slices = self.driver.filter_nitos_results(slices, {'slice_name': slicename})
+        if not slices:
+            slice = {'slice_name': slicename}
+            # add the slice
+            slice['slice_id'] = self.driver.shell.addSlice(slice)
+            slice['node_ids'] = []
+            slice['user_ids'] = []
+        else:
+            slice = slices[0]
+
+        return slice
+
+    def verify_users(self, slice_hrn, slice_record, users, sfa_peer, options={}):
+        """Make sure every user in *users* exists on the testbed and is a
+        member of the slice.  Returns the ids of users created here."""
+        # get slice info
+        slicename = hrn_to_nitos_slicename(slice_hrn)
+        slices = self.driver.shell.getSlices({}, [])
+        slice = self.driver.filter_nitos_results(slices, {'slice_name': slicename})[0]
+        added_users = []
+        for user in users:
+            user_urn = user['urn']
+            user_hrn, type = urn_to_hrn(user_urn)
+            username = str(user_hrn).split('.')[-1]
+            email = user['email']
+            # look for the user according to his username, then his email
+            nitos_users = self.driver.filter_nitos_results(self.driver.shell.getUsers(), {'username': username})
+            if not nitos_users:
+                nitos_users = self.driver.filter_nitos_results(self.driver.shell.getUsers(), {'email': email})
+
+            if not nitos_users:
+                # create the user; 'addUser' is the whitelisted call
+                # ('addUsers' does not exist in the NITOS shell and would
+                # raise "Illegal method call")
+                user_id = self.driver.shell.addUser({'username': email.split('@')[0], 'email': email})
+                added_users.append(user_id)
+                # add user keys
+                for key in user['keys']:
+                    self.driver.shell.addUserKey({'user_id': user_id, 'key': key})
+                # add the user to the slice
+                self.driver.shell.addUserToSlice({'slice_id': slice['slice_id'], 'user_id': user_id})
+            else:
+                # make sure the matching existing users are in the slice
+                for nitos_user in nitos_users:
+                    if not nitos_user['user_id'] in slice['user_ids']:
+                        self.driver.shell.addUserToSlice({'slice_id': slice['slice_id'], 'user_id': nitos_user['user_id']})
+
+        return added_users
+
+    def verify_keys(self, persons, users, options={}):
+        """Synchronize ssh keys between SFA users and testbed persons.
+        NOTE(review): dead PLC leftover -- references an undefined 'peer'
+        variable and PLC-only calls (GetKeys, AddPersonKey, ...) that the
+        NITOS shell does not whitelist; it would crash if ever called."""
+        # existing keys
+        key_ids = []
+        for person in persons:
+            key_ids.extend(person['key_ids'])
+        keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key'])
+        keydict = {}
+        for key in keylist:
+            keydict[key['key']] = key['key_id']
+        existing_keys = keydict.keys()
+        persondict = {}
+        for person in persons:
+            persondict[person['email']] = person
+
+        # add new keys
+        requested_keys = []
+        updated_persons = []
+        for user in users:
+            user_keys = user.get('keys', [])
+            updated_persons.append(user)
+            for key_string in user_keys:
+                requested_keys.append(key_string)
+                if key_string not in existing_keys:
+                    key = {'key': key_string, 'key_type': 'ssh'}
+                    try:
+                        if peer:
+                            person = persondict[user['email']]
+                            self.driver.shell.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
+                        key['key_id'] = self.driver.shell.AddPersonKey(user['email'], key)
+                        if peer:
+                            key_index = user_keys.index(key['key'])
+                            remote_key_id = user['key_ids'][key_index]
+                            self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
+                    finally:
+                        if peer:
+                            self.driver.shell.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
+
+        # remove old keys (only if we are not appending)
+        append = options.get('append', True)
+        if append == False:
+            removed_keys = set(existing_keys).difference(requested_keys)
+            for existing_key_id in keydict:
+                if keydict[existing_key_id] in removed_keys:
+                    try:
+                        if peer:
+                            self.driver.shell.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
+                        self.driver.shell.DeleteKey(existing_key_id)
+                    except:
+                        pass
--- /dev/null
+# specialized Xrn class for NITOS
+import re
+from sfa.util.xrn import Xrn
+
+# temporary helper functions to use this module instead of namespace
+def hostname_to_hrn (auth, login_base, hostname):
+    # node hrns live under <auth>.<login_base>
+    return NitosXrn(auth=auth+'.'+login_base,hostname=hostname).get_hrn()
+def hostname_to_urn(auth, login_base, hostname):
+    return NitosXrn(auth=auth+'.'+login_base,hostname=hostname).get_urn()
+def slicename_to_hrn (auth_hrn,site_name,slicename):
+    return NitosXrn(auth=auth_hrn+'.'+site_name,slicename=slicename).get_hrn()
+# hack to convert nitos user name to hrn
+def username_to_hrn (auth_hrn,site_name,username):
+    # deliberately reuses the slicename code path for user names
+    return NitosXrn(auth=auth_hrn+'.'+site_name,slicename=username).get_hrn()
+def email_to_hrn (auth_hrn, email):
+    return NitosXrn(auth=auth_hrn, email=email).get_hrn()
+def hrn_to_nitos_slicename (hrn):
+    return NitosXrn(xrn=hrn,type='slice').nitos_slicename()
+# removed-dangerous - was used for non-slice objects
+#def hrn_to_nitos_login_base (hrn):
+#    return NitosXrn(xrn=hrn,type='slice').nitos_login_base()
+def hrn_to_nitos_authname (hrn):
+    return NitosXrn(xrn=hrn,type='any').nitos_authname()
+def xrn_to_hostname(hrn):
+    # hostnames get their dots escaped when embedded in an hrn; undo that
+    return Xrn.unescape(NitosXrn(xrn=hrn, type='node').get_leaf())
+
+class NitosXrn (Xrn):
+    """Xrn subclass implementing the NITOS naming scheme for nodes,
+    slices, persons and interfaces."""
+
+    @staticmethod
+    def site_hrn (auth, login_base):
+        # a site hrn is <root_auth>.<login_base>
+        return '.'.join([auth,login_base])
+
+    def __init__ (self, auth=None, hostname=None, slicename=None, email=None, interface=None, **kwargs):
+        # exactly one of hostname/slicename/email/interface selects the
+        # object type; otherwise defer to the generic Xrn constructor
+        #def hostname_to_hrn(auth_hrn, login_base, hostname):
+        if hostname is not None:
+            self.type='node'
+            # keep only the first part of the DNS name
+            #self.hrn='.'.join( [auth,hostname.split(".")[0] ] )
+            # escape the '.' in the hostname
+            self.hrn='.'.join( [auth,Xrn.escape(hostname)] )
+            self.hrn_to_urn()
+        #def slicename_to_hrn(auth_hrn, slicename):
+        elif slicename is not None:
+            self.type='slice'
+            # dots are not allowed inside an hrn leaf
+            self.hrn = ".".join([auth] + [slicename.replace(".", "_")])
+            self.hrn_to_urn()
+        #def email_to_hrn(auth_hrn, email):
+        elif email is not None:
+            self.type='person'
+            # keep only the part before '@' and replace special chars into _
+            self.hrn='.'.join([auth,email.split('@')[0].replace(".", "_").replace("+", "_")])
+            self.hrn_to_urn()
+        elif interface is not None:
+            self.type = 'interface'
+            self.hrn = auth + '.' + interface
+            self.hrn_to_urn()
+        else:
+            Xrn.__init__ (self,**kwargs)
+
+    #def hrn_to_pl_slicename(hrn):
+    def nitos_slicename (self):
+        """Return the NITOS slice name: the hrn leaf, stripped of any
+        ':'-separated sliver-id suffix and of non [a-zA-Z0-9_] chars."""
+        self._normalize()
+        leaf = self.leaf
+        sliver_id_parts = leaf.split(':')
+        name = sliver_id_parts[0]
+        name = re.sub('[^a-zA-Z0-9_]', '', name)
+        #return self.nitos_login_base() + '_' + name
+        return name
+
+    #def hrn_to_pl_authname(hrn):
+    def nitos_authname (self):
+        """Return the last component of the hrn's authority part."""
+        self._normalize()
+        return self.authority[-1]
+
+    def interface_name(self):
+        # for type 'interface' the leaf is the interface name itself
+        self._normalize()
+        return self.leaf
+
+    def nitos_login_base (self):
+        """Return a login base (at most 20 chars) derived from the
+        authority, or from the leaf for authority records."""
+        self._normalize()
+        if self.type and self.type.startswith('authority'):
+            base = self.leaf
+        else:
+            base = self.authority[-1]
+
+        # Fix up names of GENI Federates
+        base = base.lower()
+        # NOTE(review): this pattern matches a literal backslash followed
+        # by one non-alphanumeric char; it was probably meant to strip
+        # all non-alphanumerics -- confirm against the PlXrn original
+        base = re.sub('\\\[^a-zA-Z0-9]', '', base)
+
+        if len(base) > 20:
+            base = base[len(base)-20:]
+
+        return base
+
+
+if __name__ == '__main__':
+
+    # quick manual check of the slicename -> hrn conversion
+    #nitosxrn = NitosXrn(auth="omf.nitos",slicename="aminesl")
+    #slice_hrn = nitosxrn.get_hrn()
+    #slice_name = NitosXrn(xrn="omf.nitos.aminesl",type='slice').nitos_slicename()
+    slicename = "giorgos_n"
+    hrn = slicename_to_hrn("pla", "nitos", slicename)
+    print hrn
service = Services({'login': login})
rspec_node['services'].append(service)
+ if_index = 0
for private_ip in addresses.get('private', []):
if_xrn = PlXrn(auth=self.driver.hrn,
interface='node%s' % (instance.hostId))
#'netmask': private_ip['network'],
'type': 'ipv%s' % str(private_ip['version'])}]
rspec_node['interfaces'].append(interface)
+ if_index += 1
# slivers always provide the ssh service
for public_ip in addresses.get('public', []):
metadata['security_groups'] = group_name
if node.get('component_id'):
metadata['component_id'] = node['component_id']
+ if node.get('client_id'):
+ metadata['client_id'] = node['client_id']
server = self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
image=image_id,
key_name = key_name,
files=files,
meta=metadata,
name=instance_name)
- if node.get('client_id'):
- server.metadata['client_id'] = node['client_id']
created_instances.append(server)
except Exception, err:
return (slice, slivers)
- def get_nodes_and_links(self, slice_xrn, slice=None,slivers=[], options={}, requested_slivers={}):
+ def get_nodes_and_links(self, slice_xrn, slice=None,slivers=[], options={}):
# if we are dealing with a slice that has no node just return
# and empty list
if slice_xrn:
rspec_node['component_name'] = node['hostname']
rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.driver.hrn, site['login_base']), 'authority+sa')
- if requested_slivers and node['hostname'] in requested_slivers:
- requested_sliver = requested_slivers[node['hostname']]
- if requested_sliver.get('client_id'):
- rspec_node['client_id'] = requested_sliver['client_id']
# do not include boot state (<available> element) in the manifest rspec
if not slice:
rspec_node['boot_state'] = node['boot_state']
- rspec_node['exclusive'] = 'false'
+
+ #add the exclusive tag to distinguish between Shared and Reservable nodes
+ if node['node_type'] == 'reservable':
+ rspec_node['exclusive'] = 'true'
+ else:
+ rspec_node['exclusive'] = 'false'
+
rspec_node['hardware_types'] = [HardwareType({'name': 'plab-pc'}),
HardwareType({'name': 'pc'})]
# only doing this because protogeni rspec needs
sliver = slivers[node['node_id']]
rspec_node['sliver_id'] = sliver['sliver_id']
rspec_node['slivers'] = [sliver]
+ for tag in sliver['tags']:
+ if tag['tagname'] == 'client_id':
+ rspec_node['client_id'] = tag['value']
# slivers always provide the ssh service
login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
return (rspec_nodes, links)
- def get_leases(self, slice=None, options={}):
+ def get_leases(self, slice_xrn=None, slice=None, options={}):
+ if slice_xrn and not slice:
+ return []
+
now = int(time.time())
filter={}
filter.update({'clip':now})
site_id=lease['site_id']
site=sites_dict[site_id]
- rspec_lease['lease_id'] = lease['lease_id']
+ #rspec_lease['lease_id'] = lease['lease_id']
rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, site['login_base'], lease['hostname'])
- slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
- slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ if slice_xrn:
+ slice_urn = slice_xrn
+ slice_hrn = urn_to_hrn(slice_urn)
+ else:
+ slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
+ slice_urn = hrn_to_urn(slice_hrn, 'slice')
rspec_lease['slice_id'] = slice_urn
rspec_lease['start_time'] = lease['t_from']
rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) / grain
return rspec_leases
- def get_rspec(self, slice_xrn=None, version = None, options={}, requested_slivers={}):
+ def get_rspec(self, slice_xrn=None, version = None, options={}):
version_manager = VersionManager()
version = version_manager.get_version(version)
rspec.xml.set('expires', datetime_to_string(utcparse(slice['expires'])))
if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
- nodes, links = self.get_nodes_and_links(slice_xrn, slice, slivers, options,
- requested_slivers=requested_slivers)
+ if slice_xrn and not slivers:
+ nodes, links = [], []
+ else:
+ nodes, links = self.get_nodes_and_links(slice_xrn, slice, slivers, options)
rspec.version.add_nodes(nodes)
rspec.version.add_links(links)
# add sliver defaults
rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
- leases = self.get_leases(slice)
+ leases = self.get_leases(slice_xrn, slice)
rspec.version.add_leases(leases)
return rspec.toxml()
# add/remove slice from nodes
requested_slivers = {}
slivers = rspec.version.get_nodes_with_slivers()
- for node in slivers:
- hostname = None
- if node.get('component_name'):
- hostname = node.get('component_name').strip()
- elif node.get('component_id'):
- hostname = xrn_to_hostname(node.get('component_id').strip())
- if hostname:
- requested_slivers[hostname] = node
- nodes = slices.verify_slice_nodes(slice, requested_slivers.keys(), peer)
+ nodes = slices.verify_slice_nodes(slice, slivers, peer)
# add/remove links links
slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
# add/remove leases
- requested_leases = []
- kept_leases = []
- for lease in rspec.version.get_leases():
- requested_lease = {}
- if not lease.get('lease_id'):
- requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
- requested_lease['start_time'] = lease.get('start_time')
- requested_lease['duration'] = lease.get('duration')
- else:
- kept_leases.append(int(lease['lease_id']))
- if requested_lease.get('hostname'):
- requested_leases.append(requested_lease)
-
- leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
+ rspec_requested_leases = rspec.version.get_leases()
+ leases = slices.verify_slice_leases(slice, rspec_requested_leases, peer)
+ #requested_leases = []
+ #kept_leases = []
+ #for lease in rspec.version.get_leases():
+ # requested_lease = {}
+ # if not lease.get('lease_id'):
+ # requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
+ # requested_lease['start_time'] = lease.get('start_time')
+ # requested_lease['duration'] = lease.get('duration')
+ # else:
+ # kept_leases.append(int(lease['lease_id']))
+ # if requested_lease.get('hostname'):
+ # requested_leases.append(requested_lease)
+
+ #leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
# handle MyPLC peer association.
# only used by plc and ple.
slices.handle_peer(site, slice, persons, peer)
return aggregate.get_rspec(slice_xrn=slice_urn,
- version=rspec.version,
- requested_slivers = requested_slivers)
+ version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
slicename = hrn_to_pl_slicename(slice_hrn)
from sfa.rspecs.rspec import RSpec
from sfa.planetlab.vlink import VLink
-from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename
+from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname
+
+import time
MAXINT = 2L**31-1
return sfa_peer
-    def verify_slice_leases(self, slice, requested_leases, kept_leases, peer):
-
-        leases = self.driver.shell.GetLeases({'name':slice['name']}, ['lease_id'])
+    def verify_slice_leases(self, slice, rspec_requested_leases, peer):
+        """Reconcile PLC leases for *slice* with the rspec request:
+        delete leases no longer requested, add the missing ones.
+        Returns the slice's current leases as fetched at entry."""
+
+        # 'clip' limits the query to leases that have not yet expired
+        leases = self.driver.shell.GetLeases({'name':slice['name'], 'clip':int(time.time())}, ['lease_id','name', 'hostname', 't_from', 't_until'])
         grain = self.driver.shell.GetLeaseGranularity()
-        current_leases = [lease['lease_id'] for lease in leases]
-        deleted_leases = list(set(current_leases).difference(kept_leases))
+
+        # translate the rspec leases into PLC lease dicts
+        requested_leases = []
+        for lease in rspec_requested_leases:
+            requested_lease = {}
+            slice_name = hrn_to_pl_slicename(lease['slice_id'])
+            # skip leases that belong to another slice or authority
+            if slice_name != slice['name']:
+                continue
+            elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
+                continue
+
+            hostname = xrn_to_hostname(lease['component_id'])
+            # fill the requested lease with PLC-side values
+            requested_lease['name'] = slice['name']
+            requested_lease['hostname'] = hostname
+            requested_lease['t_from'] = int(lease['start_time'])
+            # rspec duration is in grains; PLC wants an absolute end time
+            requested_lease['t_until'] = int(lease['duration']) * grain + int(lease['start_time'])
+            requested_leases.append(requested_lease)
+
+        # prepare actual slice leases by lease_id
+        leases_by_id = {}
+        for lease in leases:
+            leases_by_id[lease['lease_id']] = {'name': lease['name'], 'hostname': lease['hostname'], \
+                't_from': lease['t_from'], 't_until': lease['t_until']}
+
+        # keep the leases that are still requested, delete the rest;
+        # whatever remains in requested_leases has to be added
+        added_leases = []
+        kept_leases_id = []
+        deleted_leases_id = []
+        for lease_id in leases_by_id:
+            if leases_by_id[lease_id] not in requested_leases:
+                deleted_leases_id.append(lease_id)
+            else:
+                kept_leases_id.append(lease_id)
+                requested_leases.remove(leases_by_id[lease_id])
+        added_leases = requested_leases
+
         try:
             if peer:
                 self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
-            deleted=self.driver.shell.DeleteLeases(deleted_leases)
-            for lease in requested_leases:
-                added=self.driver.shell.AddLeases(lease['hostname'], slice['name'], int(lease['start_time']), int(lease['duration']) * grain + int(lease['start_time']))
+            self.driver.shell.DeleteLeases(deleted_leases_id)
+            for lease in added_leases:
+                self.driver.shell.AddLeases(lease['hostname'], slice['name'], lease['t_from'], lease['t_until'])
         except:
             logger.log_exc('Failed to add/remove slice leases')
         return leases
- def verify_slice_nodes(self, slice, requested_slivers, peer):
+ def verify_slice_nodes(self, slice, slivers, peer):
nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
current_slivers = [node['hostname'] for node in nodes]
+ requested_slivers = []
+ tags = []
+ for node in slivers:
+ hostname = None
+ if node.get('component_name'):
+ hostname = node.get('component_name').strip()
+ elif node.get('component_id'):
+ hostname = xrn_to_hostname(node.get('component_id').strip())
+ if node.get('client_id'):
+ tags.append({'slicename': slice['name'],
+ 'tagname': 'client_id',
+ 'value': node['client_id'],
+ 'node': hostname})
+ if hostname:
+ requested_slivers.append(hostname)
+
# remove nodes not in rspec
deleted_nodes = list(set(current_slivers).difference(requested_slivers))
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
self.driver.shell.AddSliceToNodes(slice['name'], added_nodes)
self.driver.shell.DeleteSliceFromNodes(slice['name'], deleted_nodes)
-
+
except:
logger.log_exc('Failed to add/remove slice from nodes')
+
+ # add tags
+ for tag in tags:
+ try:
+ self.driver.shell.AddSliceTag(tag['slicename'], tag['tagname'], tag['value'], tag['node'])
+ except:
+ logger.log_exc('Failed to add slice tag')
return nodes
def free_egre_key(self):
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+class Channel(Element):
+    """RSpec element describing a reservable NITOS wireless channel;
+    values map to/from the attributes of a <channel> XML tag."""
+
+    # flat list of attribute names this element carries
+    fields = [
+        'reservation_id',
+        'channel_num',
+        'frequency',
+        'standard',
+        'slice_id',
+        'start_time',
+        'duration',
+    ]
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+class Position3D(Element):
+    """RSpec element holding a node's 3D position (x, y, z coordinates)."""
+
+    fields = [
+        'x',
+        'y',
+        'z',
+    ]
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+class Spectrum(Element):
+    """Container element for the <spectrum> RSpec section (holds <channel>
+    children); carries no attributes of its own, hence no fields."""
+
+    fields = []
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.spectrum import Spectrum
+from sfa.rspecs.elements.channel import Channel
+
+
+class NITOSv1Channel:
+    """(De)serialize reservable NITOS wireless channels to/from the
+    <spectrum>/<channel> section of an SFA v1 RSpec."""
+
+    @staticmethod
+    def add_channels(xml, channels):
+        """Append a <spectrum> element (under <network>) holding one
+        <channel> child per dict in *channels*."""
+
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(channels) > 0:
+            # dirty hack that handles no resource manifest rspec
+            network_urn = "omf"
+            network_elem = xml.add_element('network', name = network_urn)
+        else:
+            network_elem = xml
+
+#        spectrum_elems = xml.xpath('//spectrum')
+#        spectrum_elem = xml.add_element('spectrum')
+
+#        if len(spectrum_elems) > 0:
+#            spectrum_elem = spectrum_elems[0]
+#        elif len(channels) > 0:
+#            spectrum_elem = xml.add_element('spectrum')
+#        else:
+#            spectrum_elem = xml
+
+        spectrum_elem = network_elem.add_instance('spectrum', [])
+
+        channel_elems = []
+        for channel in channels:
+            # only these keys of the channel dict are serialized as attributes
+            channel_fields = ['channel_num', 'frequency', 'standard']
+            channel_elem = spectrum_elem.add_instance('channel', channel, channel_fields)
+            channel_elems.append(channel_elem)
+
+
+    @staticmethod
+    def get_channels(xml, filter={}):
+        """Return Channel objects for every <channel> element matching
+        *filter* (default and non-default XML namespaces are both tried).
+        NOTE(review): mutable default for *filter* -- harmless as it is
+        only read, but callers should not mutate it."""
+        xpath = '//channel%s | //default:channel%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        channel_elems = xml.xpath(xpath)
+        return NITOSv1Channel.get_channel_objs(channel_elems)
+
+    @staticmethod
+    def get_channel_objs(channel_elems):
+        """Build Channel objects from raw <channel> XML elements.
+        NOTE(review): assumes channel_num/frequency/standard attributes are
+        always present; a missing one raises KeyError -- confirm against
+        the schema produced by add_channels above."""
+        channels = []
+        for channel_elem in channel_elems:
+            channel = Channel(channel_elem.attrib, channel_elem)
+            channel['channel_num'] = channel_elem.attrib['channel_num']
+            channel['frequency'] = channel_elem.attrib['frequency']
+            channel['standard'] = channel_elem.attrib['standard']
+
+            channels.append(channel)
+        return channels
+
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.channel import Channel
+
+
+
+class NITOSv1Lease:
+    """(De)serialize NITOS leases to/from the <lease> sections of an SFA
+    v1 RSpec.  A <lease> groups the nodes (and reserved channels) that
+    share the same (slice_id, start_time, duration) timeslot."""
+
+    @staticmethod
+    def add_leases(xml, leases, channels):
+        """Append one <lease> element per (slice, timeslot) group, with a
+        <node> child per leased node and a <channel> child per channel
+        reserved for the same slice and timeslot.
+        WARNING: destructively consumes *leases* -- the grouping loop
+        below empties the list passed by the caller."""
+
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(leases) > 0:
+            network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
+            network_elem = xml.add_element('network', name = network_urn)
+        else:
+            network_elem = xml
+
+        # group the leases by slice and timeslots
+        grouped_leases = []
+
+        while leases:
+            # the first remaining lease seeds the next group
+            slice_id = leases[0]['slice_id']
+            start_time = leases[0]['start_time']
+            duration = leases[0]['duration']
+            group = []
+
+            for lease in leases:
+                if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
+                    group.append(lease)
+
+            grouped_leases.append(group)
+
+            # remove the grouped leases so the while loop terminates
+            for lease1 in group:
+                leases.remove(lease1)
+
+        lease_elems = []
+        for lease in grouped_leases:
+            #lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+            lease_fields = ['slice_id', 'start_time', 'duration']
+            lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
+            lease_elems.append(lease_elem)
+
+            # add nodes of this lease
+            for node in lease:
+                lease_elem.add_instance('node', node, ['component_id'])
+
+            # add reserved channels of this lease
+            #channels = [{'channel_id': 1}, {'channel_id': 2}]
+            for channel in channels:
+                if channel['slice_id'] == lease[0]['slice_id'] and channel['start_time'] == lease[0]['start_time'] and channel['duration'] == lease[0]['duration']:
+                    lease_elem.add_instance('channel', channel, ['channel_num'])
+
+
+    @staticmethod
+    def get_leases(xml, filter={}):
+        """Return (leases, channels) parsed from every <lease> element
+        matching *filter*.  NOTE(review): mutable default for *filter* --
+        harmless as it is only read."""
+        xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        lease_elems = xml.xpath(xpath)
+        return NITOSv1Lease.get_lease_objs(lease_elems)
+
+    @staticmethod
+    def get_lease_objs(lease_elems):
+        """Flatten grouped <lease> elements back into one Lease object per
+        <node> child and one Channel object per <channel> child; returns
+        the (leases, channels) pair."""
+        leases = []
+        channels = []
+        for lease_elem in lease_elems:
+            #get nodes
+            node_elems = lease_elem.xpath('./default:node | ./node')
+            for node_elem in node_elems:
+                lease = Lease(lease_elem.attrib, lease_elem)
+                lease['slice_id'] = lease_elem.attrib['slice_id']
+                lease['start_time'] = lease_elem.attrib['start_time']
+                lease['duration'] = lease_elem.attrib['duration']
+                lease['component_id'] = node_elem.attrib['component_id']
+                leases.append(lease)
+            #get channels
+            channel_elems = lease_elem.xpath('./default:channel | ./channel')
+            for channel_elem in channel_elems:
+                channel = Channel(channel_elem.attrib, channel_elem)
+                channel['slice_id'] = lease_elem.attrib['slice_id']
+                channel['start_time'] = lease_elem.attrib['start_time']
+                channel['duration'] = lease_elem.attrib['duration']
+                channel['channel_num'] = channel_elem.attrib['channel_num']
+                channels.append(channel)
+
+        return (leases, channels)
+
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.position_3d import Position3D
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+
+
+class NITOSv1Node:
+    """(De)serialize NITOS nodes to/from <node> sections of an SFA v1 RSpec."""
+
+    @staticmethod
+    def add_nodes(xml, nodes):
+        """Append one <node> element per node dict under the <network> tag
+        (creating the <network> tag when absent), including location,
+        3D position, gateway, granularity, hardware type, interfaces,
+        services, tags and slivers."""
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
+            network_urn = nodes[0]['component_manager_id']
+            network_elem = xml.add_element('network', name = Xrn(network_urn).get_hrn())
+        else:
+            network_elem = xml
+
+        # needs to be improved to retrieve the gateway addr dynamically
+        gateway_addr = 'nitlab.inf.uth.gr'
+
+        node_elems = []
+        for node in nodes:
+            node_fields = ['component_manager_id', 'component_id', 'boot_state']
+            node_elem = network_elem.add_instance('node', node, node_fields)
+            node_elems.append(node_elem)
+
+            # set component_name attribute and hostname element
+            if 'component_id' in node and node['component_id']:
+                component_name = Xrn(xrn=node['component_id']).get_leaf()
+                node_elem.set('component_name', component_name)
+                hostname_elem = node_elem.add_element('hostname')
+                hostname_elem.set_text(component_name)
+
+            # set site id
+            if 'authority_id' in node and node['authority_id']:
+                node_elem.set('site_id', node['authority_id'])
+
+            # add location
+            location = node.get('location')
+            if location:
+                node_elem.add_instance('location', location, Location.fields)
+
+            # add 3D position of the node
+            position_3d = node.get('position_3d')
+            if position_3d:
+                node_elem.add_instance('position_3d', position_3d, Position3D.fields)
+
+            # all nitos nodes are exclusive
+            exclusive_elem = node_elem.add_element('exclusive')
+            exclusive_elem.set_text('TRUE')
+
+            # In order to access nitos nodes, one needs to pass through the
+            # nitos gateway, so we advertise the NITOS access gateway address
+            gateway_elem = node_elem.add_element('gateway')
+            gateway_elem.set_text(gateway_addr)
+
+            # add granularity of the reservation system
+            # (was: node.get('granularity')['grain'] before the guard, which
+            # raised TypeError whenever the node had no 'granularity' entry)
+            granularity = node.get('granularity')
+            if granularity:
+                granularity_elem = node_elem.add_element('granularity')
+                granularity_elem.set_text(str(granularity['grain']))
+
+            # add hardware type
+            # NOTE(review): set_text is fed None when 'hardware_type' is
+            # absent -- confirm XmlElement tolerates that
+            hardware_type_elem = node_elem.add_element('hardware_type')
+            hardware_type_elem.set_text(node.get('hardware_type'))
+
+            # (re-indented into the per-node loop: this block previously sat
+            # at loop level, so interfaces/services/tags/slivers were only
+            # attached once, to the last node)
+            if isinstance(node.get('interfaces'), list):
+                for interface in node.get('interfaces', []):
+                    node_elem.add_instance('interface', interface, ['component_id', 'client_id', 'ipv4'])
+
+            PGv2Services.add_services(node_elem, node.get('services', []))
+            tags = node.get('tags', [])
+            if tags:
+                for tag in tags:
+                    tag_elem = node_elem.add_element(tag['tagname'])
+                    tag_elem.set_text(tag['value'])
+            NITOSv1Sliver.add_slivers(node_elem, node.get('slivers', []))
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        """Attach each sliver to the <node> whose component_id matches it;
+        slivers may be plain hostname strings or dicts."""
+        component_ids = []
+        for sliver in slivers:
+            filter = {}
+            if isinstance(sliver, str):
+                filter['component_id'] = '*%s*' % sliver
+                sliver = {}
+            elif 'component_id' in sliver and sliver['component_id']:
+                filter['component_id'] = '*%s*' % sliver['component_id']
+            if not filter:
+                continue
+            nodes = NITOSv1Node.get_nodes(xml, filter)
+            if not nodes:
+                continue
+            node = nodes[0]
+            NITOSv1Sliver.add_slivers(node, sliver)
+
+    @staticmethod
+    def remove_slivers(xml, hostnames):
+        """Delete every <sliver> child of nodes whose component_id matches
+        one of *hostnames*."""
+        for hostname in hostnames:
+            nodes = NITOSv1Node.get_nodes(xml, {'component_id': '*%s*' % hostname})
+            for node in nodes:
+                slivers = NITOSv1Sliver.get_slivers(node.element)
+                for sliver in slivers:
+                    node.element.remove(sliver.element)
+
+    @staticmethod
+    def get_nodes(xml, filter={}):
+        """Return Node objects for every <node> element matching *filter*."""
+        xpath = '//node%s | //default:node%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        node_elems = xml.xpath(xpath)
+        return NITOSv1Node.get_node_objs(node_elems)
+
+    @staticmethod
+    def get_nodes_with_slivers(xml):
+        """Return Node objects only for nodes carrying at least one sliver."""
+        xpath = '//node[count(sliver)>0] | //default:node[count(default:sliver)>0]'
+        node_elems = xml.xpath(xpath)
+        return NITOSv1Node.get_node_objs(node_elems)
+
+
+    @staticmethod
+    def get_node_objs(node_elems):
+        """Build Node objects (location, bwlimit, interfaces, services,
+        slivers, tags, hardware types) from raw <node> XML elements."""
+        nodes = []
+        for node_elem in node_elems:
+            node = Node(node_elem.attrib, node_elem)
+            if 'site_id' in node_elem.attrib:
+                node['authority_id'] = node_elem.attrib['site_id']
+            # get location
+            location_elems = node_elem.xpath('./default:location | ./location')
+            locations = [loc_elem.get_instance(Location) for loc_elem in location_elems]
+            if len(locations) > 0:
+                node['location'] = locations[0]
+            # get bwlimit
+            bwlimit_elems = node_elem.xpath('./default:bw_limit | ./bw_limit')
+            bwlimits = [bwlimit_elem.get_instance(BWlimit) for bwlimit_elem in bwlimit_elems]
+            if len(bwlimits) > 0:
+                node['bwlimit'] = bwlimits[0]
+            # get interfaces
+            iface_elems = node_elem.xpath('./default:interface | ./interface')
+            ifaces = [iface_elem.get_instance(Interface) for iface_elem in iface_elems]
+            node['interfaces'] = ifaces
+            # get services
+            node['services'] = PGv2Services.get_services(node_elem)
+            # get slivers
+            node['slivers'] = NITOSv1Sliver.get_slivers(node_elem)
+            # get tags (everything that is not a known Node field)
+            node['tags'] = NITOSv1PLTag.get_pl_tags(node_elem, ignore=Node.fields+["hardware_type"])
+            # get hardware types
+            hardware_type_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
+            node['hardware_types'] = [hw_type.get_instance(HardwareType) for hw_type in hardware_type_elems]
+
+            # temporary... play nice with old slice manager rspec
+            if not node['component_name']:
+                hostname_elem = node_elem.find("hostname")
+                if hostname_elem is not None:
+                    node['component_name'] = hostname_elem.text
+
+            nodes.append(node)
+        return nodes
+
--- /dev/null
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.pltag import PLTag
+
+class NITOSv1PLTag:
+    """(De)serialize free-form tag elements (<tagname>value</tagname>)."""
+
+    @staticmethod
+    def add_pl_tag(xml, name, value):
+        """Append a single <name>value</name> child element to *xml*.
+        (was: a loop over an undefined 'pl_tags' variable, which raised
+        NameError on every call -- the signature supplies exactly one
+        name/value pair)"""
+        pl_tag_elem = xml.add_element(name)
+        pl_tag_elem.set_text(value)
+
+    @staticmethod
+    def get_pl_tags(xml, ignore=[]):
+        """Return a PLTag for every child of *xml* whose tag name is not
+        listed in *ignore*.  NOTE: mutable default for 'ignore' -- it is
+        only read here, so this is harmless."""
+        pl_tags = []
+        for elem in xml.iterchildren():
+            if elem.tag not in ignore:
+                pl_tag = PLTag({'tagname': elem.tag, 'value': elem.text})
+                pl_tags.append(pl_tag)
+        return pl_tags
+
--- /dev/null
+from sfa.util.xrn import Xrn
+from sfa.util.xml import XmlElement
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+
+from sfa.planetlab.plxrn import PlXrn
+
+class NITOSv1Sliver:
+    """(De)serialize the <sliver> children of SFA v1 RSpec <node> elements."""
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        """Append a <sliver> child to *xml* for each sliver dict (a single
+        dict is also accepted); sliver tags become nested elements and a
+        sliver_id, when present, overrides the 'name' attribute."""
+        if not slivers:
+            return
+        if not isinstance(slivers, list):
+            slivers = [slivers]
+        for sliver in slivers:
+            sliver_elem = xml.add_instance('sliver', sliver, ['name'])
+            tags = sliver.get('tags', [])
+            if tags:
+                for tag in tags:
+                    NITOSv1Sliver.add_sliver_attribute(sliver_elem, tag['tagname'], tag['value'])
+            if sliver.get('sliver_id'):
+                # derive the PL slice name from the sliver urn
+                name = PlXrn(xrn=sliver.get('sliver_id')).pl_slicename()
+                sliver_elem.set('name', name)
+
+    @staticmethod
+    def add_sliver_attribute(xml, name, value):
+        """Append a single <name>value</name> attribute element to *xml*."""
+        elem = xml.add_element(name)
+        elem.set_text(value)
+
+    @staticmethod
+    def get_sliver_attributes(xml):
+        """Return name/value Elements for every child of *xml* that is not
+        a known Sliver field."""
+        attribs = []
+        for elem in xml.iterchildren():
+            if elem.tag not in Sliver.fields:
+                xml_element = XmlElement(elem, xml.namespaces)
+                instance = Element(fields=xml_element, element=elem)
+                instance['name'] = elem.tag
+                instance['value'] = elem.text
+                attribs.append(instance)
+        return attribs
+
+    @staticmethod
+    def get_slivers(xml, filter={}):
+        """Return Sliver objects for the direct <sliver> children of *xml*,
+        inheriting component_id from the parent node when present.
+        NOTE(review): *filter* is accepted but never applied here."""
+        xpath = './default:sliver | ./sliver'
+        sliver_elems = xml.xpath(xpath)
+        slivers = []
+        for sliver_elem in sliver_elems:
+            sliver = Sliver(sliver_elem.attrib,sliver_elem)
+            if 'component_id' in xml.attrib:
+                sliver['component_id'] = xml.attrib['component_id']
+            sliver['tags'] = NITOSv1Sliver.get_sliver_attributes(sliver_elem)
+            slivers.append(sliver)
+        return slivers
+
for attribute in attributes:
if attribute['name'] == 'initscript':
xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
- elif tag['tagname'] == 'flack_info':
+ elif attribute['tagname'] == 'flack_info':
attrib_elem = xml.add_element('{%s}info' % self.namespaces['flack'])
attrib_dict = eval(tag['value'])
for (key, value) in attrib_dict.items():
else:
network_elem = xml
- lease_elems = []
- for lease in leases:
- lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
- lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+ # group the leases by slice and timeslots
+ grouped_leases = []
+
+ while leases:
+ slice_id = leases[0]['slice_id']
+ start_time = leases[0]['start_time']
+ duration = leases[0]['duration']
+ group = []
+
+ for lease in leases:
+ if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
+ group.append(lease)
+
+ grouped_leases.append(group)
+
+ for lease1 in group:
+ leases.remove(lease1)
+
+ lease_elems = []
+ for lease in grouped_leases:
+ #lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+ lease_fields = ['slice_id', 'start_time', 'duration']
+ lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
lease_elems.append(lease_elem)
logger.debug("SFAV1LEASE \t add_lease lease %s" %(lease))
+ # add nodes of this lease
+ for node in lease:
+ lease_elem.add_instance('node', node, ['component_id'])
+
+
+
+# lease_elems = []
+# for lease in leases:
+# lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+# lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+# lease_elems.append(lease_elem)
+
@staticmethod
def get_leases(xml, filter={}):
@staticmethod
def get_lease_objs(lease_elems):
- leases = []
+ leases = []
for lease_elem in lease_elems:
- lease = Lease(lease_elem.attrib, lease_elem)
- if lease.get('lease_id'):
- lease['lease_id'] = lease_elem.attrib['lease_id']
- lease['component_id'] = lease_elem.attrib['component_id']
- lease['slice_id'] = lease_elem.attrib['slice_id']
- lease['start_time'] = lease_elem.attrib['start_time']
- lease['duration'] = lease_elem.attrib['duration']
-
- leases.append(lease)
- return leases
+ #get nodes
+ node_elems = lease_elem.xpath('./default:node | ./node')
+ for node_elem in node_elems:
+ lease = Lease(lease_elem.attrib, lease_elem)
+ lease['slice_id'] = lease_elem.attrib['slice_id']
+ lease['start_time'] = lease_elem.attrib['start_time']
+ lease['duration'] = lease_elem.attrib['duration']
+ lease['component_id'] = node_elem.attrib['component_id']
+ leases.append(lease)
+
+ return leases
+
+
+
+
+
+# leases = []
+# for lease_elem in lease_elems:
+# lease = Lease(lease_elem.attrib, lease_elem)
+# if lease.get('lease_id'):
+# lease['lease_id'] = lease_elem.attrib['lease_id']
+# lease['component_id'] = lease_elem.attrib['component_id']
+# lease['slice_id'] = lease_elem.attrib['slice_id']
+# lease['start_time'] = lease_elem.attrib['start_time']
+# lease['duration'] = lease_elem.attrib['duration']
+
+# leases.append(lease)
+# return leases
if location:
node_elem.add_instance('location', location, Location.fields)
- # add granularity of the reservation system
- granularity = node.get('granularity')
- if granularity:
- node_elem.add_instance('granularity', granularity, granularity.fields)
+ # add exclusive tag to distinguish between Reservable and Shared nodes
+ exclusive_elem = node_elem.add_element('exclusive')
+ if node.get('exclusive') and node.get('exclusive') == 'true':
+ exclusive_elem.set_text('TRUE')
+ # add granularity of the reservation system
+ granularity = node.get('granularity')
+ if granularity:
+ node_elem.add_instance('granularity', granularity, granularity.fields)
+ else:
+ exclusive_elem.set_text('FALSE')
if isinstance(node.get('interfaces'), list):
self.user_options = user_options
self.elements = {}
if rspec:
- self.parse_xml(rspec)
+ if version:
+ self.version = self.version_manager.get_version(version)
+ self.parse_xml(rspec, version)
+ else:
+ self.parse_xml(rspec)
elif version:
self.create(version)
else:
"""
self.version = self.version_manager.get_version(version)
self.namespaces = self.version.namespaces
- self.parse_xml(self.version.template)
+ self.parse_xml(self.version.template, self.version)
# eg. 2011-03-23T19:53:28Z
date_format = '%Y-%m-%dT%H:%M:%SZ'
now = datetime.utcnow()
self.xml.set('generated', generated_ts)
- def parse_xml(self, xml):
+ def parse_xml(self, xml, version=None):
self.xml.parse_xml(xml)
- self.version = None
- if self.xml.schema:
- self.version = self.version_manager.get_version_by_schema(self.xml.schema)
- else:
- #raise InvalidRSpec('unknown rspec schema: %s' % schema)
- # TODO: Should start raising an exception once SFA defines a schema.
- # for now we just default to sfa
- self.version = self.version_manager.get_version({'type':'sfa','version': '1'})
+ if not version:
+ if self.xml.schema:
+ self.version = self.version_manager.get_version_by_schema(self.xml.schema)
+ else:
+ #raise InvalidRSpec('unknown rspec schema: %s' % schema)
+ # TODO: Should start raising an exception once SFA defines a schema.
+ # for now we just default to sfa
+ self.version = self.version_manager.get_version({'type':'sfa','version': '1'})
self.version.xml = self.xml
self.namespaces = self.xml.namespaces
SLIVER_TYPE='SLIVER_TYPE',
LEASE='LEASE',
GRANULARITY='GRANULARITY',
+ SPECTRUM='SPECTRUM',
+ CHANNEL='CHANNEL',
+ POSITION_3D ='POSITION_3D',
)
class RSpecElement:
--- /dev/null
+from copy import deepcopy
+from lxml import etree
+
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn
+from sfa.rspecs.version import RSpecVersion
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
+from sfa.rspecs.elements.versions.nitosv1Node import NITOSv1Node
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1Lease import NITOSv1Lease
+from sfa.rspecs.elements.versions.nitosv1Channel import NITOSv1Channel
+
+class NITOSv1(RSpecVersion):
+    """SFA v1 RSpec dialect for the NITOS testbed: delegates node, sliver,
+    lease and channel handling to the NITOSv1* element helpers."""
+    enabled = True
+    type = 'NITOS'
+    content_type = '*'
+    version = '1'
+    schema = None
+    namespace = None
+    extensions = {}
+    namespaces = None
+    # empty skeleton used when creating a fresh rspec of this version
+    template = '<RSpec type="%s"></RSpec>' % type
+
+    # Network
+    def get_networks(self):
+        """Return one instance per <network> tag (name/slice fields only)."""
+        network_elems = self.xml.xpath('//network')
+        networks = [network_elem.get_instance(fields=['name', 'slice']) for \
+                    network_elem in network_elems]
+        return networks
+
+
+    def add_network(self, network):
+        """Return the <network name=...> tag, creating it if missing."""
+        network_tags = self.xml.xpath('//network[@name="%s"]' % network)
+        if not network_tags:
+            network_tag = self.xml.add_element('network', name=network)
+        else:
+            network_tag = network_tags[0]
+        return network_tag
+
+
+    # Nodes
+
+    def get_nodes(self, filter=None):
+        return NITOSv1Node.get_nodes(self.xml, filter)
+
+    def get_nodes_with_slivers(self):
+        return NITOSv1Node.get_nodes_with_slivers(self.xml)
+
+    def add_nodes(self, nodes, network = None, no_dupes=False):
+        NITOSv1Node.add_nodes(self.xml, nodes)
+
+    def merge_node(self, source_node_tag, network, no_dupes=False):
+        """Copy a node tag from another rspec under *network*.
+        NOTE(review): 'node' is undefined in this scope and
+        'get_node_element' is not defined on this class -- the
+        no_dupes=True path raises NameError; confirm against the sfav1
+        original this was copied from."""
+        if no_dupes and self.get_node_element(node['hostname']):
+            # node already exists
+            return
+
+        network_tag = self.add_network(network)
+        network_tag.append(deepcopy(source_node_tag))
+
+    # Slivers
+
+    def add_slivers(self, hostnames, attributes=[], sliver_urn=None, append=False):
+        """Mark the listed hosts as slivers of *sliver_urn*; unless *append*
+        is set, nodes without slivers are pruned from the rspec."""
+        # add slice name to network tag
+        network_tags = self.xml.xpath('//network')
+        if network_tags:
+            network_tag = network_tags[0]
+            network_tag.set('slice', urn_to_hrn(sliver_urn)[0])
+
+        # add slivers
+        sliver = {'name':sliver_urn,
+                  'pl_tags': attributes}
+        for hostname in hostnames:
+            if sliver_urn:
+                sliver['name'] = sliver_urn
+            node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
+            if not node_elems:
+                continue
+            node_elem = node_elems[0]
+            NITOSv1Sliver.add_slivers(node_elem.element, sliver)
+
+        # remove all nodes without slivers
+        if not append:
+            for node_elem in self.get_nodes():
+                if not node_elem['slivers']:
+                    parent = node_elem.element.getparent()
+                    parent.remove(node_elem.element)
+
+
+    def remove_slivers(self, slivers, network=None, no_dupes=False):
+        NITOSv1Node.remove_slivers(self.xml, slivers)
+
+    def get_slice_attributes(self, network=None):
+        """Return default sliver attributes (node_id None) plus per-node
+        sliver attributes (node_id set to the node's component_name)."""
+        attributes = []
+        nodes_with_slivers = self.get_nodes_with_slivers()
+        for default_attribute in self.get_default_sliver_attributes(network):
+            attribute = default_attribute.copy()
+            attribute['node_id'] = None
+            attributes.append(attribute)
+        for node in nodes_with_slivers:
+            nodename=node['component_name']
+            sliver_attributes = self.get_sliver_attributes(nodename, network)
+            for sliver_attribute in sliver_attributes:
+                sliver_attribute['node_id'] = nodename
+                attributes.append(sliver_attribute)
+        return attributes
+
+
+    def add_sliver_attribute(self, component_id, name, value, network=None):
+        """Attach a name/value attribute to the first sliver of the node
+        matching *component_id*; logs an error when the node is missing."""
+        nodes = self.get_nodes({'component_id': '*%s*' % component_id})
+        if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
+            node = nodes[0]
+            slivers = NITOSv1Sliver.get_slivers(node)
+            if slivers:
+                sliver = slivers[0]
+                NITOSv1Sliver.add_sliver_attribute(sliver, name, value)
+        else:
+            # should this be an assert / raise an exception?
+            logger.error("WARNING: failed to find component_id %s" % component_id)
+
+    def get_sliver_attributes(self, component_id, network=None):
+        """Return the attributes of the first sliver on the node matching
+        *component_id*, or an empty list."""
+        nodes = self.get_nodes({'component_id': '*%s*' % component_id})
+        attribs = []
+        if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
+            node = nodes[0]
+            slivers = NITOSv1Sliver.get_slivers(node.element)
+            if slivers is not None and isinstance(slivers, list) and len(slivers) > 0:
+                sliver = slivers[0]
+                attribs = NITOSv1Sliver.get_sliver_attributes(sliver.element)
+        return attribs
+
+    def remove_sliver_attribute(self, component_id, name, value, network=None):
+        """Remove the matching name/value attribute element, if present."""
+        attribs = self.get_sliver_attributes(component_id)
+        for attrib in attribs:
+            if attrib['name'] == name and attrib['value'] == value:
+                #attrib.element.delete()
+                parent = attrib.element.getparent()
+                parent.remove(attrib.element)
+
+    def add_default_sliver_attribute(self, name, value, network=None):
+        """Add a name/value pair to the <sliver_defaults> section (created
+        under the selected <network> when missing)."""
+        if network:
+            defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network)
+        else:
+            defaults = self.xml.xpath("//sliver_defaults")
+        if not defaults:
+            if network:
+                network_tag = self.xml.xpath("//network[@name='%s']" % network)
+            else:
+                network_tag = self.xml.xpath("//network")
+            if isinstance(network_tag, list):
+                network_tag = network_tag[0]
+            defaults = network_tag.add_element('sliver_defaults')
+        elif isinstance(defaults, list):
+            defaults = defaults[0]
+        NITOSv1Sliver.add_sliver_attribute(defaults, name, value)
+
+    def get_default_sliver_attributes(self, network=None):
+        """Return the attributes stored in <sliver_defaults>, or []."""
+        if network:
+            defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network)
+        else:
+            defaults = self.xml.xpath("//sliver_defaults")
+        if not defaults: return []
+        return NITOSv1Sliver.get_sliver_attributes(defaults[0])
+
+    def remove_default_sliver_attribute(self, name, value, network=None):
+        """Remove the matching name/value pair from <sliver_defaults>."""
+        attribs = self.get_default_sliver_attributes(network)
+        for attrib in attribs:
+            if attrib['name'] == name and attrib['value'] == value:
+                #attrib.element.delete()
+                parent = attrib.element.getparent()
+                parent.remove(attrib.element)
+
+    # Links
+
+    def get_links(self, network=None):
+        return PGv2Link.get_links(self.xml)
+
+    def get_link_requests(self):
+        return PGv2Link.get_link_requests(self.xml)
+
+    def add_links(self, links):
+        """Add link elements under the first <network> (or the root)."""
+        networks = self.get_networks()
+        if len(networks) > 0:
+            xml = networks[0].element
+        else:
+            xml = self.xml
+        PGv2Link.add_links(xml, links)
+
+    def add_link_requests(self, links):
+        PGv2Link.add_link_requests(self.xml, links)
+
+    # utility
+
+    def merge(self, in_rspec):
+        """
+        Merge contents for specified rspec with current rspec
+        """
+
+        if not in_rspec:
+            return
+
+        from sfa.rspecs.rspec import RSpec
+        if isinstance(in_rspec, RSpec):
+            rspec = in_rspec
+        else:
+            rspec = RSpec(in_rspec)
+        if rspec.version.type.lower() == 'protogeni':
+            from sfa.rspecs.rspec_converter import RSpecConverter
+            in_rspec = RSpecConverter.to_sfa_rspec(rspec.toxml())
+            rspec = RSpec(in_rspec)
+
+        # just copy over all networks
+        # NOTE(review): current_networks initially holds network *objects*
+        # while names (strings) are appended below, so the membership test
+        # never matches this rspec's own pre-existing networks -- confirm
+        # against the sfav1 original.
+        current_networks = self.get_networks()
+        networks = rspec.version.get_networks()
+        for network in networks:
+            current_network = network.get('name')
+            if current_network and current_network not in current_networks:
+                self.xml.append(network.element)
+                current_networks.append(current_network)
+
+    # Leases
+
+    def get_leases(self, filter=None):
+        return NITOSv1Lease.get_leases(self.xml, filter)
+
+    def add_leases(self, leases, channels, network = None, no_dupes=False):
+        NITOSv1Lease.add_leases(self.xml, leases, channels)
+
+    # Spectrum
+
+    def get_channels(self, filter=None):
+        return NITOSv1Channel.get_channels(self.xml, filter)
+
+    def add_channels(self, channels, network = None, no_dupes=False):
+        NITOSv1Channel.add_channels(self.xml, channels)
+
+
+
+if __name__ == '__main__':
+    # ad-hoc smoke test: parse a local rspec file and print its nodes
+    from sfa.rspecs.rspec import RSpec
+    from sfa.rspecs.rspec_elements import *
+    r = RSpec('/tmp/resources.rspec')
+    # was: r.load_rspec_elements(SFAv1.elements) -- SFAv1 is not defined in
+    # this module (leftover from the sfav1.py file this was copied from);
+    # reference this module's class instead.
+    # TODO confirm NITOSv1 (or its RSpecVersion base) exposes 'elements'.
+    r.load_rspec_elements(NITOSv1.elements)
+    print r.get(RSpecElements.NODE)
logger.info("load from xml, keys=%s"%xml_dict.keys())
return make_record_dict (xml_dict)
+####################
+# augment local records with data from builtin relationships
+# expose related objects as a list of hrns
+# we pick names that clearly won't conflict with the ones used in the old approach,
+# were the relationships data came from the testbed side
+# for each type, a dict of the form {<field-name-exposed-in-record>:<alchemy_accessor_name>}
+# so after that, an 'authority' record will e.g. have a 'reg-pis' field with the hrns of its pi-users
+augment_map={'authority': {'reg-pis':'reg_pis',},
+ 'slice': {'reg-researchers':'reg_researchers',},
+ 'user': {'reg-pi-authorities':'reg_authorities_as_pi',
+ 'reg-slices':'reg_slices_as_researcher',},
+ }
+
+def augment_with_sfa_builtins (local_record):
+ # don't ruin the import of that file in a client world
+ from sfa.util.xrn import Xrn
+ # add a 'urn' field
+ setattr(local_record,'reg-urn',Xrn(xrn=local_record.hrn,type=local_record.type).urn)
+ # users have keys and this is needed to synthesize 'users' sent over to CreateSliver
+ if local_record.type=='user':
+ user_keys = [ key.key for key in local_record.reg_keys ]
+ setattr(local_record, 'reg-keys', user_keys)
+ # search in map according to record type
+ type_map=augment_map.get(local_record.type,{})
+ # use type-dep. map to do the job
+ for (field_name,attribute) in type_map.items():
+ # get related objects
+ related_records = getattr(local_record,attribute,[])
+ hrns = [ r.hrn for r in related_records ]
+ setattr (local_record, field_name, hrns)
+
+
# fallback
return "** undef_datetime **"
- def todict (self):
+ # it may be important to exclude relationships, which fortunately
+ #
+ def todict (self, exclude_types=[]):
d=self.__dict__
- keys=[k for k in d.keys() if not k.startswith('_')]
+ def exclude (k,v):
+ if k.startswith('_'): return True
+ if exclude_types:
+ for exclude_type in exclude_types:
+ if isinstance (v,exclude_type): return True
+ return False
+ keys=[k for (k,v) in d.items() if not exclude(k,v)]
return dict ( [ (k,d[k]) for k in keys ] )
def toxml(self):
if os.path.isfile(path + '/' + 'xmlsec1'):
self.xmlsec_path = path + '/' + 'xmlsec1'
break
+ if not self.xmlsec_path:
+ logger.warn("Could not locate binary for xmlsec1 - SFA will be unable to sign stuff !!")
def get_subject(self):
if not self.gidObject:
self.decode()
- return self.gidObject.get_printable_subject()
+ return self.gidObject.get_subject()
# sounds like this should be __repr__ instead ??
def get_summary_tostring(self):
# you have loaded an existing signed credential, do not call encode() or sign() on it.
def sign(self):
- if not self.issuer_privkey or not self.issuer_gid:
+ if not self.issuer_privkey:
+ logger.warn("Cannot sign credential (no private key)")
+ return
+ if not self.issuer_gid:
+ logger.warn("Cannot sign credential (no issuer gid)")
return
doc = parseString(self.get_xml())
sigs = doc.getElementsByTagName("signatures")[0]
print self.dump_string(*args, **kwargs)
- def dump_string(self, dump_parents=False):
+ def dump_string(self, dump_parents=False, show_xml=False):
result=""
result += "CREDENTIAL %s\n" % self.get_subject()
filename=self.get_filename()
result += "\nPARENT"
result += self.parent.dump_string(True)
+ if show_xml:
+ try:
+ tree = etree.parse(StringIO(self.xml))
+ aside = etree.tostring(tree, pretty_print=True)
+ result += "\nXML\n"
+ result += aside
+ result += "\nEnd XML\n"
+ except:
+ import traceback
+ print "exc. Credential.dump_string / XML"
+ traceback.print_exc()
+
return result
if not xrn: xrn = ""
# user has specified xrn : guess if urn or hrn
self.id = id
+ self.type = type
+
if Xrn.is_urn(xrn):
self.hrn=None
self.urn=xrn
- if id:
- self.urn = "%s-%s" % (self.urn, str(id))
self.urn_to_hrn()
- if type:
- self.type=type
+ if id:
self.hrn_to_urn()
else:
self.urn=None
update the authority section of an existing urn
"""
authority_hrn = self.get_authority_hrn()
- old_hrn_parts = Xrn.hrn_split(self.hrn)
- old_hrn_parts[0] = authority
- hrn = ".".join(old_hrn_parts)
- self.hrn = hrn
- self.hrn_to_urn()
+ if not authority_hrn.startswith(authority+"."):
+ self.hrn = authority + "." + self.hrn
+ self.hrn_to_urn()
self._normalize()
def urn_to_hrn(self):
self.authority = Xrn.hrn_auth_list(self.hrn)
name = Xrn.hrn_leaf(self.hrn)
# separate name from id
- name_parts = name.split("-")
- name = name_parts[0]
authority_string = self.get_authority_urn()
if self.type == None: