%define name sfa
%define version 2.1
-%define taglevel 14
+%define taglevel 16
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
[ "$1" -ge "1" ] && service sfa-cm restart || :
%changelog
+* Mon Oct 01 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-16
+- various tweaks for the nitos driver
+
+* Wed Sep 26 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-15
+- first stab at a driver for the NITOS/OMF testbed (sep. pkg)
+- deeper cleanup of the data-dependencies between SFA and the testbed
+- in particular, sfi create issues Resolve(details=False)
+- for that purpose, Resolve exposes reg-* keys for SFA builtins
+- which in turn allows sfi list to show PIs, slice members and keys
+- NOTE: sfa-config-tty is known to be broken w/ less frequently used func's
+- Shows stacktrace when startup fails (DB conn, wrong flavour, etc..)
+
* Mon Sep 17 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-14
- configurable data-dir (/var/lib/sfa)
- no more dependent on myplc-config
-
+###
+#
+# Thierry - 2012 sept 21
+#
+# it seems terribly wrong that the client should decide to use PG- or PL- related code
+# esp. in a context where we're trying to have more and more kinds of testbeds involved
+#
+# also, the 'users' field that CreateSliver is expecting (the key point here is to get this right)
+# is specified to have at least a urn and a list of keys, both of these being supported natively
+# in the sfa db
+# So long story short, it seems to me that we should have a common code that fills in 'urn' and 'keys'
+# and then code that tentatively tries to add as much extra info as we can get on these users
+#
+# the fact e.g. that PlanetLab insists on getting a first_name and last_name is not
+# exactly consistent with the GENI spec. of CreateSliver
+#
def pg_users_arg(records):
users = []
for record in records:
if record['type'] != 'user':
continue
- user = {'urn': record['geni_urn'],
- 'keys': record['keys']}
+ user = {'urn': record['reg-urn'],
+ 'keys': record['reg-keys'],
+ }
users.append(user)
return users
-def sfa_users_arg(records, slice_record):
+def sfa_users_arg (records, slice_record):
users = []
for record in records:
if record['type'] != 'user':
continue
- try:
- user = {'urn': record['geni_urn'], #
- 'keys': record['keys'],
- 'email': record['email'], # needed for MyPLC
- 'person_id': record['person_id'], # needed for MyPLC
- 'first_name': record['first_name'], # needed for MyPLC
- 'last_name': record['last_name'], # needed for MyPLC
- 'slice_record': slice_record, # needed for legacy refresh peer
- 'key_ids': record['key_ids'] # needed for legacy refresh peer
- }
- except:
- # handle NITOS user args
- user = {'urn': record['geni_urn'],
- 'keys': record['keys'],
- 'email': record['email'],
- 'user_id': record['user_id'],
+ user = {'urn': record['reg-urn'],
+ 'keys': record['reg-keys'],
'slice_record': slice_record,
}
-
+ # fill as much stuff as possible from planetlab or similar
+ # note that reg-email is not yet available
+ pl_fields = ['email', 'person_id', 'first_name', 'last_name', 'key_ids']
+ nitos_fields = [ 'email', 'user_id' ]
+ extra_fields = list ( set(pl_fields).union(set(nitos_fields)))
+ # try to fill all these in
+ for field in extra_fields:
+ if record.has_key(field): user[field]=record[field]
users.append(user)
- return users
+ return users
def sfa_to_pg_users_arg(users):
f.close()
return
+# used in sfi list
+def terminal_render (records,options):
+ # sort records by type
+ grouped_by_type={}
+ for record in records:
+ type=record['type']
+ if type not in grouped_by_type: grouped_by_type[type]=[]
+ grouped_by_type[type].append(record)
+ group_types=grouped_by_type.keys()
+ group_types.sort()
+ for type in group_types:
+ group=grouped_by_type[type]
+# print 20 * '-', type
+ try: renderer=eval('terminal_render_'+type)
+ except: renderer=terminal_render_default
+ for record in group: renderer(record,options)
+
+def render_plural (how_many, name,names=None):
+ if not names: names="%ss"%name
+ if how_many<=0: return "No %s"%name
+ elif how_many==1: return "1 %s"%name
+ else: return "%d %s"%(how_many,names)
+
+def terminal_render_default (record,options):
+ print "%s (%s)" % (record['hrn'], record['type'])
+def terminal_render_user (record, options):
+ print "%s (User)"%record['hrn'],
+ if record.get('reg-pi-authorities',None): print " [PI at %s]"%(" and ".join(record['reg-pi-authorities'])),
+ if record.get('reg-slices',None): print " [IN slices %s]"%(" and ".join(record['reg-slices'])),
+ user_keys=record.get('reg-keys',[])
+ if not options.verbose:
+ print " [has %s]"%(render_plural(len(user_keys),"key"))
+ else:
+ print ""
+ for key in user_keys: print 8*' ',key.strip("\n")
+
+def terminal_render_slice (record, options):
+ print "%s (Slice)"%record['hrn'],
+ if record.get('reg-researchers',None): print " [USERS %s]"%(" and ".join(record['reg-researchers'])),
+# print record.keys()
+ print ""
+def terminal_render_authority (record, options):
+ print "%s (Authority)"%record['hrn'],
+ if record.get('reg-pis',None): print " [PIS %s]"%(" and ".join(record['reg-pis'])),
+ print ""
+def terminal_render_node (record, options):
+ print "%s (Node)"%record['hrn']
+
# minimally check a key argument
def check_ssh_key (key):
good_ssh_key = r'^.*(?:ssh-dss|ssh-rsa)[ ]+[A-Za-z0-9+/=]+(?: .*)?$'
if command == 'list':
parser.add_option("-r", "--recursive", dest="recursive", action='store_true',
help="list all child records", default=False)
+ parser.add_option("-v", "--verbose", dest="verbose", action='store_true',
+ help="gives details, like user keys", default=False)
if command in ("delegate"):
parser.add_option("-u", "--user",
action="store_true", dest="delegate_user", default=False,
# Main: parse arguments and dispatch to command
#
def dispatch(self, command, command_options, command_args):
- return getattr(self, command)(command_options, command_args)
+ method=getattr(self, command,None)
+ if not method:
+ print "Unknown command %s"%command
+ return
+ return method(command_options, command_args)
def main(self):
self.sfi_parser = self.create_parser()
try:
self.dispatch(command, command_options, command_args)
- except KeyError:
- self.logger.critical ("Unknown command %s"%command)
+ except:
+ self.logger.log_exc ("sfi command %s failed"%command)
sys.exit(1)
return
raise Exception, "Not enough parameters for the 'list' command"
# filter on person, slice, site, node, etc.
- # THis really should be in the self.filter_records funct def comment...
+ # This really should be in the self.filter_records funct def comment...
list = filter_records(options.type, list)
- for record in list:
- print "%s (%s)" % (record['hrn'], record['type'])
+ terminal_render (list, options)
if options.file:
save_records_to_file(options.file, list, options.fileformat)
return
self.print_help()
sys.exit(1)
hrn = args[0]
- # xxx should set details=True here but that's not in the xmlrpc interface ...
- # record_dicts = self.registry().Resolve(hrn, self.my_credential_string, details=True)
- record_dicts = self.registry().Resolve(hrn, self.my_credential_string)
+ # explicitly require Resolve to run in details mode
+ record_dicts = self.registry().Resolve(hrn, self.my_credential_string, {'details':True})
record_dicts = filter_records(options.type, record_dicts)
if not record_dicts:
self.logger.error("No record of type %s"% options.type)
# keys: [<ssh key A>, <ssh key B>]
# }]
users = []
+ # xxx Thierry 2012 sept. 21
+ # contrary to what I was first thinking, calling Resolve with details=False does not yet work properly here
+ # I am turning details=True on again on a - hopefully - temporary basis, just to get this whole thing to work again
slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string])
- if slice_records and 'researcher' in slice_records[0] and slice_records[0]['researcher']!=[]:
+ # slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string], {'details':True})
+ if slice_records and 'reg-researchers' in slice_records[0] and slice_records[0]['reg-researchers']:
slice_record = slice_records[0]
- user_hrns = slice_record['researcher']
+ user_hrns = slice_record['reg-researchers']
user_urns = [hrn_to_urn(hrn, 'user') for hrn in user_hrns]
user_records = self.registry().Resolve(user_urns, [self.my_credential_string])
slice_urn=xrn.get_urn()
slice_hrn=xrn.get_hrn()
+ # xxx sounds like GetTicket is dead, but if that gets resurrected we might wish
+ # to pass 'users' over to the driver as well
return self.driver.get_ticket (slice_urn, slice_hrn, creds, rspec, options)
# to the actual method calls anyway
self.manager = manager(config)
else:
+ # that's what happens when there's something wrong with the db
+ # or any bad stuff of that kind at startup time
+ logger.log_exc("Failed to create a manager, startup sequence is broken")
raise SfaAPIError,"Argument to ManagerWrapper must be a module or class"
self.interface = interface
from sfa.trust.certificate import Certificate, Keypair, convert_public_key
from sfa.trust.gid import create_uuid
-from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey
+from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey, \
+ augment_with_sfa_builtins
from sfa.storage.alchemy import dbsession
+### the types that we need to exclude from sqlobjects before being able to dump
+# them on the xmlrpc wire
+from sqlalchemy.orm.collections import InstrumentedList
class RegistryManager:
if not record:
raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
- # xxx for the record only
- # used to call this, which was wrong, now all needed data is natively is our DB
- # self.driver.augment_records_with_testbed_info (record.__dict__)
- # likewise, we deprecate is_enabled which was not really useful
- # if not self.driver.is_enabled (record.__dict__): ...
- # xxx for the record only
-
# get the callers gid
# if caller_xrn is not specified assume the caller is the record
# object itself.
if type:
local_records = local_records.filter_by(type=type)
local_records=local_records.all()
- logger.info("Resolve details=%s: local_records=%s (type=%s)"%(details,local_records,type))
+
+ for local_record in local_records:
+ augment_with_sfa_builtins (local_record)
+
+ logger.info("Resolve, (details=%s,type=%s) local_records=%s "%(details,type,local_records))
local_dicts = [ record.__dict__ for record in local_records ]
if details:
# xxx somehow here calling dict(record) issues a weird error
# however record.todict() seems to work fine
# records.extend( [ dict(record) for record in local_records ] )
- records.extend( [ record.todict() for record in local_records ] )
+ records.extend( [ record.todict(exclude_types=[InstrumentedList]) for record in local_records ] )
+
if not records:
raise RecordNotFound(str(hrns))
records = dbsession.query(RegRecord).filter(RegRecord.hrn.startswith(hrn))
else:
records = dbsession.query(RegRecord).filter_by(authority=hrn)
- record_dicts=[ record.todict() for record in records ]
+ # so that sfi list can show more than plain names...
+ for record in records: augment_with_sfa_builtins (record)
+ record_dicts=[ record.todict(exclude_types=[InstrumentedList]) for record in records ]
return record_dicts
####################
# utility for handling relationships among the SFA objects
- # given that the SFA db does not handle this sort of relationsships
- # it will rely on side-effects in the testbed to keep this persistent
# subject_record describes the subject of the relationships
# ref_record contains the target values for the various relationships we need to manage
- # (to begin with, this is just the slice x person relationship)
+ # (to begin with, this is just the slice x person (researcher) and authority x person (pi) relationships)
def update_driver_relations (self, subject_obj, ref_obj):
type=subject_obj.type
#for (k,v) in subject_obj.__dict__.items(): print k,'=',v
Mixed(Parameter(str, "Human readable name (hrn or urn)"),
Parameter(list, "List of Human readable names ([hrn])")),
Mixed(Parameter(str, "Credential string"),
- Parameter(list, "List of credentials)"))
+ Parameter(list, "List of credentials)")),
+ Parameter(dict, "options"),
]
# xxx used to be [SfaRecord]
returns = [Parameter(dict, "registry record")]
- def call(self, xrns, creds):
- # xxx should be ar arg
- details=False
+ def call(self, xrns, creds, options={}):
+ # use details=False by default, only when explicitly specified do we want
+ # to mess with the testbed details
+ if 'details' in options: details=options['details']
+ else: details=False
type = None
if not isinstance(xrns, types.ListType):
type = Xrn(xrns).get_type()
rspec_nodes.append(rspec_node)
return rspec_nodes
- def get_leases_and_channels(self, slice=None, options={}):
+ def get_leases_and_channels(self, slice=None, slice_xrn=None, options={}):
slices = self.driver.shell.getSlices({}, [])
nodes = self.driver.shell.getNodes({}, [])
reserved_channels = self.driver.shell.getReservedChannels()
grain = self.driver.testbedInfo['grain']
+ if slice_xrn and not slice:
+ return ([], [])
+
if slice:
all_leases = []
all_leases.extend(leases)
slicename = slc['slice_name']
break
- slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
- slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ if slice_xrn:
+ slice_urn = slice_xrn
+ slice_hrn = urn_to_hrn(slice_urn)
+ else:
+ slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
+ slice_urn = hrn_to_urn(slice_hrn, 'slice')
+
rspec_channel['slice_id'] = slice_urn
rspec_channels.append(rspec_channel)
slicename = slc['slice_name']
break
- slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
- slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ if slice_xrn:
+ slice_urn = slice_xrn
+ slice_hrn = urn_to_hrn(slice_urn)
+ else:
+ slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
+ slice_urn = hrn_to_urn(slice_hrn, 'slice')
+
rspec_lease['slice_id'] = slice_urn
rspec_lease['start_time'] = lease['start_time']
rspec_lease['duration'] = (int(lease['end_time']) - int(lease['start_time'])) / int(grain)
rspec.version.add_channels(channels)
if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
- leases, channels = self.get_leases_and_channels(slice)
+ leases, channels = self.get_leases_and_channels(slice, slice_xrn)
rspec.version.add_leases(leases, channels)
return rspec.toxml()
cached_requested = options.get('cached', True)
version_manager = VersionManager()
# get the rspec's return format from options
- rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+ #rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+ # rspec's return format for nitos aggregate is version NITOS 1
+ rspec_version = version_manager.get_version('NITOS 1')
version_string = "rspec_%s" % (rspec_version)
#panos adding the info option to the caching key (can be improved)
# ensure slice record exists
slice = slices.verify_slice(slice_hrn, slice_record, sfa_peer, options=options)
# ensure user records exists
- #users = slices.verify_users(slice_hrn, slice, users, sfa_peer, options=options)
+ users = slices.verify_users(slice_hrn, slice, users, sfa_peer, options=options)
# add/remove leases (nodes and channels)
# a lease in Nitos RSpec case is a reservation of nodes and channels grouped by (slice,timeslot)
# release all reserved nodes and channels for that slice
try:
- print "Nodes: %s\nChannels: %s" %(slice_reserved_nodes_ids, slice_reserved_channels_ids)
released_nodes = self.shell.releaseNodes({'reservation_ids': slice_reserved_nodes_ids})
released_channels = self.shell.releaseChannels({'reservation_ids': slice_reserved_channels_ids})
except:
if slice_name != slice['slice_name']:
continue
hostname = xrn_to_hostname(node['component_id'])
- nitos_node = self.driver.filter_nitos_results(nitos_nodes, {'hostname': hostname})[0]
+ nitos_node = self.driver.filter_nitos_results(nitos_nodes, {'hostname': hostname})
+ if not nitos_node:
+ continue
+ nitos_node = nitos_node[0]
# fill the requested node with nitos ids
requested_node['slice_id'] = slice['slice_id']
requested_node['node_id'] = nitos_node['node_id']
requested_nodes.remove(reserved_nodes_by_id[reservation_id])
added_nodes = requested_nodes
- print "NODES: \nAdded: %s \nDeleted: %s\nKept: %s" %(added_nodes,deleted_nodes_id,kept_nodes_id)
try:
deleted=self.driver.shell.releaseNodes({'reservation_ids': deleted_nodes_id})
requested_channels.remove(reserved_channels_by_id[reservation_id])
added_channels = requested_channels
- print "CHANNELS: \nAdded: %s \nDeleted: %s\nKept: %s" %(added_channels,deleted_channels_id,kept_channels_id)
try:
deleted=self.driver.shell.releaseChannels({'reservation_ids': deleted_channels_id})
slices = self.driver.shell.getSlices({}, [])
slices = self.driver.filter_nitos_results(slices, {'slice_name': slicename})
if not slices:
- slice = {'name': slicename}
+ slice = {'slice_name': slicename}
# add the slice
slice['slice_id'] = self.driver.shell.addSlice(slice)
slice['node_ids'] = []
return slice
- #def get_existing_persons(self, users):
def verify_users(self, slice_hrn, slice_record, users, sfa_peer, options={}):
-
- slice_user_ids = slice_record['user_ids']
- all_users = self.driver.shell.getUsers()
- # filter slice users
- slice_users = [user for user in all_users if user['user_id'] in slice_user_ids]
-
+ # get slice info
slicename = hrn_to_nitos_slicename(slice_hrn)
slices = self.driver.shell.getSlices({}, [])
- slices = self.driver.filter_nitos_results(slices, {'slice_name': slicename})
-
- slice_user
- users_by_email = {}
- users_by_site = defaultdict(list)
- users_dict = {}
+ slice = self.driver.filter_nitos_results(slices, {'slice_name': slicename})[0]
+ added_users = []
+ #get users info
+ users_info = []
for user in users:
- user['urn'] = user['urn'].lower()
- hrn, type = urn_to_hrn(user['urn'])
- username = get_leaf(hrn)
- login_base = PlXrn(xrn=user['urn']).pl_login_base()
- user['username'] = username
- user['site'] = login_base
-
- if 'email' in user:
- user['email'] = user['email'].lower()
- users_by_email[user['email']] = user
- users_dict[user['email']] = user
- else:
- users_by_site[user['site']].append(user)
-
- # start building a list of existing users
- existing_user_ids = []
- existing_user_ids_filter = []
- if users_by_email:
- existing_user_ids_filter.extend(users_by_email.keys())
- if users_by_site:
- for login_base in users_by_site:
- users = users_by_site[login_base]
- for user in users:
- existing_user_ids_filter.append(user['username']+'@geni.net')
- if existing_user_ids_filter:
- # get existing users by email
- existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter},
- ['person_id', 'key_ids', 'email'])
- existing_user_ids.extend([user['email'] for user in existing_users])
-
- if users_by_site:
- # get a list of user sites (based on requeste user urns
- site_list = self.driver.shell.GetSites(users_by_site.keys(), \
- ['site_id', 'login_base', 'person_ids'])
- # get all existing users at these sites
- sites = {}
- site_user_ids = []
- for site in site_list:
- sites[site['site_id']] = site
- site_user_ids.extend(site['person_ids'])
-
- existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids,
- ['person_id', 'key_ids', 'email', 'site_ids'])
-
- # all requested users are either existing users or new (added) users
- for login_base in users_by_site:
- requested_site_users = users_by_site[login_base]
- for requested_user in requested_site_users:
- user_found = False
- for existing_user in existing_site_persons_list:
- for site_id in existing_user['site_ids']:
- if site_id in sites:
- site = sites[site_id]
- if login_base == site['login_base'] and \
- existing_user['email'].startswith(requested_user['username']+'@'):
- existing_user_ids.append(existing_user['email'])
- requested_user['email'] = existing_user['email']
- users_dict[existing_user['email']] = requested_user
- user_found = True
- break
- if user_found:
- break
-
- if user_found == False:
- fake_email = requested_user['username'] + '@geni.net'
- requested_user['email'] = fake_email
- users_dict[fake_email] = requested_user
-
- # requested slice users
- requested_user_ids = users_dict.keys()
- # existing slice users
- existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])}
- existing_slice_users = self.driver.shell.GetPersons(existing_slice_users_filter,
- ['person_id', 'key_ids', 'email'])
- existing_slice_user_ids = [user['email'] for user in existing_slice_users]
-
- # users to be added, removed or updated
- added_user_ids = set(requested_user_ids).difference(existing_user_ids)
- added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids)
- removed_user_ids = set(existing_slice_user_ids).difference(requested_user_ids)
- updated_user_ids = set(existing_slice_user_ids).intersection(requested_user_ids)
-
- # Remove stale users (only if we are not appending).
- # Append by default.
- append = options.get('append', True)
- if append == False:
- for removed_user_id in removed_user_ids:
- self.driver.shell.DeletePersonFromSlice(removed_user_id, slice_record['name'])
- # update_existing users
- updated_users_list = [user for user in users_dict.values() if user['email'] in \
- updated_user_ids]
- self.verify_keys(existing_slice_users, updated_users_list, peer, options)
-
- added_persons = []
- # add new users
- for added_user_id in added_user_ids:
- added_user = users_dict[added_user_id]
- hrn, type = urn_to_hrn(added_user['urn'])
- person = {
- 'first_name': added_user.get('first_name', hrn),
- 'last_name': added_user.get('last_name', hrn),
- 'email': added_user_id,
- 'peer_person_id': None,
- 'keys': [],
- 'key_ids': added_user.get('key_ids', []),
- }
- person['person_id'] = self.driver.shell.AddPerson(person)
- if peer:
- person['peer_person_id'] = added_user['person_id']
- added_persons.append(person)
-
- # enable the account
- self.driver.shell.UpdatePerson(person['person_id'], {'enabled': True})
-
- # add person to site
- self.driver.shell.AddPersonToSite(added_user_id, added_user['site'])
-
- for key_string in added_user.get('keys', []):
- key = {'key':key_string, 'key_type':'ssh'}
- key['key_id'] = self.driver.shell.AddPersonKey(person['person_id'], key)
- person['keys'].append(key)
-
- # add the registry record
-# if sfa_peer:
-# peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
-# 'pointer': person['person_id']}
-# self.registry.register_peer_object(self.credential, peer_dict)
-
- for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
- # add person to the slice
- self.driver.shell.AddPersonToSlice(added_slice_user_id, slice_record['name'])
- # if this is a peer record then it should already be bound to a peer.
- # no need to return worry about it getting bound later
-
- return added_persons
-
+ user_urn = user['urn']
+ user_hrn, type = urn_to_hrn(user_urn)
+ username = str(user_hrn).split('.')[-1]
+ email = user['email']
+ # look for the user according to his username, email...
+ nitos_users = self.driver.filter_nitos_results(self.driver.shell.getUsers(), {'username': username})
+ if not nitos_users:
+ nitos_users = self.driver.filter_nitos_results(self.driver.shell.getUsers(), {'email': email})
+
+ if not nitos_users:
+ # create the user
+ user_id = self.driver.shell.addUsers({'username': email.split('@')[0], 'email': email})
+ added_users.append(user_id)
+ # add user keys
+ for key in user['keys']:
+ self.driver.shell.addUserKey({'user_id': user_id, 'key': key})
+ # add the user to the slice
+ self.driver.shell.addUserToSlice({'slice_id': slice['slice_id'], 'user_id': user_id})
+ else:
+ # check if the users are in the slice
+ for user in nitos_users:
+ if not user['user_id'] in slice['user_ids']:
+ self.driver.shell.addUserToSlice({'slice_id': slice['slice_id'], 'user_id': user['user_id']})
+
+ return added_users
+
def verify_keys(self, persons, users, options={}):
# existing keys
# do not include boot state (<available> element) in the manifest rspec
if not slice:
rspec_node['boot_state'] = node['boot_state']
- rspec_node['exclusive'] = 'false'
+
+ #add the exclusive tag to distinguish between Shared and Reservable nodes
+ if node['node_type'] == 'reservable':
+ rspec_node['exclusive'] = 'true'
+ else:
+ rspec_node['exclusive'] = 'false'
+
rspec_node['hardware_types'] = [HardwareType({'name': 'plab-pc'}),
HardwareType({'name': 'pc'})]
# only doing this because protogeni rspec needs
return (rspec_nodes, links)
- def get_leases(self, slice=None, options={}):
+ def get_leases(self, slice_xrn=None, slice=None, options={}):
+ if slice_xrn and not slice:
+ return []
+
now = int(time.time())
filter={}
filter.update({'clip':now})
site_id=lease['site_id']
site=sites_dict[site_id]
- rspec_lease['lease_id'] = lease['lease_id']
+ #rspec_lease['lease_id'] = lease['lease_id']
rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, site['login_base'], lease['hostname'])
- slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
- slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ if slice_xrn:
+ slice_urn = slice_xrn
+ slice_hrn = urn_to_hrn(slice_urn)
+ else:
+ slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
+ slice_urn = hrn_to_urn(slice_hrn, 'slice')
rspec_lease['slice_id'] = slice_urn
rspec_lease['start_time'] = lease['t_from']
rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) / grain
rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
- leases = self.get_leases(slice)
+ leases = self.get_leases(slice_xrn, slice)
rspec.version.add_leases(leases)
return rspec.toxml()
# add/remove slice from nodes
requested_slivers = {}
slivers = rspec.version.get_nodes_with_slivers()
- for node in slivers:
- hostname = None
- if node.get('component_name'):
- hostname = node.get('component_name').strip()
- elif node.get('component_id'):
- hostname = xrn_to_hostname(node.get('component_id').strip())
- if hostname:
- requested_slivers[hostname] = node
- nodes = slices.verify_slice_nodes(slice, requested_slivers.keys(), peer)
+ nodes = slices.verify_slice_nodes(slice, slivers, peer)
# add/remove links links
slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
# add/remove leases
- requested_leases = []
- kept_leases = []
- for lease in rspec.version.get_leases():
- requested_lease = {}
- if not lease.get('lease_id'):
- requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
- requested_lease['start_time'] = lease.get('start_time')
- requested_lease['duration'] = lease.get('duration')
- else:
- kept_leases.append(int(lease['lease_id']))
- if requested_lease.get('hostname'):
- requested_leases.append(requested_lease)
-
- leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
+ rspec_requested_leases = rspec.version.get_leases()
+ leases = slices.verify_slice_leases(slice, rspec_requested_leases, peer)
+ #requested_leases = []
+ #kept_leases = []
+ #for lease in rspec.version.get_leases():
+ # requested_lease = {}
+ # if not lease.get('lease_id'):
+ # requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
+ # requested_lease['start_time'] = lease.get('start_time')
+ # requested_lease['duration'] = lease.get('duration')
+ # else:
+ # kept_leases.append(int(lease['lease_id']))
+ # if requested_lease.get('hostname'):
+ # requested_leases.append(requested_lease)
+
+ #leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
# handle MyPLC peer association.
# only used by plc and ple.
slices.handle_peer(site, slice, persons, peer)
return aggregate.get_rspec(slice_xrn=slice_urn,
- version=rspec.version,
- requested_slivers = requested_slivers)
+ version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
slicename = hrn_to_pl_slicename(slice_hrn)
from sfa.rspecs.rspec import RSpec
from sfa.planetlab.vlink import VLink
-from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename
+from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname
+
+import time
MAXINT = 2L**31-1
return sfa_peer
- def verify_slice_leases(self, slice, requested_leases, kept_leases, peer):
-
- leases = self.driver.shell.GetLeases({'name':slice['name']}, ['lease_id'])
+ def verify_slice_leases(self, slice, rspec_requested_leases, peer):
+
+ leases = self.driver.shell.GetLeases({'name':slice['name'], 'clip':int(time.time())}, ['lease_id','name', 'hostname', 't_from', 't_until'])
grain = self.driver.shell.GetLeaseGranularity()
- current_leases = [lease['lease_id'] for lease in leases]
- deleted_leases = list(set(current_leases).difference(kept_leases))
+
+ requested_leases = []
+ for lease in rspec_requested_leases:
+ requested_lease = {}
+ slice_name = hrn_to_pl_slicename(lease['slice_id'])
+ if slice_name != slice['name']:
+ continue
+ elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
+ continue
+
+ hostname = xrn_to_hostname(lease['component_id'])
+ # fill the requested node with nitos ids
+ requested_lease['name'] = slice['name']
+ requested_lease['hostname'] = hostname
+ requested_lease['t_from'] = int(lease['start_time'])
+ requested_lease['t_until'] = int(lease['duration']) * grain + int(lease['start_time'])
+ requested_leases.append(requested_lease)
+
+
+
+ # prepare actual slice leases by lease_id
+ leases_by_id = {}
+ for lease in leases:
+ leases_by_id[lease['lease_id']] = {'name': lease['name'], 'hostname': lease['hostname'], \
+ 't_from': lease['t_from'], 't_until': lease['t_until']}
+
+ added_leases = []
+ kept_leases_id = []
+ deleted_leases_id = []
+ for lease_id in leases_by_id:
+ if leases_by_id[lease_id] not in requested_leases:
+ deleted_leases_id.append(lease_id)
+ else:
+ kept_leases_id.append(lease_id)
+ requested_leases.remove(leases_by_id[lease_id])
+ added_leases = requested_leases
+
try:
if peer:
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
- deleted=self.driver.shell.DeleteLeases(deleted_leases)
- for lease in requested_leases:
- added=self.driver.shell.AddLeases(lease['hostname'], slice['name'], int(lease['start_time']), int(lease['duration']) * grain + int(lease['start_time']))
+ self.driver.shell.DeleteLeases(deleted_leases_id)
+ for lease in added_leases:
+ self.driver.shell.AddLeases(lease['hostname'], slice['name'], lease['t_from'], lease['t_until'])
except:
logger.log_exc('Failed to add/remove slice leases')
return leases
- def verify_slice_nodes(self, slice, requested_slivers, peer):
+ def verify_slice_nodes(self, slice, slivers, peer):
nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
current_slivers = [node['hostname'] for node in nodes]
+ requested_slivers = []
+ tags = []
+ for node in slivers:
+ hostname = None
+ if node.get('component_name'):
+ hostname = node.get('component_name').strip()
+ elif node.get('component_id'):
+ hostname = xrn_to_hostname(node.get('component_id').strip())
+ if node.get('client_id'):
+ tags.append({'slicename': slice['name'],
+ 'tagname': 'client_id',
+ 'value': node['client_id'],
+ 'node': hostname})
+ if hostname:
+ requested_slivers.append(hostname)
+
# remove nodes not in rspec
deleted_nodes = list(set(current_slivers).difference(requested_slivers))
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
self.driver.shell.AddSliceToNodes(slice['name'], added_nodes)
self.driver.shell.DeleteSliceFromNodes(slice['name'], deleted_nodes)
-
+
except:
logger.log_exc('Failed to add/remove slice from nodes')
+
+ # add tags
+ for tag in tags:
+ try:
+ self.driver.shell.AddSliceTag(tag['slicename'], tag['tagname'], tag['value'], tag['node'])
+ except:
+ logger.log_exc('Failed to add slice tag')
return nodes
def free_egre_key(self):
if len(network_elems) > 0:
network_elem = network_elems[0]
elif len(channels) > 0:
- #network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
- network_urn = "pla"
+            # dirty hack: the manifest rspec carries no resources to derive the network name from
+ network_urn = "omf"
network_elem = xml.add_element('network', name = network_urn)
else:
network_elem = xml
else:
network_elem = xml
+        # needs to be improved to retrieve the gateway address dynamically.
+ gateway_addr = 'nitlab.inf.uth.gr'
+
node_elems = []
for node in nodes:
node_fields = ['component_manager_id', 'component_id', 'boot_state']
if position_3d:
node_elem.add_instance('position_3d', position_3d, Position3D.fields)
+            # all nitos nodes are exclusive
+ exclusive_elem = node_elem.add_element('exclusive')
+ exclusive_elem.set_text('TRUE')
+
+            # In order to access nitos nodes, one needs to pass through the nitos gateway,
+            # so we advertise the NITOS access gateway address here
+ gateway_elem = node_elem.add_element('gateway')
+ gateway_elem.set_text(gateway_addr)
# add granularity of the reservation system
granularity = node.get('granularity')['grain']
for attribute in attributes:
if attribute['name'] == 'initscript':
xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
- elif tag['tagname'] == 'flack_info':
+ elif attribute['tagname'] == 'flack_info':
attrib_elem = xml.add_element('{%s}info' % self.namespaces['flack'])
attrib_dict = eval(tag['value'])
for (key, value) in attrib_dict.items():
else:
network_elem = xml
- lease_elems = []
- for lease in leases:
- lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
- lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+ # group the leases by slice and timeslots
+ grouped_leases = []
+
+ while leases:
+ slice_id = leases[0]['slice_id']
+ start_time = leases[0]['start_time']
+ duration = leases[0]['duration']
+ group = []
+
+ for lease in leases:
+ if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
+ group.append(lease)
+
+ grouped_leases.append(group)
+
+ for lease1 in group:
+ leases.remove(lease1)
+
+    lease_elems = []
+    for lease in grouped_leases:
+        # lease_id / component_id are no longer per-lease attributes: the
+        # component ids move to the per-node children added below
+        lease_fields = ['slice_id', 'start_time', 'duration']
+        lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
         lease_elems.append(lease_elem)
+        # add the nodes covered by this (grouped) lease
+        for node in lease:
+            lease_elem.add_instance('node', node, ['component_id'])
+
@staticmethod
def get_leases(xml, filter={}):
@staticmethod
def get_lease_objs(lease_elems):
- leases = []
+ leases = []
for lease_elem in lease_elems:
- lease = Lease(lease_elem.attrib, lease_elem)
- if lease.get('lease_id'):
- lease['lease_id'] = lease_elem.attrib['lease_id']
- lease['component_id'] = lease_elem.attrib['component_id']
- lease['slice_id'] = lease_elem.attrib['slice_id']
- lease['start_time'] = lease_elem.attrib['start_time']
- lease['duration'] = lease_elem.attrib['duration']
-
- leases.append(lease)
- return leases
+ #get nodes
+ node_elems = lease_elem.xpath('./default:node | ./node')
+ for node_elem in node_elems:
+ lease = Lease(lease_elem.attrib, lease_elem)
+ lease['slice_id'] = lease_elem.attrib['slice_id']
+ lease['start_time'] = lease_elem.attrib['start_time']
+ lease['duration'] = lease_elem.attrib['duration']
+ lease['component_id'] = node_elem.attrib['component_id']
+ leases.append(lease)
+
+ return leases
+
+
+
+
+
+# leases = []
+# for lease_elem in lease_elems:
+# lease = Lease(lease_elem.attrib, lease_elem)
+# if lease.get('lease_id'):
+# lease['lease_id'] = lease_elem.attrib['lease_id']
+# lease['component_id'] = lease_elem.attrib['component_id']
+# lease['slice_id'] = lease_elem.attrib['slice_id']
+# lease['start_time'] = lease_elem.attrib['start_time']
+# lease['duration'] = lease_elem.attrib['duration']
+
+# leases.append(lease)
+# return leases
if location:
node_elem.add_instance('location', location, Location.fields)
- # add granularity of the reservation system
- granularity = node.get('granularity')
- if granularity:
- node_elem.add_instance('granularity', granularity, granularity.fields)
+ # add exclusive tag to distinguish between Reservable and Shared nodes
+ exclusive_elem = node_elem.add_element('exclusive')
+ if node.get('exclusive') and node.get('exclusive') == 'true':
+ exclusive_elem.set_text('TRUE')
+ # add granularity of the reservation system
+ granularity = node.get('granularity')
+ if granularity:
+ node_elem.add_instance('granularity', granularity, granularity.fields)
+ else:
+ exclusive_elem.set_text('FALSE')
if isinstance(node.get('interfaces'), list):
logger.info("load from xml, keys=%s"%xml_dict.keys())
return make_record_dict (xml_dict)
+####################
+# augment local records with data from builtin relationships
+# expose related objects as a list of hrns
+# we pick names that clearly won't conflict with the ones used in the old approach,
+# where the relationships data came from the testbed side
+# for each type, a dict of the form {<field-name-exposed-in-record>:<alchemy_accessor_name>}
+# so after that, an 'authority' record will e.g. have a 'reg-pis' field with the hrns of its pi-users
+augment_map={'authority': {'reg-pis':'reg_pis',},
+ 'slice': {'reg-researchers':'reg_researchers',},
+ 'user': {'reg-pi-authorities':'reg_authorities_as_pi',
+ 'reg-slices':'reg_slices_as_researcher',},
+ }
+
+def augment_with_sfa_builtins (local_record):
+    """
+    Decorate local_record, in place, with 'reg-*' convenience fields:
+    always a 'reg-urn'; for users, 'reg-keys' (the raw key strings); and,
+    per augment_map, lists of hrns of related objects (PIs, researchers, ...).
+    """
+    # don't ruin the import of that file in a client world
+    from sfa.util.xrn import Xrn
+    # add a 'urn' field
+    setattr(local_record,'reg-urn',Xrn(xrn=local_record.hrn,type=local_record.type).urn)
+    # users have keys and this is needed to synthesize 'users' sent over to CreateSliver
+    if local_record.type=='user':
+        user_keys = [ key.key for key in local_record.reg_keys ]
+        setattr(local_record, 'reg-keys', user_keys)
+    # search in map according to record type
+    type_map=augment_map.get(local_record.type,{})
+    # use type-dep. map to do the job
+    for (field_name,attribute) in type_map.items():
+        # get related objects through the alchemy accessor named in the map
+        related_records = getattr(local_record,attribute,[])
+        hrns = [ r.hrn for r in related_records ]
+        setattr (local_record, field_name, hrns)
+
+
# fallback
return "** undef_datetime **"
-    def todict (self):
+    # relationship attributes (e.g. sqlalchemy collections) must not leak into
+    # the resulting dict; callers can pass types to filter out values by type
+    def todict (self, exclude_types=None):
         d=self.__dict__
-        keys=[k for k in d.keys() if not k.startswith('_')]
+        # avoid a mutable default argument
+        if exclude_types is None: exclude_types=[]
+        def exclude (k,v):
+            # private attributes are never exported
+            if k.startswith('_'): return True
+            for exclude_type in exclude_types:
+                if isinstance (v,exclude_type): return True
+            return False
+        keys=[k for (k,v) in d.items() if not exclude(k,v)]
         return dict ( [ (k,d[k]) for k in keys ] )
def toxml(self):