From: Git User
Date: Sun, 10 Nov 2013 22:59:07 +0000 (-0500)
Subject: Merge remote-tracking branch 'local_master/geni-v3' into geni-v3
X-Git-Tag: sfa-3.1-1~45
X-Git-Url: http://git.onelab.eu/?p=sfa.git;a=commitdiff_plain;h=a6d9810733f7d21fd43ba519dae5ccfcdc138095;hp=02f42286da5bd6dbe62a31ea9b733ee3225903aa

Merge remote-tracking branch 'local_master/geni-v3' into geni-v3
---

diff --git a/Makefile b/Makefile
index 82ec3a96..144c06c3 100644
--- a/Makefile
+++ b/Makefile
@@ -127,8 +127,12 @@ force:
 # a lot of stuff in the working dir is just noise
 files:
 	@find . -type f | egrep -v '^\./\.|/\.git/|/\.svn/|TAGS|AA-|~$$|egg-info|\.(py[co]|doc|html|pdf|png|svg|out|bak|dg|pickle)$$'
+
+git-files:
+	@git ls-files | grep -v '\.doc$$'
+
 tags:
-	$(MAKE) files | xargs etags
+	$(MAKE) git-files | xargs etags
 
 .PHONY: files tags
 
@@ -149,7 +153,7 @@ SSHURL:=root@$(PLC):/
 SSHCOMMAND:=ssh root@$(PLC)
 else
 ifdef PLCHOSTLXC
-SSHURL:=root@$(PLCHOSTLXC):/var/lib/lxc/$(GUESTNAME)/rootfs
+SSHURL:=root@$(PLCHOSTLXC):/vservers/$(GUESTNAME)/rootfs
 SSHCOMMAND:=ssh root@$(PLCHOSTLXC) ssh $(GUESTHOSTNAME)
 else
 ifdef PLCHOSTVS
diff --git a/sfa/client/manifolduploader.py b/sfa/client/manifolduploader.py
index b27d99d7..f8ca0f9d 100755
--- a/sfa/client/manifolduploader.py
+++ b/sfa/client/manifolduploader.py
@@ -7,17 +7,21 @@
 # install a separate tool; so duplicating this code is suboptimal in
 # terms of code sharing but acceptable for hopefully easier use
 #
+# As of Nov. 2013, the signature for the forward API call has changed
+# and now requires authentication to be passed as an annotation
+# We take this chance to make things much simpler here by dropping
+# support for multiple API versions/flavours
+#
 # As of April 2013, manifold is moving from old-fashioned API known as
 # v1, that offers an AddCredential API call, towards a new API v2 that
 # manages credentials with the same set of Get/Update calls as other
 # objects
 #
-# Since this code targets the future we favour v2, however in case
-# this won't work the v1 way is attempted too
-#
-## this for now points at demo.myslice.info, but sounds like a
-## better default for the long run
+# mostly this is intended to be used through 'sfi myslice'
+# so the defaults below are of no real importance
+# this for now points at demo.myslice.info, but sounds like a
+# better default for the long run
 DEFAULT_URL = "http://myslice.onelab.eu:7080"
 DEFAULT_PLATFORM = 'ple'
 
@@ -63,15 +67,21 @@ class ManifoldUploader:
     def prompt_all(self):
         self.username(); self.password(); self.platform(); self.url()
 
+    # looks like the current implementation of manifold server
+    # won't be happy with several calls issued in the same session
+    # so we do not cache this one
     def proxy (self):
-        if not self._proxy:
-            url=self.url()
-            self.logger.debug("Connecting manifold url %s"%url)
-            self._proxy = xmlrpclib.ServerProxy(url, allow_none = True)
-        return self._proxy
+#        if not self._proxy:
+#            url=self.url()
+#            self.logger.info("Connecting manifold url %s"%url)
+#            self._proxy = xmlrpclib.ServerProxy(url, allow_none = True)
+#        return self._proxy
+        url=self.url()
+        self.logger.debug("Connecting manifold url %s"%url)
+        return xmlrpclib.ServerProxy(url, allow_none = True)
 
     # does the job for one credential
-    # expects the credential (string) and an optional message (e.g. 
hrn) for reporting # return True upon success and False otherwise def upload (self, delegated_credential, message=None): platform=self.platform() @@ -81,21 +91,20 @@ class ManifoldUploader: if not message: message="" try: - # looks like the current implementation of manifold server - # won't be happy with several calls issued in the same session -# manifold=self.proxy() - url=self.url() - self.logger.debug("Connecting manifold url %s"%url) - manifold = xmlrpclib.ServerProxy(url, allow_none = True) + manifold=self.proxy() # the code for a V2 interface - query= { 'action': 'update', + query = { 'action': 'update', 'object': 'local:account', 'filters': [ ['platform', '=', platform] ] , 'params': {'credential': delegated_credential, }, } + annotation = {'authentication': auth, } + # in principle the xmlrpc call should not raise an exception + # but fill in error code and messages instead + # however this is only theoretical so let's be on the safe side try: - self.logger.debug("Trying v2 method Update@%s %s"%(platform,message)) - retcod2=manifold.Update (auth, query) + self.logger.debug("Using new v2 method forward+annotation@%s %s"%(platform,message)) + retcod2=manifold.forward (query, annotation) except Exception,e: # xxx we need a constant constant for UNKNOWN, how about using 1 MANIFOLD_UNKNOWN=1 @@ -106,30 +115,11 @@ class ManifoldUploader: info += 'v2 upload OK' self.logger.info(info) return True - #print delegated_credential, "upload failed,",retcod['description'], \ - # "with code",retcod['code'] - # the code for V1 - try: - self.logger.debug("Trying v1 method AddCredential@%s %s"%(platform,message)) - retcod1=manifold.AddCredential(auth, delegated_credential, platform) - except Exception,e: - retcod1=e - if retcod1==1: - info="" - if message: info += message+" " - info += 'v1 upload OK' - self.logger.info(message) - return True # everything has failed, let's report - if message: self.logger.error("Could not upload %s"%message) - else: self.logger.error("Could not upload credential") - if 'code' in retcod2 and 'description' in retcod2: - self.logger.info(" V2 Update returned code %s and error >>%s<<"%(retcod2['code'],retcod2['description'])) - self.logger.debug("****** full retcod2") - for (k,v) in retcod2.items(): self.logger.debug("**** %s: %s"%(k,v)) - else: - self.logger.info(" V2 Update returned %s"%retcod2) - self.logger.info(" V1 AddCredential returned code %s (expected 1)"%retcod1) + self.logger.error("Could not upload %s"%(message if message else "credential")) + self.logger.info(" V2 Update returned code %s and error >>%s<<"%(retcod2['code'],retcod2['description'])) + self.logger.debug("****** full retcod2") + for (k,v) in retcod2.items(): self.logger.debug("**** %s: %s"%(k,v)) return False except Exception, e: if message: self.logger.error("Could not upload %s %s"%(message,e)) diff --git a/sfa/server/threadmanager.py b/sfa/client/multiclient.py similarity index 90% rename from sfa/server/threadmanager.py rename to sfa/client/multiclient.py index b47b8186..75573ed5 100644 --- a/sfa/server/threadmanager.py +++ b/sfa/client/multiclient.py @@ -16,7 +16,7 @@ def ThreadedMethod(callable, results, errors): try: results.put(callable(*args, **kwds)) except Exception, e: - logger.log_exc('ThreadManager: Error in thread: ') + logger.log_exc('MultiClient: Error in thread: ') errors.put(traceback.format_exc()) thread = ThreadInstance() @@ -26,10 +26,10 @@ def ThreadedMethod(callable, results, errors): -class ThreadManager: +class MultiClient: """ - ThreadManager executes a callable in a 
thread and stores the result - in a thread safe queue. + MultiClient allows to issue several SFA calls in parallel in different threads + and stores the results in a thread safe queue. """ def __init__(self): @@ -58,7 +58,7 @@ class ThreadManager: """ Return a list of all the results so far. Blocks until all threads are finished. - If lienent is set to false the error queue will be checked before + If lenient is set to false the error queue will be checked before the response is returned. If there are errors in the queue an SFA Fault will be raised. """ @@ -107,7 +107,7 @@ if __name__ == '__main__': time.sleep(sleep) return nums - threads = ThreadManager() + threads = MultiClient() threads.run(f, "Thread1", 10, 2) threads.run(f, "Thread2", -10, 1) threads.run(e, "Thread3", 19, 1) diff --git a/sfa/cortexlab/cortexlabpostgres.py b/sfa/cortexlab/cortexlabpostgres.py index cd4fc585..dca63f53 100644 --- a/sfa/cortexlab/cortexlabpostgres.py +++ b/sfa/cortexlab/cortexlabpostgres.py @@ -75,7 +75,7 @@ class IotlabDB(object): """ Class used with this Python singleton design pattern to allow the definition of one single instance of iotlab db session in the whole - code. Wherever a conenction to the database is needed, this class + code. Wherever a connection to the database is needed, this class returns the same instance every time. Removes the need for global variable throughout the code. """ diff --git a/sfa/importer/plimporter.py b/sfa/importer/plimporter.py index 41325a89..7f5cf860 100644 --- a/sfa/importer/plimporter.py +++ b/sfa/importer/plimporter.py @@ -192,7 +192,7 @@ class PlImporter: # start importing for site in sites: - if site['name'].startswith('sfa.'): + if site['name'].startswith('sfa:'): continue site_hrn = _get_site_hrn(interface_hrn, site) diff --git a/sfa/managers/slice_manager.py b/sfa/managers/slice_manager.py index 746bac6b..05b0f1ee 100644 --- a/sfa/managers/slice_manager.py +++ b/sfa/managers/slice_manager.py @@ -14,7 +14,7 @@ from sfa.util.version import version_core from sfa.util.callids import Callids from sfa.util.cache import Cache -from sfa.server.threadmanager import ThreadManager +from sfa.client.multiclient import MultiClient from sfa.rspecs.rspec_converter import RSpecConverter from sfa.rspecs.version_manager import VersionManager @@ -147,7 +147,7 @@ class SliceManager: cred = api.getDelegatedCredential(creds) if not cred: cred = api.getCredential() - threads = ThreadManager() + multiclient = MultiClient() for aggregate in api.aggregates: # prevent infinite loop. Dont send request back to caller # unless the caller is the aggregate's SM @@ -157,10 +157,10 @@ class SliceManager: # get the rspec from the aggregate interface = api.aggregates[aggregate] server = api.server_proxy(interface, cred) - threads.run(_ListResources, aggregate, server, [cred], options) + multiclient.run(_ListResources, aggregate, server, [cred], options) - results = threads.get_results() + results = multiclient.get_results() rspec_version = version_manager.get_version(options.get('geni_rspec_version')) if xrn: result_version = version_manager._get_version(rspec_version.type, rspec_version.version, 'manifest') @@ -230,7 +230,7 @@ class SliceManager: hrn, type = urn_to_hrn(xrn) valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0] caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn() - threads = ThreadManager() + multiclient = MultiClient() for aggregate in api.aggregates: # prevent infinite loop. 
Dont send request back to caller # unless the caller is the aggregate's SM @@ -239,9 +239,9 @@ class SliceManager: interface = api.aggregates[aggregate] server = api.server_proxy(interface, cred) # Just send entire RSpec to each aggregate - threads.run(_Allocate, aggregate, server, xrn, [cred], rspec.toxml(), options) + multiclient.run(_Allocate, aggregate, server, xrn, [cred], rspec.toxml(), options) - results = threads.get_results() + results = multiclient.get_results() manifest_version = version_manager._get_version(rspec.version.type, rspec.version.version, 'manifest') result_rspec = RSpec(version=manifest_version) geni_urn = None @@ -290,7 +290,7 @@ class SliceManager: # get the callers hrn valid_cred = api.auth.checkCredentials(creds, 'createsliver', xrn)[0] caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn() - threads = ThreadManager() + multiclient = MultiClient() for aggregate in api.aggregates: # prevent infinite loop. Dont send request back to caller # unless the caller is the aggregate's SM @@ -299,9 +299,9 @@ class SliceManager: interface = api.aggregates[aggregate] server = api.server_proxy(interface, cred) # Just send entire RSpec to each aggregate - threads.run(_Provision, aggregate, server, xrn, [cred], options) + multiclient.run(_Provision, aggregate, server, xrn, [cred], options) - results = threads.get_results() + results = multiclient.get_results() manifest_version = version_manager._get_version('GENI', '3', 'manifest') result_rspec = RSpec(version=manifest_version) geni_slivers = [] @@ -350,7 +350,7 @@ class SliceManager: cred = api.getDelegatedCredential(creds) if not cred: cred = api.getCredential(minimumExpiration=31*86400) - threads = ThreadManager() + multiclient = MultiClient() for aggregate in api.aggregates: # prevent infinite loop. Dont send request back to caller # unless the caller is the aggregate's SM @@ -358,9 +358,9 @@ class SliceManager: continue interface = api.aggregates[aggregate] server = api.server_proxy(interface, cred) - threads.run(_Renew, aggregate, server, xrn, [cred], expiration_time, options) + multiclient.run(_Renew, aggregate, server, xrn, [cred], expiration_time, options) - results = threads.get_results() + results = multiclient.get_results() geni_code = 0 geni_output = ",".join([x.get('output',"") for x in results]) @@ -390,7 +390,7 @@ class SliceManager: cred = api.getDelegatedCredential(creds) if not cred: cred = api.getCredential() - threads = ThreadManager() + multiclient = MultiClient() for aggregate in api.aggregates: # prevent infinite loop. 
Dont send request back to caller # unless the caller is the aggregate's SM @@ -398,10 +398,10 @@ class SliceManager: continue interface = api.aggregates[aggregate] server = api.server_proxy(interface, cred) - threads.run(_Delete, server, xrn, [cred], options) + multiclient.run(_Delete, server, xrn, [cred], options) results = [] - for result in threads.get_results(): + for result in multiclient.get_results(): results += ReturnValue.get_value(result) return results @@ -417,12 +417,12 @@ class SliceManager: cred = api.getDelegatedCredential(creds) if not cred: cred = api.getCredential() - threads = ThreadManager() + multiclient = MultiClient() for aggregate in api.aggregates: interface = api.aggregates[aggregate] server = api.server_proxy(interface, cred) - threads.run (_Status, server, slice_xrn, [cred], options) - results = [ReturnValue.get_value(result) for result in threads.get_results()] + multiclient.run (_Status, server, slice_xrn, [cred], options) + results = [ReturnValue.get_value(result) for result in multiclient.get_results()] # get rid of any void result - e.g. when call_id was hit, where by convention we return {} results = [ result for result in results if result and result['geni_slivers']] @@ -455,12 +455,12 @@ class SliceManager: cred = api.getDelegatedCredential(creds) if not cred: cred = api.getCredential() - threads = ThreadManager() + multiclient = MultiClient() for aggregate in api.aggregates: interface = api.aggregates[aggregate] server = api.server_proxy(interface, cred) - threads.run (_Describe, server, xrns, [cred], options) - results = [ReturnValue.get_value(result) for result in threads.get_results()] + multiclient.run (_Describe, server, xrns, [cred], options) + results = [ReturnValue.get_value(result) for result in multiclient.get_results()] # get rid of any void result - e.g. when call_id was hit, where by convention we return {} results = [ result for result in results if result and result.get('geni_urn')] @@ -496,7 +496,7 @@ class SliceManager: cred = api.getDelegatedCredential(creds) if not cred: cred = api.getCredential() - threads = ThreadManager() + multiclient = MultiClient() for aggregate in api.aggregates: # prevent infinite loop. Dont send request back to caller # unless the caller is the aggregate's SM @@ -504,8 +504,8 @@ class SliceManager: continue interface = api.aggregates[aggregate] server = api.server_proxy(interface, cred) - threads.run(server.PerformOperationalAction, xrn, [cred], action, options) - threads.get_results() + multiclient.run(server.PerformOperationalAction, xrn, [cred], action, options) + multiclient.get_results() return 1 def Shutdown(self, api, xrn, creds, options={}): @@ -518,7 +518,7 @@ class SliceManager: cred = api.getDelegatedCredential(creds) if not cred: cred = api.getCredential() - threads = ThreadManager() + multiclient = MultiClient() for aggregate in api.aggregates: # prevent infinite loop. 
Dont send request back to caller # unless the caller is the aggregate's SM @@ -526,7 +526,7 @@ class SliceManager: continue interface = api.aggregates[aggregate] server = api.server_proxy(interface, cred) - threads.run(server.Shutdown, xrn.urn, cred) - threads.get_results() + multiclient.run(server.Shutdown, xrn.urn, cred) + multiclient.get_results() return 1 diff --git a/sfa/openstack/osaggregate.py b/sfa/openstack/osaggregate.py index de349955..16eec41e 100644 --- a/sfa/openstack/osaggregate.py +++ b/sfa/openstack/osaggregate.py @@ -24,7 +24,7 @@ from sfa.planetlab.plxrn import PlXrn from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename from sfa.rspecs.version_manager import VersionManager from sfa.openstack.security_group import SecurityGroup -from sfa.server.threadmanager import ThreadManager +from sfa.client.multiclient import MultiClient from sfa.util.sfalogging import logger def pubkeys_to_user_data(pubkeys): @@ -412,7 +412,7 @@ class OSAggregate: time.sleep(.5) manager.delete_security_group(security_group) - thread_manager = ThreadManager() + multiclient = MultiClient() tenant = self.driver.shell.auth_manager.tenants.find(id=instance.tenant_id) self.driver.shell.nova_manager.connect(tenant=tenant.name) args = {'name': instance.name, @@ -423,7 +423,7 @@ class OSAggregate: # destroy instance self.driver.shell.nova_manager.servers.delete(instance) # deleate this instance's security groups - thread_manager.run(_delete_security_group, instance) + multiclient.run(_delete_security_group, instance) return 1 def stop_instances(self, instance_name, tenant_name, id=None): diff --git a/sfa/planetlab/plaggregate.py b/sfa/planetlab/plaggregate.py index fdd70e5f..2d26f0f3 100644 --- a/sfa/planetlab/plaggregate.py +++ b/sfa/planetlab/plaggregate.py @@ -18,7 +18,7 @@ from sfa.rspecs.elements.lease import Lease from sfa.rspecs.elements.granularity import Granularity from sfa.rspecs.version_manager import VersionManager -from sfa.planetlab.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename, slicename_to_hrn, xrn_to_ext_slicename, top_auth +from sfa.planetlab.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename, slicename_to_hrn, top_auth, hash_loginbase from sfa.planetlab.vlink import get_tc_rate from sfa.planetlab.topology import Topology from sfa.storage.alchemy import dbsession @@ -131,10 +131,14 @@ class PlAggregate: else: slice_hrn = xrn.get_hrn() top_auth_hrn = top_auth(slice_hrn) + site_hrn = '.'.join(slice_hrn.split('.')[:-1]) + slice_part = slice_hrn.split('.')[-1] if top_auth_hrn == self.driver.hrn: - slice_name = hrn_to_pl_slicename(slice_hrn) + login_base = slice_hrn.split('.')[-2][:12] else: - slice_name = xrn_to_ext_slicename(slice_hrn) + login_base = hash_loginbase(site_hrn) + + slice_name = '_'.join([login_base, slice_part]) names.add(slice_name) filter = {} diff --git a/sfa/planetlab/pldriver.py b/sfa/planetlab/pldriver.py index 79c61f0a..6fb8d602 100644 --- a/sfa/planetlab/pldriver.py +++ b/sfa/planetlab/pldriver.py @@ -25,7 +25,7 @@ from sfa.planetlab.plshell import PlShell import sfa.planetlab.peers as peers from sfa.planetlab.plaggregate import PlAggregate from sfa.planetlab.plslices import PlSlices -from sfa.planetlab.plxrn import PlXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, xrn_to_hostname, xrn_to_ext_slicename, top_auth +from sfa.planetlab.plxrn import PlXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, xrn_to_hostname, top_auth, hash_loginbase def list_to_dict(recs, key): @@ -767,12 +767,17 @@ class PlDriver (Driver): # set the 
'enabled' tag to 0 def shutdown (self, xrn, options={}): - hrn = urn_to_hrn(xrn) + hrn, _ = urn_to_hrn(xrn) top_auth_hrn = top_auth(hrn) - if top_auth_hrn == self.hrn: - slicename = hrn_to_pl_slicename(hrn) + site_hrn = '.'.join(hrn.split('.')[:-1]) + slice_part = hrn.split('.')[-1] + if top_auth_hrn == self.driver.hrn: + login_base = slice_hrn.split('.')[-2][:12] else: - slicename = xrn_to_ext_slicename(hrn) + login_base = hash_loginbase(site_hrn) + + slicename = '_'.join([login_base, slice_part]) + slices = self.shell.GetSlices({'name': slicename}, ['slice_id']) if not slices: raise RecordNotFound(slice_hrn) diff --git a/sfa/planetlab/plshell.py b/sfa/planetlab/plshell.py index 15334db0..5162fe84 100644 --- a/sfa/planetlab/plshell.py +++ b/sfa/planetlab/plshell.py @@ -28,7 +28,11 @@ class PlShell: 'AddLeases', # HRN management methods 'SetPersonHrn', 'GetPersonHrn', 'SetSliceHrn', 'GetSliceHrn', - 'SetNodeHrn', 'GetNodeHrn' + 'SetNodeHrn', 'GetNodeHrn', 'GetSiteHrn', 'SetSiteHrn', + # Tag slice/person/site created by SFA + 'SetPersonSfaCreated', 'GetPersonSfaCreated', 'SetSliceSfaCreated', + 'GetSliceSfaCreated', 'SetNodeSfaCreated', 'GetNodeSfaCreated', + 'GetSiteSfaCreated', 'SetSiteSfaCreated', ] # support for other names - this is experimental alias_calls = { 'get_authorities':'GetSites', @@ -64,7 +68,7 @@ class PlShell: except: plc_direct_access=False if is_local and plc_direct_access: - logger.debug('plshell access - capability') + logger.info('plshell access - capability') self.plauth = { 'AuthMethod': 'capability', 'Username': config.SFA_PLC_USER, 'AuthString': config.SFA_PLC_PASSWORD, @@ -72,7 +76,7 @@ class PlShell: self.proxy = PLC.Shell.Shell () else: - logger.debug('plshell access - xmlrpc') + logger.info('plshell access - xmlrpc') self.plauth = { 'AuthMethod': 'password', 'Username': config.SFA_PLC_USER, 'AuthString': config.SFA_PLC_PASSWORD, diff --git a/sfa/planetlab/plslices.py b/sfa/planetlab/plslices.py index c277a698..703dac30 100644 --- a/sfa/planetlab/plslices.py +++ b/sfa/planetlab/plslices.py @@ -8,7 +8,7 @@ from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn from sfa.rspecs.rspec import RSpec from sfa.planetlab.vlink import VLink from sfa.planetlab.topology import Topology -from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname, xrn_to_ext_slicename, hrn_to_ext_loginbase, top_auth +from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname, top_auth, hash_loginbase from sfa.storage.model import SliverAllocation from sfa.storage.alchemy import dbsession @@ -170,11 +170,17 @@ class PlSlices: for lease in rspec_requested_leases: requested_lease = {} slice_hrn, _ = urn_to_hrn(lease['slice_id']) + top_auth_hrn = top_auth(slice_hrn) + site_hrn = '.'.join(slice_hrn.split('.')[:-1]) + slice_part = slice_hrn.split('.')[-1] if top_auth_hrn == self.driver.hrn: - slice_name = hrn_to_pl_slicename(lease['slice_id']) + login_base = slice_hrn.split('.')[-2][:12] else: - slice_name = xrn_to_ext_slicename(lease['slice_id']) + login_base = hash_loginbase(site_hrn) + + slice_name = '_'.join([login_base, slice_part]) + if slice_name != slice['name']: continue elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn: @@ -381,297 +387,178 @@ class PlSlices: def verify_site(self, slice_xrn, slice_record={}, peer=None, sfa_peer=None, options={}): (slice_hrn, type) = urn_to_hrn(slice_xrn) top_auth_hrn = top_auth(slice_hrn) + site_hrn = '.'.join(slice_hrn.split('.')[:-1]) if top_auth_hrn == self.driver.hrn: - # 
login base can't be longer than 20 characters - slicename = hrn_to_pl_slicename(slice_hrn) - authority_name = slicename.split('_')[0] - login_base = authority_name[:20] + login_base = slice_hrn.split('.')[-2][:12] else: - login_base = hrn_to_ext_loginbase(slice_hrn) - authority_name = login_base + login_base = hash_loginbase(site_hrn) + + sites = self.driver.shell.GetSites({'peer_id': None},['site_id','name','abbreviated_name','login_base','hrn']) + + # filter sites by hrn + site_exists = [site for site in sites if site['hrn'] == site_hrn] - sites = self.driver.shell.GetSites(login_base) - if not sites: + if not site_exists: # create new site record - site = {'name': 'sfa.%s' % authority_name, - 'abbreviated_name': authority_name, + site = {'name': 'sfa:%s' % site_hrn, + 'abbreviated_name': site_hrn, 'login_base': login_base, 'max_slices': 100, 'max_slivers': 1000, 'enabled': True, 'peer_site_id': None} - if peer: - site['peer_site_id'] = slice_record.get('site_id', None) + site['site_id'] = self.driver.shell.AddSite(site) + # Set site HRN + self.driver.shell.SetSiteHrn(int(site['site_id']), site_hrn) + # Tag this as created through SFA + self.driver.shell.SetSiteSfaCreated(int(site['site_id']), 'True') # exempt federated sites from monitor policies - self.driver.shell.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101") - -# # is this still necessary? -# # add record to the local registry -# if sfa_peer and slice_record: -# peer_dict = {'type': 'authority', 'hrn': site_hrn, \ -# 'peer_authority': sfa_peer, 'pointer': site['site_id']} -# self.registry.register_peer_object(self.credential, peer_dict) + self.driver.shell.AddSiteTag(int(site['site_id']), 'exempt_site_until', "20200101") + else: - site = sites[0] - if peer: - # unbind from peer so we can modify if necessary. 
Will bind back later - self.driver.shell.UnBindObjectFromPeer('site', site['site_id'], peer['shortname']) + site = site_exists[0] return site def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, expiration, options={}): top_auth_hrn = top_auth(slice_hrn) + site_hrn = '.'.join(slice_hrn.split('.')[:-1]) + slice_part = slice_hrn.split('.')[-1] if top_auth_hrn == self.driver.hrn: - slicename = hrn_to_pl_slicename(slice_hrn) - parts = slicename.split("_") - login_base = parts[0] + login_base = slice_hrn.split('.')[-2][:12] else: - login_base = hrn_to_ext_loginbase(slice_hrn) - slicename = xrn_to_ext_slicename(slice_hrn) + login_base = hash_loginbase(site_hrn) + - slices = self.driver.shell.GetSlices([slicename]) + slice_name = '_'.join([login_base, slice_part]) + + slices = self.driver.shell.GetSlices({'peer_id': None},['slice_id','name','hrn']) + # Filter slices by HRN + slice_exists = [slice for slice in slices if slice['hrn'] == slice_hrn] expires = int(datetime_to_epoch(utcparse(expiration))) - if not slices: - slice = {'name': slicename, - 'url': slice_record.get('url', slice_hrn), + if not slice_exists: + slice = {'name': slice_name, + 'url': slice_record.get('url', slice_hrn), 'description': slice_record.get('description', slice_hrn)} # add the slice slice['slice_id'] = self.driver.shell.AddSlice(slice) - slice['node_ids'] = [] - slice['person_ids'] = [] # set the slice HRN - self.driver.shell.SetSliceHrn(int(slice['slice_id']), slice_hrn) - - if peer and slice_record: - slice['peer_slice_id'] = slice_record.get('slice_id', None) + self.driver.shell.SetSliceHrn(int(slice['slice_id']), slice_hrn) + # Tag this as created through SFA + self.driver.shell.SetSliceSfaCreated(int(slice['slice_id']), 'True') # set the expiration - self.driver.shell.UpdateSlice(slice['slice_id'], {'expires': expires}) + self.driver.shell.UpdateSlice(int(slice['slice_id']), {'expires': expires}) + else: - slice = slices[0] - # Check slice HRN - if self.driver.shell.GetSliceHrn(slice['slice_id']) != slice_hrn: - self.driver.shell.SetSliceHrn(slice['slice_id'], slice_hrn) - - if peer and slice_record: - slice['peer_slice_id'] = slice_record.get('slice_id', None) - # unbind from peer so we can modify if necessary. 
Will bind back later - self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname']) - - #Update expiration if necessary - if slice['expires'] != expires: - self.driver.shell.UpdateSlice( slice['slice_id'], {'expires' : expires}) - - return slice + slice = slice_exists[0] + #Update expiration if necessary + if slice.get('expires', None) != expires: + self.driver.shell.UpdateSlice( int(slice['slice_id']), {'expires' : expires}) + + return self.driver.shell.GetSlices(int(slice['slice_id']))[0] + - #def get_existing_persons(self, users): def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, options={}): - users_by_email = {} - users_by_site = defaultdict(list) - users_dict = {} + top_auth_hrn = top_auth(slice_hrn) + site_hrn = '.'.join(slice_hrn.split('.')[:-1]) + slice_part = slice_hrn.split('.')[-1] + users_by_hrn = {} for user in users: - user['urn'] = user['urn'].lower() - hrn, type = urn_to_hrn(user['urn']) - username = get_leaf(hrn) - user['username'] = username + user['hrn'], _ = urn_to_hrn(user['urn']) + users_by_hrn[user['hrn']] = user + + if top_auth_hrn == self.driver.hrn: + login_base = slice_hrn.split('.')[-2][:12] + else: + login_base = hash_loginbase(site_hrn) - top_auth_hrn = top_auth(hrn) + slice_name = '_'.join([login_base, slice_part]) - if top_auth_hrn == self.driver.hrn: - login_base = PlXrn(xrn=user['urn']).pl_login_base() - else: - login_base = hrn_to_ext_loginbase(hrn) + persons = self.driver.shell.GetPersons({'peer_id': None},['person_id','email','hrn']) + site = self.driver.shell.GetSites({'peer_id': None, 'login_base': login_base})[0] + slice = self.driver.shell.GetSlices({'peer_id': None, 'name': slice_name})[0] + slice_persons = self.driver.shell.GetPersons({'peer_id': None, 'person_id': slice['person_ids']},['person_id','email','hrn']) - user['site'] = login_base - if 'email' in user: - user['email'] = user['email'].lower() - users_by_email[user['email']] = user - users_dict[user['email']] = user - else: - users_by_site[user['site']].append(user) - - # start building a list of existing users - existing_user_ids = [] - existing_user_ids_filter = [] - if users_by_email: - existing_user_ids_filter.extend(users_by_email.keys()) - if users_by_site: - for login_base in users_by_site: - users = users_by_site[login_base] - for user in users: - existing_user_ids_filter.append(user['username']+'@geni.net') - - if existing_user_ids_filter: - # get existing users by email - existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter}, - ['person_id', 'key_ids', 'email']) - existing_user_ids.extend([user['email'] for user in existing_users]) - - if users_by_site: - # get a list of user sites (based on requeste user urns - site_list = self.driver.shell.GetSites(users_by_site.keys(), \ - ['site_id', 'login_base', 'person_ids']) - # get all existing users at these sites - sites = {} - site_user_ids = [] - for site in site_list: - sites[site['site_id']] = site - site_user_ids.extend(site['person_ids']) - - existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids, - ['person_id', 'key_ids', 'email', 'site_ids']) - - # all requested users are either existing users or new (added) users - for login_base in users_by_site: - requested_site_users = users_by_site[login_base] - for requested_user in requested_site_users: - user_found = False - for existing_user in existing_site_persons_list: - for site_id in existing_user['site_ids']: - if site_id in sites: - site = sites[site_id] - if login_base == 
site['login_base'] and \ - existing_user['email'].startswith(requested_user['username']+'@'): - existing_user_ids.append(existing_user['email']) - requested_user['email'] = existing_user['email'] - users_dict[existing_user['email']] = requested_user - user_found = True - break - if user_found: - break - - if user_found == False: - fake_email = requested_user['username'] + '@geni.net' - requested_user['email'] = fake_email - users_dict[fake_email] = requested_user - - # requested slice users - requested_user_ids = users_dict.keys() - # existing slice users - existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])} - existing_slice_users = self.driver.shell.GetPersons(existing_slice_users_filter, - ['person_id', 'key_ids', 'email']) - existing_slice_user_ids = [user['email'] for user in existing_slice_users] - - # users to be added, removed or updated - added_user_ids = set(requested_user_ids).difference(existing_user_ids) - added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids) - removed_user_ids = set(existing_slice_user_ids).difference(requested_user_ids) - updated_user_ids = set(existing_slice_user_ids).intersection(requested_user_ids) - - # Remove stale users (only if we are not appending). - # Append by default. - append = options.get('append', True) - if append == False: - for removed_user_id in removed_user_ids: - self.driver.shell.DeletePersonFromSlice(removed_user_id, slice_record['name']) - # update_existing users - updated_users_list = [user for user in users_dict.values() if user['email'] in \ - updated_user_ids] - self.verify_keys(existing_slice_users, updated_users_list, peer, options) - - added_persons = [] - # add new users - for added_user_id in added_user_ids: - added_user = users_dict[added_user_id] - hrn, type = urn_to_hrn(added_user['urn']) - person = { - 'first_name': added_user.get('first_name', hrn), - 'last_name': added_user.get('last_name', hrn), - 'email': added_user_id, - #'peer_person_id': None, - #'keys': [], - #'key_ids': added_user.get('key_ids', []), - } - person['person_id'] = self.driver.shell.AddPerson(person) - self.driver.shell.AddRoleToPerson('user', int(person['person_id'])) - # check user HRN - if self.driver.shell.GetPersonHrn(int(person['person_id'])) != hrn: - self.driver.shell.SetPersonHrn(int(person['person_id']), hrn) + persons_by_hrn = {} + persons_by_email = {} + for person in persons: + persons_by_hrn[person['hrn']] = person + persons_by_email[person['email']] = person + slice_persons_by_hrn = {} + for slice_person in slice_persons: + slice_persons_by_hrn[slice_person['hrn']] = slice_person - if peer: - person['peer_person_id'] = added_user['person_id'] - added_persons.append(person) + # sort persons by HRN + persons_to_add = set(users_by_hrn.keys()).difference(slice_persons_by_hrn.keys()) + persons_to_delete = set(slice_persons_by_hrn.keys()).difference(users_by_hrn.keys()) + persons_to_keep = set(users_by_hrn.keys()).intersection(slice_persons_by_hrn.keys()) - # enable the account - self.driver.shell.UpdatePerson(person['person_id'], {'enabled': True}) - # add person to site - self.driver.shell.AddPersonToSite(added_user_id, added_user['site']) + persons_to_verify_keys = {} - for key_string in added_user.get('keys', []): - key = {'key':key_string, 'key_type':'ssh'} - key['key_id'] = self.driver.shell.AddPersonKey(person['person_id'], key) - if 'keys' not in person: - person['keys'] = [] - person['keys'].append(key) + # Add persons or add persons to slice + for person_hrn in persons_to_add: 
+ person_email = users_by_hrn[person_hrn].get('email', None) + if person_email and person_email in persons_by_email.keys(): + # check if the user already exist in PL + person_id = persons_by_email[person_email]['person_id'] + self.driver.shell.AddPersonToSlice(person_id, slice['slice_id']) + persons_to_verify_keys[person_id] = users_by_hrn[person_hrn] - # add the registry record -# if sfa_peer: -# peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \ -# 'pointer': person['person_id']} -# self.registry.register_peer_object(self.credential, peer_dict) + else: + person = { + 'first_name': person_hrn, + 'last_name': person_hrn, + 'email': users_by_hrn[person_hrn].get('email', "%s@geni.net"%person_hrn.split('.')[-1]), + } - for added_slice_user_id in added_slice_user_ids.union(added_user_ids): - # add person to the slice - self.driver.shell.AddPersonToSlice(added_slice_user_id, slice_record['name']) - # if this is a peer record then it should already be bound to a peer. - # no need to return worry about it getting bound later + person_id = self.driver.shell.AddPerson(person) + self.driver.shell.AddRoleToPerson('user', int(person_id)) + # enable the account + self.driver.shell.UpdatePerson(int(person_id), {'enabled': True}) + self.driver.shell.SetPersonHrn(int(person_id), person_hrn) + self.driver.shell.SetPersonSfaCreated(int(person_id), 'True') + self.driver.shell.AddPersonToSite(int(person_id), site['site_id']) + self.driver.shell.AddPersonToSlice(int(person_id), slice['slice_id']) - return added_persons + # Add keys + for key in users_by_hrn[person_hrn].get('keys', []): + key = {'key':key, 'key_type':'ssh'} + self.driver.shell.AddPersonKey(person_id, key) - def verify_keys(self, persons, users, peer, options={}): - # existing keys - key_ids = [] - for person in persons: - key_ids.extend(person['key_ids']) - keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key']) - keydict = {} - for key in keylist: - keydict[key['key']] = key['key_id'] - existing_keys = keydict.keys() - persondict = {} - for person in persons: - persondict[person['email']] = person + # Delete persons from slice + for person_hrn in persons_to_delete: + person_id = slice_persons_by_hrn[person_hrn].get('person_id') + slice_id = slice['slice_id'] + self.driver.shell.DeletePersonFromSlice(person_id, slice_id) + + + # Update kept persons + for person_hrn in persons_to_keep: + person_id = slice_persons_by_hrn[person_hrn].get('person_id') + persons_to_verify_keys[person_id] = users_by_hrn[person_hrn] + + self.verify_keys(persons_to_verify_keys, peer, options) + + return persons_to_add + + + def verify_keys(self, persons_to_verify_keys, peer, options={}): + # we only add keys that comes from sfa to persons in PL + for person_id in persons_to_verify_keys: + person_sfa_keys = persons_to_verify_keys[person_id].get('keys', []) + person_pl_keys = self.driver.shell.GetKeys({'person_id': int(person_id)}) + person_pl_keys_list = [key['key'] for key in person_pl_keys] + + keys_to_add = set(person_sfa_keys).difference(person_pl_keys_list) + + for key_string in keys_to_add: + key = {'key': key_string, 'key_type': 'ssh'} + self.driver.shell.AddPersonKey(int(person_id), key) - # add new keys - requested_keys = [] - updated_persons = [] - for user in users: - user_keys = user.get('keys', []) - updated_persons.append(user) - for key_string in user_keys: - requested_keys.append(key_string) - if key_string not in existing_keys: - key = {'key': key_string, 'key_type': 'ssh'} - try: - if peer: - person = 
persondict[user['email']] - self.driver.shell.UnBindObjectFromPeer('person', person['person_id'], peer['shortname']) - key['key_id'] = self.driver.shell.AddPersonKey(user['email'], key) - if peer: - key_index = user_keys.index(key['key']) - remote_key_id = user['key_ids'][key_index] - self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id) - - finally: - if peer: - self.driver.shell.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id']) - - # remove old keys (only if we are not appending) - append = options.get('append', True) - if append == False: - removed_keys = set(existing_keys).difference(requested_keys) - for existing_key_id in keydict: - if keydict[existing_key_id] in removed_keys: - try: - if peer: - self.driver.shell.UnBindObjectFromPeer('key', existing_key_id, peer['shortname']) - self.driver.shell.DeleteKey(existing_key_id) - except: - pass def verify_slice_attributes(self, slice, requested_slice_attributes, options={}, admin=False): append = options.get('append', True) @@ -685,7 +572,8 @@ class PlSlices: # get sliver attributes added_slice_attributes = [] removed_slice_attributes = [] - ignored_slice_attribute_names = [] + # we need to keep the slice hrn anyway + ignored_slice_attribute_names = ['hrn'] existing_slice_attributes = self.driver.shell.GetSliceTags({'slice_id': slice['slice_id']}) # get attributes that should be removed @@ -694,6 +582,7 @@ class PlSlices: # If a slice already has a admin only role it was probably given to them by an # admin, so we should ignore it. ignored_slice_attribute_names.append(slice_tag['tagname']) + attribute_found=True else: # If an existing slice attribute was not found in the request it should # be removed diff --git a/sfa/planetlab/plxrn.py b/sfa/planetlab/plxrn.py index a8461b53..c9fa5ca0 100644 --- a/sfa/planetlab/plxrn.py +++ b/sfa/planetlab/plxrn.py @@ -22,21 +22,21 @@ def xrn_to_hostname(hrn): return Xrn.unescape(PlXrn(xrn=hrn, type='node').get_leaf()) # helpers to handle external objects created via fedaration -def xrn_to_ext_slicename (xrn): - slice_hrn=PlXrn(xrn=xrn,type='slice').get_hrn() - site_hrn = get_authority(slice_hrn) - login_base = '8'.join(site_hrn.split('.')) - slice_name = '_'.join([login_base, slice_hrn.split('.')[-1]]) - return slice_name - -def hrn_to_ext_loginbase (hrn): - site_hrn = get_authority(hrn) - login_base = '8'.join(site_hrn.split('.'))[:20] - return login_base - def top_auth (hrn): return hrn.split('.')[0] +def hash_loginbase(site_hrn): + if len(site_hrn) <= 12: + return site_hrn.replace('.','8') + ratio = float(12) / len(site_hrn) + auths_tab = site_hrn.split('.') + auths_tab2 = [] + for auth in auths_tab: + auth2 = auth[:int(len(auth)*ratio)] + auths_tab2.append(auth2) + + return '8'.join(auths_tab2) + class PlXrn (Xrn): @staticmethod diff --git a/sfa/util/faults.py b/sfa/util/faults.py index f1d5cfd1..4a614b1d 100644 --- a/sfa/util/faults.py +++ b/sfa/util/faults.py @@ -98,7 +98,7 @@ class SfaNotImplemented(SfaFault): class SfaAPIError(SfaFault): def __init__(self, extra = None): - faultString = "Internal API error" + faultString = "Internal SFA API error" SfaFault.__init__(self, GENICODE.SERVERERROR, faultString, extra) class MalformedHrnException(SfaFault):
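
The new upload path in sfa/client/manifolduploader.py boils down to a single v2 'forward' call: the query updates the 'local:account' object for the target platform, and authentication now travels separately as an annotation. A minimal standalone sketch of that call sequence follows; the URL and platform reuse the defaults above, while the credential string, the exact layout of the 'auth' dictionary and the success code are illustrative assumptions, not taken from this patch.

import xmlrpclib

url = "http://myslice.onelab.eu:7080"                 # DEFAULT_URL above
platform = 'ple'                                      # DEFAULT_PLATFORM above
delegated_credential = "<delegated credential XML>"   # placeholder
# assumed shape of the auth dictionary (password-style manifold auth)
auth = {'AuthMethod': 'password', 'Username': 'user@example.org', 'AuthString': 'secret'}

# one proxy per call - the uploader above no longer caches it
manifold = xmlrpclib.ServerProxy(url, allow_none=True)

# v2 interface: a single forward() call carrying query + annotation
query = {'action': 'update',
         'object': 'local:account',
         'filters': [['platform', '=', platform]],
         'params': {'credential': delegated_credential}}
annotation = {'authentication': auth}

retcod = manifold.forward(query, annotation)
# assuming the usual convention that code 0 means success
if retcod.get('code') == 0:
    print 'v2 upload OK'
else:
    print 'upload failed: code %s, %s' % (retcod.get('code'), retcod.get('description'))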
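The ThreadManager class renamed to MultiClient (sfa/client/multiclient.py) keeps the usage pattern seen throughout slice_manager.py: create one instance, call run() once per aggregate, then block on get_results(). A small self-contained illustration, with an invented worker function standing in for the per-aggregate SFA call:

from sfa.client.multiclient import MultiClient
import time

def query_aggregate(name, delay):
    # stand-in for a call such as _ListResources(aggregate, server, [cred], options)
    time.sleep(delay)
    return "result from %s" % name

multiclient = MultiClient()
for name, delay in [("am-one", 1), ("am-two", 2)]:
    multiclient.run(query_aggregate, name, delay)

# blocks until every thread is done; per the docstring above, get_results(lenient=False)
# would raise an SFA fault if any thread pushed an error onto the error queue
results = multiclient.get_results()
print results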
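The same slice-name derivation now appears in plaggregate.py, pldriver.py and plslices.py: for slices under the local root authority the login_base is the site part of the HRN truncated to 12 characters, while federated slices get a login_base computed by the new hash_loginbase() helper in plxrn.py, and the PlanetLab slice name is always login_base + '_' + the leaf of the HRN. A worked example of that mapping; the HRNs are invented and the helpers below mirror the patched code rather than importing it:

def top_auth(hrn):
    return hrn.split('.')[0]

def hash_loginbase(site_hrn):
    # squeeze the site HRN into roughly 12 characters, joining the shortened
    # authority parts with '8' instead of '.'
    if len(site_hrn) <= 12:
        return site_hrn.replace('.', '8')
    ratio = float(12) / len(site_hrn)
    return '8'.join(auth[:int(len(auth)*ratio)] for auth in site_hrn.split('.'))

def pl_slice_name(slice_hrn, local_root_hrn):
    site_hrn = '.'.join(slice_hrn.split('.')[:-1])
    slice_part = slice_hrn.split('.')[-1]
    if top_auth(slice_hrn) == local_root_hrn:
        login_base = slice_hrn.split('.')[-2][:12]    # local site keeps its own login_base
    else:
        login_base = hash_loginbase(site_hrn)         # federated site gets a hashed login_base
    return '_'.join([login_base, slice_part])

print pl_slice_name('ple.inria.heartbeat', 'ple')          # -> inria_heartbeat
print pl_slice_name('elab.inria.sfatest.myslice', 'ple')   # -> el8inr8sfat_myslice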
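verify_persons() in plslices.py is rewritten around plain set arithmetic on HRNs: requested users not yet in the slice are added (created first if needed), current slice members absent from the request are removed, and users present on both sides only get their SSH keys re-checked by the new verify_keys(). The decision logic reduces to the sketch below, where made-up dictionaries stand in for the GetPersons results and the PLCAPI calls are left out:

users_by_hrn = {'ple.inria.alice': {'email': 'alice@example.org', 'keys': ['ssh-rsa AAA...']},
                'ple.inria.bob':   {'keys': ['ssh-rsa BBB...']}}
slice_persons_by_hrn = {'ple.inria.bob':   {'person_id': 7},
                        'ple.inria.carol': {'person_id': 9}}

persons_to_add    = set(users_by_hrn).difference(slice_persons_by_hrn)    # alice: AddPerson/AddPersonToSlice
persons_to_delete = set(slice_persons_by_hrn).difference(users_by_hrn)    # carol: DeletePersonFromSlice
persons_to_keep   = set(users_by_hrn).intersection(slice_persons_by_hrn)  # bob: only verify keys

# verify_keys: add only the SFA-provided keys that PL does not already have
person_sfa_keys = users_by_hrn['ple.inria.bob'].get('keys', [])
person_pl_keys_list = ['ssh-rsa OLD...']            # what GetKeys would return for bob
keys_to_add = set(person_sfa_keys).difference(person_pl_keys_list)

print persons_to_add, persons_to_delete, persons_to_keep, keys_to_add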