From: Thierry Parmentelat Date: Fri, 27 Jan 2012 10:36:57 +0000 (+0100) Subject: merge master again (2.0-10 changelog only) X-Git-Tag: sfa-2.1-1~5 X-Git-Url: http://git.onelab.eu/?p=sfa.git;a=commitdiff_plain;h=862dfa7f7b8cce8c17e80c42aedd8d500ea86cb6;hp=5dd712d2c54e90dcdff9bcacef7a12cb817aad44 merge master again (2.0-10 changelog only) --- diff --git a/Makefile b/Makefile index a21ce8ff..79b71c5a 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ python-install: python setup.py install --root=$(DESTDIR) chmod 444 $(DESTDIR)/etc/sfa/default_config.xml rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/*egg-info - rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/sfa/storage/sfa.sql + rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/sfa/storage/migrations (cd $(DESTDIR)/usr/bin ; ln -s sfi.py sfi; ln -s sfascan.py sfascan) python-clean: version-clean @@ -146,28 +146,36 @@ ifeq (,$(SSHURL)) @exit 1 endif -sync: synccheck + +synclib: synccheck +$(RSYNC) --relative ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/ - +$(RSYNC) ./tests/ $(SSHURL)/root/tests-sfa +syncbin: synccheck +$(RSYNC) $(BINS) $(SSHURL)/usr/bin/ +syncinit: synccheck +$(RSYNC) ./init.d/sfa $(SSHURL)/etc/init.d/ +syncconfig: +$(RSYNC) ./config/default_config.xml $(SSHURL)/etc/sfa/ - +$(RSYNC) ./sfa/storage/sfa.sql $(SSHURL)/usr/share/sfa/ +synctest: synccheck + +$(RSYNC) ./tests/ $(SSHURL)/root/tests-sfa +syncrestart: synccheck $(SSHCOMMAND) exec service sfa restart +syncmig: + +$(RSYNC) ./sfa/storage/migrations $(SSHURL)/usr/share/sfa/ + + +# full-fledged +sync: synclib syncbin syncinit syncconfig syncrestart # 99% of the time this is enough -fastsync: synccheck - +$(RSYNC) --relative ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/ - $(SSHCOMMAND) exec service sfa restart +syncfast: synclib syncrestart -clientsync: synccheck - +$(RSYNC) $(BINS) $(SSHURL)/usr/bin/ +.PHONY: synccheck synclib syncbin syncconfig synctest syncrestart sync syncfast -ricasync: synccheck +syncrica: synccheck 
+$(RSYNC) --relative ./sfa/fd ./sfa/generic/fd.py ./sfa/rspecs/versions/federica.py $(SSHURL)/usr/lib\*/python2.\*/site-packages/ $(SSHCOMMAND) exec service sfa restart -.PHONY: synccheck sync fastsync clientsync ricasync +.PHONY: syncrica ########## CLIENTLIBFILES= \ diff --git a/init.d/sfa b/init.d/sfa index 32cc1a07..7c51b488 100755 --- a/init.d/sfa +++ b/init.d/sfa @@ -59,6 +59,18 @@ function postgresql_check () { return 1 } +# use a single date of this script invocation for the dump_*_db functions. +DATE=$(date +"%Y-%m-%d-%H-%M-%S") + +# Dumps the database - optional argument to specify filename suffix +function dump_sfa_db() { + if [ -n "$1" ] ; then suffix="-$1" ; else suffix="" ; fi + mkdir -p /usr/share/sfa/backups + dumpfile=/usr/share/sfa/backups/$(date +"${SFA_DB_NAME}.${DATE}${suffix}.sql") + pg_dump -U $SFA_DB_USER $SFA_DB_NAME > $dumpfile + echo "Saved sfa database in $dumpfile" + check +} # Regenerate configuration files - almost verbatim from plc.init function reload () { @@ -217,9 +229,6 @@ function db_start () { if ! psql -U $SFA_DB_USER -c "" $SFA_DB_NAME >/dev/null 2>&1 ; then createdb -U postgres --template=template0 --encoding=UNICODE --owner=$SFA_DB_USER $SFA_DB_NAME check - # install db schema - psql -U $SFA_DB_USER -f /usr/share/sfa/sfa.sql $SFA_DB_NAME - check fi check @@ -249,6 +258,7 @@ function start() { reload db_start + # migrations are now handled in the code by sfa.storage.dbschema # install peer certs action $"SFA installing peer certs" daemon /usr/bin/sfa-start.py -t -d $OPTIONS @@ -290,8 +300,11 @@ case "$1" in status sfa-start.py RETVAL=$? 
;; + dbdump) + dump_sfa_db + ;; *) - echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}" + echo $"Usage: $0 {start|stop|reload|restart|condrestart|status|dbdump}" exit 1 ;; esac diff --git a/setup.py b/setup.py index 033a7d4d..c1e84532 100755 --- a/setup.py +++ b/setup.py @@ -64,7 +64,8 @@ data_files = [ ('/etc/sfa/', [ 'config/aggregates.xml', ('/etc/sfatables/matches/', glob('sfatables/matches/*.xml')), ('/etc/sfatables/targets/', glob('sfatables/targets/*.xml')), ('/etc/init.d/', [ "init.d/%s"%x for x in initscripts ]), - ('/usr/share/sfa/', [ 'sfa/storage/sfa.sql' ] ), + ('/usr/share/sfa/migrations', glob('sfa/storage/migrations/*.*') ), + ('/usr/share/sfa/migrations/versions', glob('sfa/storage/migrations/versions/*') ), ('/usr/share/sfa/examples/', glob('sfa/examples/*' ) + [ 'cron.d/sfa.cron' ] ), ] diff --git a/sfa.spec b/sfa.spec index 52e55dce..963abef0 100644 --- a/sfa.spec +++ b/sfa.spec @@ -1,6 +1,6 @@ %define name sfa -%define version 2.0 -%define taglevel 10 +%define version 2.1 +%define taglevel 0 %define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}} %global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" ) @@ -24,7 +24,10 @@ URL: %{SCMURL} Summary: the SFA python libraries Group: Applications/System BuildRequires: make + +Requires: myplc-config Requires: python >= 2.5 +Requires: pyOpenSSL >= 0.7 Requires: m2crypto Requires: xmlsec1-openssl-devel Requires: libxslt-python @@ -40,8 +43,10 @@ Requires: python-dateutil Requires: postgresql >= 8.2, postgresql-server >= 8.2 Requires: postgresql-python Requires: python-psycopg2 -Requires: pyOpenSSL >= 0.7 -Requires: myplc-config +# f8=0.4 - f12=0.5 f14=0.6 f16=0.7 +Requires: python-sqlalchemy +Requires: python-migrate +# the eucalyptus aggregate uses this module Requires: python-xmlbuilder # python 2.5 has uuid module added, for python 2.4 we still need it. 
@@ -148,7 +153,7 @@ rm -rf $RPM_BUILD_ROOT %config /etc/sfa/default_config.xml %config (noreplace) /etc/sfa/aggregates.xml %config (noreplace) /etc/sfa/registries.xml -/usr/share/sfa/sfa.sql +/usr/share/sfa/migrations /usr/share/sfa/examples /var/www/html/wsdl/*.wsdl @@ -197,18 +202,18 @@ rm -rf $RPM_BUILD_ROOT %files tests %{_datadir}/sfa/tests -### sfa-plc installs the 'sfa' service -%post plc +### sfa installs the 'sfa' service +%post chkconfig --add sfa -%preun plc +%preun if [ "$1" = 0 ] ; then /sbin/service sfa stop || : /sbin/chkconfig --del sfa || : fi -%postun plc -[ "$1" -ge "1" ] && service sfa restart +%postun +[ "$1" -ge "1" ] && { service sfa dbdump ; service sfa restart ; } ### sfa-cm installs the 'sfa-cm' service %post cm diff --git a/sfa/client/sfi.py b/sfa/client/sfi.py index 21774719..9ddef9fd 100644 --- a/sfa/client/sfi.py +++ b/sfa/client/sfi.py @@ -28,7 +28,8 @@ from sfa.util.config import Config from sfa.util.version import version_core from sfa.util.cache import Cache -from sfa.storage.record import SfaRecord, UserRecord, SliceRecord, NodeRecord, AuthorityRecord +from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode +from sfa.storage.model import make_record from sfa.rspecs.rspec import RSpec from sfa.rspecs.rspec_converter import RSpecConverter @@ -106,44 +107,35 @@ def save_rspec_to_file(rspec, filename): f.close() return -def save_records_to_file(filename, recordList, format="xml"): +def save_records_to_file(filename, record_dicts, format="xml"): if format == "xml": index = 0 - for record in recordList: + for record_dict in record_dicts: if index > 0: - save_record_to_file(filename + "." + str(index), record) + save_record_to_file(filename + "." 
+ str(index), record_dict) else: - save_record_to_file(filename, record) + save_record_to_file(filename, record_dict) index = index + 1 elif format == "xmllist": f = open(filename, "w") f.write("\n") - for record in recordList: - record = SfaRecord(dict=record) - f.write('\n') + for record_dict in record_dicts: + record_obj=make_record (dict=record_dict) + f.write('\n') f.write("\n") f.close() elif format == "hrnlist": f = open(filename, "w") - for record in recordList: - record = SfaRecord(dict=record) - f.write(record.get_name() + "\n") + for record_dict in record_dicts: + record_obj=make_record (dict=record_dict) + f.write(record_obj.hrn + "\n") f.close() else: # this should never happen print "unknown output format", format -def save_record_to_file(filename, record): - if record['type'] in ['user']: - record = UserRecord(dict=record) - elif record['type'] in ['slice']: - record = SliceRecord(dict=record) - elif record['type'] in ['node']: - record = NodeRecord(dict=record) - elif record['type'] in ['authority', 'ma', 'sa']: - record = AuthorityRecord(dict=record) - else: - record = SfaRecord(dict=record) +def save_record_to_file(filename, record_dict): + rec_record = make_record (dict=record_dict) str = record.save_to_string() f=codecs.open(filename, encoding='utf-8',mode="w") f.write(str) @@ -154,10 +146,9 @@ def save_record_to_file(filename, record): # load methods def load_record_from_file(filename): f=codecs.open(filename, encoding="utf-8", mode="r") - str = f.read() + xml_string = f.read() f.close() - record = SfaRecord(string=str) - return record + return make_record (xml=xml_string) import uuid @@ -710,27 +701,16 @@ or version information about sfi itself self.print_help() sys.exit(1) hrn = args[0] - records = self.registry().Resolve(hrn, self.my_credential_string) - records = filter_records(options.type, records) - if not records: + record_dicts = self.registry().Resolve(hrn, self.my_credential_string) + record_dicts = filter_records(options.type, 
record_dicts) + if not record_dicts: self.logger.error("No record of type %s"% options.type) + records = [ make_record (dict=record_dict) for record_dict in record_dicts ] for record in records: - if record['type'] in ['user']: - record = UserRecord(dict=record) - elif record['type'] in ['slice']: - record = SliceRecord(dict=record) - elif record['type'] in ['node']: - record = NodeRecord(dict=record) - elif record['type'].startswith('authority'): - record = AuthorityRecord(dict=record) - else: - record = SfaRecord(dict=record) - if (options.format == "text"): - record.dump() - else: - print record.save_to_string() + if (options.format == "text"): record.dump() + else: print record.save_as_xml() if options.file: - save_records_to_file(options.file, records, options.fileformat) + save_records_to_file(options.file, record_dicts, options.fileformat) return def add(self, options, args): @@ -741,7 +721,7 @@ or version information about sfi itself sys.exit(1) record_filepath = args[0] rec_file = self.get_record_file(record_filepath) - record = load_record_from_file(rec_file).as_dict() + record = load_record_from_file(rec_file).todict() return self.registry().Register(record, auth_cred) def update(self, options, args): @@ -751,14 +731,14 @@ or version information about sfi itself sys.exit(1) rec_file = self.get_record_file(args[0]) record = load_record_from_file(rec_file) - if record['type'] == "user": - if record.get_name() == self.user: + if record.type == "user": + if record.hrn == self.user: cred = self.my_credential_string else: cred = self.my_authority_credential_string() - elif record['type'] in ["slice"]: + elif record.type in ["slice"]: try: - cred = self.slice_credential_string(record.get_name()) + cred = self.slice_credential_string(record.hrn) except ServerException, e: # XXX smbaker -- once we have better error return codes, update this # to do something better than a string compare @@ -766,14 +746,14 @@ or version information about sfi itself cred = 
self.my_authority_credential_string() else: raise - elif record.get_type() in ["authority"]: + elif record.type in ["authority"]: cred = self.my_authority_credential_string() - elif record.get_type() == 'node': + elif record.type == 'node': cred = self.my_authority_credential_string() else: - raise "unknown record type" + record.get_type() - record = record.as_dict() - return self.registry().Update(record, cred) + raise "unknown record type" + record.type + record_dict = record.todict() + return self.registry().Update(record_dict, cred) def remove(self, options, args): "remove registry record by name (Remove)" diff --git a/sfa/importer/sfa-import-openstack.py b/sfa/importer/sfa-import-openstack.py index ec785e81..78032e29 100755 --- a/sfa/importer/sfa-import-openstack.py +++ b/sfa/importer/sfa-import-openstack.py @@ -21,14 +21,18 @@ import sys from sfa.util.config import Config from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename -from sfa.storage.table import SfaTable -from sfa.storage.record import SfaRecord -from sfa.trust.certificate import convert_public_key, Keypair -from sfa.trust.gid import create_uuid -from sfa.importer.sfaImport import sfaImport, _cleanup_string from sfa.util.sfalogging import logger + +from sfa.trust.gid import create_uuid +from sfa.trust.certificate import convert_public_key, Keypair + from sfa.openstack.openstack_shell import OpenstackShell +from sfa.storage.alchemy import dbsession +from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode + +from sfa.importer.sfaImport import sfaImport, _cleanup_string + def process_options(): (options, args) = getopt.getopt(sys.argv[1:], '', []) @@ -71,18 +75,16 @@ def main(): existing_records = {} existing_hrns = [] key_ids = [] - table = SfaTable() - results = table.find() - for result in results: - existing_records[(result['hrn'], result['type'])] = result - 
existing_hrns.append(result['hrn']) + for record in dbsession.query(RegRecord): + existing_records[ (record.hrn, record.type,) ] = record + existing_hrns.append(record.hrn) # Get all users persons = shell.user_get_all() persons_dict = {} keys_filename = config.config_path + os.sep + 'person_keys.py' - old_person_keys = load_keys(keys_filename) + old_person_keys = load_keys(keys_filename) person_keys = {} for person in persons: hrn = config.SFA_INTERFACE_HRN + "." + person.id @@ -107,10 +109,14 @@ def main(): logger.warn("Import: person %s does not have a PL public key"%hrn) pkey = Keypair(create=True) person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", \ - authority=get_authority(hrn)) - logger.info("Import: importing %s " % person_record.summary_string()) - person_record.sync() + person_record = RegUser () + person_record.type='user' + person_record.hrn=hrn + person_record.gid=person_gid + person_record.authority=get_authority(hrn) + dbsession.add(person_record) + dbsession.commit() + logger.info("Import: imported person %s" % person_record) # Get all projects projects = shell.project_get_all() @@ -123,11 +129,14 @@ def main(): pkey = Keypair(create=True) urn = hrn_to_urn(hrn, 'slice') project_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - project_record = SfaRecord(hrn=hrn, gid=project_gid, type="slice", - authority=get_authority(hrn)) - projects_dict[project_record['hrn']] = project_record - logger.info("Import: importing %s " % project_record.summary_string()) - project_record.sync() + project_record = RegSlice () + project_record.type='slice' + project_record.hrn=hrn + project_record.gid=project_gid + project_record.authority=get_authority(hrn) + dbsession.add(project_record) + dbsession.commit() + logger.info("Import: imported slice: %s" % project_record) # remove stale records system_records = [interface_hrn, root_auth, interface_hrn + 
'.slicemanager'] @@ -136,7 +145,7 @@ def main(): continue record = existing_records[(record_hrn, type)] - if record['peer_authority']: + if record.peer_authority: continue if type == 'user': @@ -148,10 +157,10 @@ def main(): else: continue - record_object = existing_records[(record_hrn, type)] - record = SfaRecord(dict=record_object) - logger.info("Import: removing %s " % record.summary_string()) - record.delete() + record_object = existing_records[ (record_hrn, type) ] + logger.info("Import: removing %s " % record) + dbsession.delete(record_object) + dbsession.commit() # save pub keys logger.info('Import: saving current pub keys') diff --git a/sfa/importer/sfa-import-plc.py b/sfa/importer/sfa-import-plc.py index 723f473f..36617bb9 100755 --- a/sfa/importer/sfa-import-plc.py +++ b/sfa/importer/sfa-import-plc.py @@ -21,13 +21,17 @@ import sys from sfa.util.config import Config from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename -from sfa.storage.table import SfaTable -from sfa.storage.record import SfaRecord + from sfa.trust.gid import create_uuid from sfa.trust.certificate import convert_public_key, Keypair -from sfa.importer.sfaImport import sfaImport, _cleanup_string + from sfa.plc.plshell import PlShell +from sfa.storage.alchemy import dbsession +from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode + +from sfa.importer.sfaImport import sfaImport, _cleanup_string + def process_options(): (options, args) = getopt.getopt(sys.argv[1:], '', []) @@ -62,31 +66,29 @@ def _get_site_hrn(interface_hrn, site): hrn = ".".join([interface_hrn, "internet2", site['login_base']]) return hrn +# one would think this code could use a call to DBSchema +# however now import s expected to be done after service creation def main(): process_options() config = Config() + sfaImporter = sfaImport() + logger=sfaImporter.logger + 
logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL) if not config.SFA_REGISTRY_ENABLED: sys.exit(0) root_auth = config.SFA_REGISTRY_ROOT_AUTH interface_hrn = config.SFA_INTERFACE_HRN - keys_filename = config.config_path + os.sep + 'person_keys.py' - sfaImporter = sfaImport() - sfaImporter.create_top_level_records() - logger=sfaImporter.logger - logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL) shell = PlShell (config) + sfaImporter.create_top_level_records() # create dict of all existing sfa records existing_records = {} existing_hrns = [] key_ids = [] - person_keys = {} - table = SfaTable() - results = table.find() - for result in results: - existing_records[(result['hrn'], result['type'])] = result - existing_hrns.append(result['hrn']) + for record in dbsession.query(RegRecord): + existing_records[ (record.hrn, record.type,) ] = record + existing_hrns.append(record.hrn) # Get all plc sites sites = shell.GetSites({'peer_id': None}) @@ -109,7 +111,9 @@ def main(): keys_dict[key['key_id']] = key['key'] # create a dict of person keys keyed on key_id + keys_filename = config.config_path + os.sep + 'person_keys.py' old_person_keys = load_keys(keys_filename) + person_keys = {} for person in persons: pubkeys = [] for key_id in person['key_ids']: @@ -134,41 +138,46 @@ def main(): i2site = {'name': 'Internet2', 'abbreviated_name': 'I2', 'login_base': 'internet2', 'site_id': -1} site_hrn = _get_site_hrn(interface_hrn, i2site) - logger.info("Importing site: %s" % site_hrn) # import if hrn is not in list of existing hrns or if the hrn exists # but its not a site record if site_hrn not in existing_hrns or \ (site_hrn, 'authority') not in existing_records: - logger.info("Import: site %s " % site_hrn) urn = hrn_to_urn(site_hrn, 'authority') if not sfaImporter.AuthHierarchy.auth_exists(urn): sfaImporter.AuthHierarchy.create_auth(urn) auth_info = sfaImporter.AuthHierarchy.get_auth_info(urn) - auth_record = SfaRecord(hrn=site_hrn, gid=auth_info.get_gid_object(), \ - 
type="authority", pointer=site['site_id'], - authority=get_authority(site_hrn)) - auth_record.sync(verbose=True) + auth_record = RegAuthority() + auth_record.type='authority' + auth_record.hrn=site_hrn + auth_record.gid=auth_info.get_gid_object() + auth_record.pointer=site['site_id'] + auth_record.authority=get_authority(site_hrn) + dbsession.add(auth_record) + dbsession.commit() + logger.info("Import: Imported authority (vini site) %s"%auth_record) # start importing for site in sites: site_hrn = _get_site_hrn(interface_hrn, site) - logger.info("Importing site: %s" % site_hrn) # import if hrn is not in list of existing hrns or if the hrn exists # but its not a site record if site_hrn not in existing_hrns or \ (site_hrn, 'authority') not in existing_records: try: - logger.info("Import: site %s " % site_hrn) urn = hrn_to_urn(site_hrn, 'authority') if not sfaImporter.AuthHierarchy.auth_exists(urn): sfaImporter.AuthHierarchy.create_auth(urn) auth_info = sfaImporter.AuthHierarchy.get_auth_info(urn) - auth_record = SfaRecord(hrn=site_hrn, gid=auth_info.get_gid_object(), \ - type="authority", pointer=site['site_id'], - authority=get_authority(site_hrn)) - logger.info("Import: importing site: %s" % auth_record.summary_string()) - auth_record.sync() + auth_record = RegAuthority() + auth_record.type='authority' + auth_record.hrn=site_hrn + auth_record.gid=auth_info.get_gid_object() + auth_record.pointer=site['site_id'] + auth_record.authority=get_authority(site_hrn) + dbsession.add(auth_record) + dbsession.commit() + logger.info("Import: imported authority (site) : %s" % auth_record) except: # if the site import fails then there is no point in trying to import the # site's child records (node, slices, persons), so skip them. 
@@ -191,9 +200,15 @@ def main(): pkey = Keypair(create=True) urn = hrn_to_urn(hrn, 'node') node_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'], authority=get_authority(hrn)) - logger.info("Import: importing node: %s" % node_record.summary_string()) - node_record.sync() + node_record = RegNode () + node_record.type='node' + node_record.hrn=hrn + node_record.gid=node_gid + node_record.pointer =node['node_id'] + node_record.authority=get_authority(hrn) + dbsession.add(node_record) + dbsession.commit() + logger.info("Import: imported node: %s" % node_record) except: logger.log_exc("Import: failed to import node") @@ -212,10 +227,15 @@ def main(): pkey = Keypair(create=True) urn = hrn_to_urn(hrn, 'slice') slice_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id'], - authority=get_authority(hrn)) - logger.info("Import: importing slice: %s" % slice_record.summary_string()) - slice_record.sync() + slice_record = RegSlice () + slice_record.type='slice' + slice_record.hrn=hrn + slice_record.gid=slice_gid + slice_record.pointer=slice['slice_id'] + slice_record.authority=get_authority(hrn) + dbsession.add(slice_record) + dbsession.commit() + logger.info("Import: imported slice: %s" % slice_record) except: logger.log_exc("Import: failed to import slice") @@ -228,8 +248,8 @@ def main(): if len(hrn) > 64: hrn = hrn[:64] - # if user's primary key has chnaged then we need to update the - # users gid by forcing a update here + # if user's primary key has changed then we need to update the + # users gid by forcing an update here old_keys = [] new_keys = [] if person_id in old_person_keys: @@ -257,10 +277,15 @@ def main(): pkey = Keypair(create=True) urn = hrn_to_urn(hrn, 'user') person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - person_record = 
SfaRecord(hrn=hrn, gid=person_gid, type="user", \ - pointer=person['person_id'], authority=get_authority(hrn)) - logger.info("Import: importing person: %s" % person_record.summary_string()) - person_record.sync() + person_record = RegUser () + person_record.type='user' + person_record.hrn=hrn + person_record.gid=person_gid + person_record.pointer=person['person_id'] + person_record.authority=get_authority(hrn) + dbsession.add (person_record) + dbsession.commit() + logger.info("Import: imported person: %s" % person_record) except: logger.log_exc("Import: failed to import person.") @@ -271,7 +296,7 @@ def main(): continue record = existing_records[(record_hrn, type)] - if record['peer_authority']: + if record.peer_authority: continue # dont delete vini's internet2 placeholdder record @@ -285,7 +310,7 @@ def main(): if type == 'authority': for site in sites: site_hrn = interface_hrn + "." + site['login_base'] - if site_hrn == record_hrn and site['site_id'] == record['pointer']: + if site_hrn == record_hrn and site['site_id'] == record.pointer: found = True break @@ -299,7 +324,7 @@ def main(): alt_username = person['email'].split("@")[0].replace(".", "_").replace("+", "_") if username in [tmp_username, alt_username] and \ site['site_id'] in person['site_ids'] and \ - person['person_id'] == record['pointer']: + person['person_id'] == record.pointer: found = True break @@ -307,7 +332,7 @@ def main(): slicename = hrn_to_pl_slicename(record_hrn) for slice in slices: if slicename == slice['name'] and \ - slice['slice_id'] == record['pointer']: + slice['slice_id'] == record.pointer: found = True break @@ -320,7 +345,7 @@ def main(): tmp_nodename = node['hostname'] if tmp_nodename == nodename and \ node['site_id'] == site['site_id'] and \ - node['node_id'] == record['pointer']: + node['node_id'] == record.pointer: found = True break else: @@ -329,9 +354,9 @@ def main(): if not found: try: record_object = existing_records[(record_hrn, type)] - record = 
SfaRecord(dict=record_object) - logger.info("Import: deleting record: %s" % record.summary_string()) - record.delete() + logger.info("Import: deleting record: %s" % record) + dbsession.delete(record_object) + dbsession.commit() except: logger.log_exc("Import: failded to delete record") # save pub keys diff --git a/sfa/importer/sfa-nuke-plc.py b/sfa/importer/sfa-nuke-plc.py index bd116c88..a4967c36 100755 --- a/sfa/importer/sfa-nuke-plc.py +++ b/sfa/importer/sfa-nuke-plc.py @@ -13,22 +13,31 @@ from optparse import OptionParser from sfa.util.sfalogging import logger -from sfa.storage.table import SfaTable +from sfa.storage.alchemy import engine +from sfa.storage.dbschema import DBSchema def main(): - usage="%prog: trash the registry DB (the 'sfa' table in the 'planetlab5' database)" + usage="%prog: trash the registry DB" parser = OptionParser(usage=usage) - parser.add_option('-f','--file-system',dest='clean_fs',action='store_true',default=False, - help='Clean up the /var/lib/sfa/authorities area as well') - parser.add_option('-c','--certs',dest='clean_certs',action='store_true',default=False, - help='Remove all cached certs/gids found in /var/lib/sfa/authorities area as well') + parser.add_option("-f","--file-system",dest='clean_fs',action='store_true',default=False, + help="Clean up the /var/lib/sfa/authorities area as well") + parser.add_option("-c","--certs",dest='clean_certs',action='store_true',default=False, + help="Remove all cached certs/gids found in /var/lib/sfa/authorities area as well") + parser.add_option("-0","--no-reinit",dest='reinit',action='store_false',default=True, + help="By default a new DB schema is installed after the cleanup; this option prevents that") (options,args)=parser.parse_args() if args: parser.print_help() sys.exit(1) + dbschema=DBSchema() logger.info("Purging SFA records from database") - table = SfaTable() - table.nuke() + dbschema.nuke() + # for convenience we re-create the schema here, so there's no need for an explicit + # 
service sfa restart + # however in some (upgrade) scenarios this might be wrong + if options.reinit: + logger.info("re-creating empty schema") + dbschema.init_or_upgrade() if options.clean_certs: # remove the server certificate and all gids found in /var/lib/sfa/authorities diff --git a/sfa/importer/sfaImport.py b/sfa/importer/sfaImport.py index ca0bff2f..e2101fcc 100644 --- a/sfa/importer/sfaImport.py +++ b/sfa/importer/sfaImport.py @@ -16,9 +16,8 @@ from sfa.trust.certificate import convert_public_key, Keypair from sfa.trust.trustedroots import TrustedRoots from sfa.trust.hierarchy import Hierarchy from sfa.trust.gid import create_uuid -from sfa.storage.table import SfaTable -from sfa.storage.record import SfaRecord - +from sfa.storage.model import RegRecord, RegAuthority, RegUser +from sfa.storage.alchemy import dbsession def _un_unicode(str): if isinstance(str, unicode): @@ -49,7 +48,6 @@ class sfaImport: def __init__(self): self.logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog') self.AuthHierarchy = Hierarchy() -# self.table = SfaTable() self.config = Config() self.TrustedRoots = TrustedRoots(Config.get_trustedroots_dir(self.config)) self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH @@ -67,6 +65,9 @@ class sfaImport: # create interface records self.logger.info("Import: creating interface records") +# xxx authority+ turning off the creation of authority+* +# in fact his is required - used in SfaApi._getCredentialRaw +# that tries to locate 'authority+sa' self.create_interface_records() # add local root authority's cert to trusted list @@ -85,13 +86,19 @@ class sfaImport: if not parent_hrn == hrn: self.create_top_level_auth_records(parent_hrn) - # enxure key and cert exists: + # ensure key and cert exists: self.AuthHierarchy.create_top_level_auth(hrn) # create the db record if it doesnt already exist auth_info = self.AuthHierarchy.get_auth_info(hrn) - auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", 
pointer=-1, authority=get_authority(hrn)) - self.logger.info("Import: importing %s " % auth_record.summary_string()) - auth_record.sync() + auth_record = RegAuthority() + auth_record.type='authority' + auth_record.hrn=hrn + auth_record.gid=auth_info.get_gid_object() + auth_record.authority=get_authority(hrn) + auth_record.just_created() + dbsession.add (auth_record) + dbsession.commit() + self.logger.info("Import: imported authority (parent) %s " % auth_record) def create_sm_client_record(self): """ @@ -104,34 +111,42 @@ class sfaImport: self.AuthHierarchy.create_auth(urn) auth_info = self.AuthHierarchy.get_auth_info(hrn) - record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), \ - type="user", pointer=-1, authority=get_authority(hrn)) - self.logger.info("Import: importing %s " % record.summary_string()) - record.sync() + user_record = RegUser() + user_record.type='user' + user_record.hrn=hrn + user_record.gid=auth_info.get_gid_object() + user_record.authority=get_authority(hrn) + user_record.just_created() + dbsession.add (user_record) + dbsession.commit() + self.logger.info("Import: importing user (slicemanager) %s " % user_record) def create_interface_records(self): """ Create a record for each SFA interface """ # just create certs for all sfa interfaces even if they - # arent enabled + # aren't enabled hrn = self.config.SFA_INTERFACE_HRN - interfaces = ['authority+sa', 'authority+am', 'authority+sm'] - table = SfaTable() auth_info = self.AuthHierarchy.get_auth_info(hrn) pkey = auth_info.get_pkey_object() - for interface in interfaces: - urn = hrn_to_urn(hrn, interface) + for type in [ 'authority+sa', 'authority+am', 'authority+sm', ]: + urn = hrn_to_urn(hrn, type) gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey) - interface_record = SfaRecord(hrn=hrn, type=interface, pointer=-1, - gid = gid, authority=get_authority(hrn)) - self.logger.info("Import: importing %s " % interface_record.summary_string()) - interface_record.sync() + # xxx this 
should probably use a RegAuthority, or a to-be-defined RegPeer object + # but for now we have to preserve the authority+<> stuff + interface_record = RegAuthority() + interface_record.type=type + interface_record.hrn=hrn + interface_record.gid= gid + interface_record.authority=get_authority(hrn) + interface_record.just_created() + dbsession.add (interface_record) + dbsession.commit() + self.logger.info("Import: imported authority (%s) %s " % (type,interface_record)) def delete_record(self, hrn, type): # delete the record - table = SfaTable() - record_list = table.find({'type': type, 'hrn': hrn}) - for record in record_list: - self.logger.info("Import: removing record %s %s" % (type, hrn)) - table.remove(record) + for rec in dbsession.query(RegRecord).filter_by(type=type,hrn=hrn): + dbsession.delete(rec) + dbsession.commit() diff --git a/sfa/managers/registry_manager.py b/sfa/managers/registry_manager.py index b5be45ff..79ac91c0 100644 --- a/sfa/managers/registry_manager.py +++ b/sfa/managers/registry_manager.py @@ -1,5 +1,4 @@ import types -import time # for get_key_from_incoming_ip import tempfile import os @@ -19,8 +18,8 @@ from sfa.trust.credential import Credential from sfa.trust.certificate import Certificate, Keypair, convert_public_key from sfa.trust.gid import create_uuid -from sfa.storage.record import SfaRecord -from sfa.storage.table import SfaTable +from sfa.storage.model import make_record,RegRecord +from sfa.storage.alchemy import dbsession class RegistryManager: @@ -49,19 +48,19 @@ class RegistryManager: auth_hrn = api.auth.get_authority(hrn) if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN: auth_hrn = hrn - # get record info auth_info = api.auth.get_auth_info(auth_hrn) - table = SfaTable() - records = table.findObjects({'type': type, 'hrn': hrn}) - if not records: - raise RecordNotFound(hrn) - record = records[0] + # get record info + record=dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first() + if not record: + raise 
RecordNotFound("hrn=%s, type=%s"%(hrn,type)) # verify_cancreate_credential requires that the member lists # (researchers, pis, etc) be filled in - self.driver.augment_records_with_testbed_info (record) - if not self.driver.is_enabled (record): - raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email'])) + logger.debug("get credential before augment dict, keys=%s"%record.__dict__.keys()) + self.driver.augment_records_with_testbed_info (record.__dict__) + logger.debug("get credential after augment dict, keys=%s"%record.__dict__.keys()) + if not self.driver.is_enabled (record.__dict__): + raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record.email)) # get the callers gid # if this is a self cred the record's gid is the caller's gid @@ -73,12 +72,12 @@ class RegistryManager: caller_hrn = caller_gid.get_hrn() object_hrn = record.get_gid_object().get_hrn() - rights = api.auth.determine_user_rights(caller_hrn, record) + rights = api.auth.determine_user_rights(caller_hrn, record.__dict__) # make sure caller has rights to this object if rights.is_empty(): - raise PermissionError(caller_hrn + " has no rights to " + record['name']) + raise PermissionError(caller_hrn + " has no rights to " + record.hrn) - object_gid = GID(string=record['gid']) + object_gid = GID(string=record.gid) new_cred = Credential(subject = object_gid.get_subject()) new_cred.set_gid_caller(caller_gid) new_cred.set_gid_object(object_gid) @@ -86,8 +85,8 @@ class RegistryManager: #new_cred.set_pubkey(object_gid.get_pubkey()) new_cred.set_privileges(rights) new_cred.get_privileges().delegate_all_privileges(True) - if 'expires' in record: - date = utcparse(record['expires']) + if hasattr(record,'expires'): + date = utcparse(record.expires) expires = datetime_to_epoch(date) new_cred.set_expiration(int(expires)) auth_kind = "authority,ma,sa" @@ -107,10 +106,11 @@ class RegistryManager: if not type: type = 
Xrn(xrns).get_type() hrns = [urn_to_hrn(xrn)[0] for xrn in xrns] + # load all known registry names into a prefix tree and attempt to find # the longest matching prefix - # create a dict where key is a registry hrn and its value is a - # hrns at that registry (determined by the known prefix tree). + # create a dict where key is a registry hrn and its value is a list + # of hrns at that registry (determined by the known prefix tree). xrn_dict = {} registries = api.registries tree = prefixTree() @@ -136,47 +136,52 @@ class RegistryManager: credential = api.getCredential() interface = api.registries[registry_hrn] server_proxy = api.server_proxy(interface, credential) - peer_records = server_proxy.Resolve(xrns, credential) - records.extend([SfaRecord(dict=record).as_dict() for record in peer_records]) + peer_records = server_proxy.Resolve(xrns, credential,type) + # pass foreign records as-is + # previous code used to read + # records.extend([SfaRecord(dict=record).as_dict() for record in peer_records]) + # not sure why the records coming through xmlrpc had to be processed at all + records.extend(peer_records) # try resolving the remaining unfound records at the local registry local_hrns = list ( set(hrns).difference([record['hrn'] for record in records]) ) # - table = SfaTable() - local_records = table.findObjects({'hrn': local_hrns}) + local_records = dbsession.query(RegRecord).filter(RegRecord.hrn.in_(local_hrns)) + if type: + local_records = local_records.filter_by(type=type) + local_records=local_records.all() + logger.info("Resolve: local_records=%s (type=%s)"%(local_records,type)) + local_dicts = [ record.__dict__ for record in local_records ] if full: # in full mode we get as much info as we can, which involves contacting the # testbed for getting implementation details about the record - self.driver.augment_records_with_testbed_info(local_records) + self.driver.augment_records_with_testbed_info(local_dicts) # also we fill the 'url' field for known authorities # 
used to be in the driver code, sounds like a poorman thing though def solve_neighbour_url (record): - if not record['type'].startswith('authority'): return - hrn=record['hrn'] + if not record.type.startswith('authority'): return + hrn=record.hrn for neighbour_dict in [ api.aggregates, api.registries ]: if hrn in neighbour_dict: - record['url']=neighbour_dict[hrn].get_url() + record.url=neighbour_dict[hrn].get_url() return - [ solve_neighbour_url (record) for record in local_records ] - + for record in local_records: solve_neighbour_url (record) - - # convert local record objects to dicts - records.extend([dict(record) for record in local_records]) - if type: - records = filter(lambda rec: rec['type'] in [type], records) - + # convert local record objects to dicts for xmlrpc + # xxx somehow here calling dict(record) issues a weird error + # however record.todict() seems to work fine + # records.extend( [ dict(record) for record in local_records ] ) + records.extend( [ record.todict() for record in local_records ] ) if not records: raise RecordNotFound(str(hrns)) return records - def List(self, api, xrn, origin_hrn=None): + def List (self, api, xrn, origin_hrn=None): hrn, type = urn_to_hrn(xrn) # load all know registry names into a prefix tree and attempt to find # the longest matching prefix - records = [] registries = api.registries registry_hrns = registries.keys() tree = prefixTree() @@ -188,23 +193,24 @@ class RegistryManager: raise MissingAuthority(xrn) # if the best match (longest matching hrn) is not the local registry, # forward the request - records = [] + record_dicts = [] if registry_hrn != api.hrn: credential = api.getCredential() interface = api.registries[registry_hrn] server_proxy = api.server_proxy(interface, credential) record_list = server_proxy.List(xrn, credential) - records = [SfaRecord(dict=record).as_dict() for record in record_list] + # same as above, no need to process what comes from through xmlrpc + # pass foreign records as-is + 
record_dicts = record_list # if we still have not found the record yet, try the local registry - if not records: + if not record_dicts: if not api.auth.hierarchy.auth_exists(hrn): raise MissingAuthority(hrn) + records = dbsession.query(RegRecord).filter_by(authority=hrn) + record_dicts=[ record.todict() for record in records ] - table = SfaTable() - records = table.find({'authority': hrn}) - - return records + return record_dicts def CreateGid(self, api, xrn, cert): @@ -227,57 +233,58 @@ class RegistryManager: # subject_record describes the subject of the relationships # ref_record contains the target values for the various relationships we need to manage # (to begin with, this is just the slice x person relationship) - def update_relations (self, subject_record, ref_record): - type=subject_record['type'] + def update_relations (self, subject_obj, ref_obj): + type=subject_obj.type if type=='slice': - self.update_relation(subject_record, 'researcher', ref_record.get('researcher'), 'user') + self.update_relation(subject_obj, 'researcher', ref_obj.researcher, 'user') # field_key is the name of one field in the record, typically 'researcher' for a 'slice' record # hrns is the list of hrns that should be linked to the subject from now on # target_type would be e.g. 
'user' in the 'slice' x 'researcher' example - def update_relation (self, sfa_record, field_key, hrns, target_type): + def update_relation (self, record_obj, field_key, hrns, target_type): # locate the linked objects in our db - subject_type=sfa_record['type'] - subject_id=sfa_record['pointer'] - table = SfaTable() - link_sfa_records = table.find ({'type':target_type, 'hrn': hrns}) - link_ids = [ rec.get('pointer') for rec in link_sfa_records ] + subject_type=record_obj.type + subject_id=record_obj.pointer + # get the 'pointer' field of all matching records + link_id_tuples = dbsession.query(RegRecord.pointer).filter_by(type=target_type).filter(RegRecord.hrn.in_(hrns)).all() + # sqlalchemy returns named tuples for columns + link_ids = [ tuple.pointer for tuple in link_id_tuples ] self.driver.update_relation (subject_type, target_type, subject_id, link_ids) - - def Register(self, api, record): + def Register(self, api, record_dict): - hrn, type = record['hrn'], record['type'] + hrn, type = record_dict['hrn'], record_dict['type'] urn = hrn_to_urn(hrn,type) # validate the type if type not in ['authority', 'slice', 'node', 'user']: raise UnknownSfaType(type) - # check if record already exists - table = SfaTable() - existing_records = table.find({'type': type, 'hrn': hrn}) + # check if record_dict already exists + existing_records = dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).all() if existing_records: raise ExistingRecord(hrn) - record = SfaRecord(dict = record) - record['authority'] = get_authority(record['hrn']) - auth_info = api.auth.get_auth_info(record['authority']) + assert ('type' in record_dict) + # returns the right type of RegRecord according to type in record + record = make_record(dict=record_dict) + record.just_created() + record.authority = get_authority(record.hrn) + auth_info = api.auth.get_auth_info(record.authority) pub_key = None # make sure record has a gid - if 'gid' not in record: + if not record.gid: uuid = create_uuid() pkey = 
Keypair(create=True) - if 'keys' in record and record['keys']: - pub_key=record['keys'] + if getattr(record,'keys',None): + pub_key=record.keys # use only first key in record - if isinstance(record['keys'], types.ListType): - pub_key = record['keys'][0] + if isinstance(record.keys, types.ListType): + pub_key = record.keys[0] pkey = convert_public_key(pub_key) gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey) gid = gid_object.save_to_string(save_parents=True) - record['gid'] = gid - record.set_gid(gid) + record.gid = gid if type in ["authority"]: # update the tree @@ -286,14 +293,14 @@ class RegistryManager: # get the GID from the newly created authority gid = auth_info.get_gid_object() - record.set_gid(gid.save_to_string(save_parents=True)) + record.gid=gid.save_to_string(save_parents=True) # update testbed-specific data if needed - pointer = self.driver.register (record, hrn, pub_key) + pointer = self.driver.register (record.__dict__, hrn, pub_key) - record.set_pointer(pointer) - record_id = table.insert(record) - record['record_id'] = record_id + record.pointer=pointer + dbsession.add(record) + dbsession.commit() # update membership for researchers, pis, owners, operators self.update_relations (record, record) @@ -301,17 +308,16 @@ class RegistryManager: return record.get_gid_object().save_to_string(save_parents=True) def Update(self, api, record_dict): - new_record = SfaRecord(dict = record_dict) - type = new_record['type'] - hrn = new_record['hrn'] - urn = hrn_to_urn(hrn,type) - table = SfaTable() + assert ('type' in record_dict) + new_record=RegRecord(dict=record_dict) + type = new_record.type + hrn = new_record.hrn + # make sure the record exists - records = table.findObjects({'type': type, 'hrn': hrn}) - if not records: - raise RecordNotFound(hrn) - record = records[0] - record['last_updated'] = time.gmtime() + record = dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first() + if not record: + raise RecordNotFound("hrn=%s, 
type=%s"%(hrn,type)) + record.just_updated() # validate the type if type not in ['authority', 'slice', 'node', 'user']: @@ -319,18 +325,18 @@ # Use the pointer from the existing record, not the one that the user # gave us. This prevents the user from inserting a forged pointer - pointer = record['pointer'] + pointer = record.pointer # is the a change in keys ? new_key=None if type=='user': - if 'keys' in new_record and new_record['keys']: - new_key=new_record['keys'] + if getattr(new_record,'keys',None): + new_key=new_record.keys if isinstance (new_key,types.ListType): new_key=new_key[0] # update the PLC information that was specified with the record - if not self.driver.update (record, new_record, hrn, new_key): + if not self.driver.update (record.__dict__, new_record.__dict__, hrn, new_key): logger.warning("driver.update failed") # take new_key into account @@ -338,11 +344,11 @@ # update the openssl key and gid pkey = convert_public_key(new_key) uuid = create_uuid() + urn = hrn_to_urn(hrn,type) gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey) gid = gid_object.save_to_string(save_parents=True) - record['gid'] = gid - record = SfaRecord(dict=record) - table.update(record) + record.gid = gid + dbsession.commit() # update membership for researchers, pis, owners, operators self.update_relations (record, new_record) @@ -351,19 +357,19 @@ # expecting an Xrn instance def Remove(self, api, xrn, origin_hrn=None): - - table = SfaTable() - filter = {'hrn': xrn.get_hrn()} hrn=xrn.get_hrn() type=xrn.get_type() + request=dbsession.query(RegRecord).filter_by(hrn=hrn) if type and type not in ['all', '*']: - filter['type'] = type + request=request.filter_by(type=type) - records = table.find(filter) - if not records: raise RecordNotFound(hrn) - record = records[0] - type = record['type'] - + record = request.first() + if not record: + msg="Could not find hrn %s"%hrn + if type: msg += " type=%s"%type + raise 
RecordNotFound(msg) + + type = record.type if type not in ['slice', 'user', 'node', 'authority'] : raise UnknownSfaType(type) @@ -382,15 +388,16 @@ class RegistryManager: # call testbed callback first # IIUC this is done on the local testbed TOO because of the refreshpeer link - if not self.driver.remove(record): + if not self.driver.remove(record.__dict__): logger.warning("driver.remove failed") # delete from sfa db - table.remove(record) + dbsession.delete(record) + dbsession.commit() return 1 - # This is a PLC-specific thing... + # This is a PLC-specific thing, won't work with other platforms def get_key_from_incoming_ip (self, api): # verify that the callers's ip address exist in the db and is an interface # for a node in the db @@ -404,23 +411,20 @@ class RegistryManager: node = nodes[0] # look up the sfa record - table = SfaTable() - records = table.findObjects({'type': 'node', 'pointer': node['node_id']}) - if not records: - raise RecordNotFound("pointer:" + str(node['node_id'])) - record = records[0] + record=dbsession.query(RegRecord).filter_by(type='node',pointer=node['node_id']).first() + if not record: + raise RecordNotFound("node with pointer %s"%node['node_id']) # generate a new keypair and gid uuid = create_uuid() pkey = Keypair(create=True) - urn = hrn_to_urn(record['hrn'], record['type']) + urn = hrn_to_urn(record.hrn, record.type) gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey) gid = gid_object.save_to_string(save_parents=True) - record['gid'] = gid - record.set_gid(gid) + record.gid = gid # update the record - table.update(record) + dbsession.commit() # attempt the scp the key # and gid onto the node diff --git a/sfa/managers/registry_manager_openstack.py b/sfa/managers/registry_manager_openstack.py index fc01fd16..6e210faf 100644 --- a/sfa/managers/registry_manager_openstack.py +++ b/sfa/managers/registry_manager_openstack.py @@ -1,5 +1,4 @@ import types -import time # for get_key_from_incoming_ip import tempfile import os @@ -13,15 
+12,18 @@ from sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn from sfa.util.plxrn import hrn_to_pl_login_base from sfa.util.version import version_core from sfa.util.sfalogging import logger + from sfa.trust.gid import GID from sfa.trust.credential import Credential from sfa.trust.certificate import Certificate, Keypair, convert_public_key from sfa.trust.gid import create_uuid -from sfa.storage.record import SfaRecord -from sfa.storage.table import SfaTable -from sfa.managers import registry_manager -class RegistryManager(registry_manager.RegistryManager): +from sfa.storage.model import make_record,RegRecord +from sfa.storage.alchemy import dbsession + +from sfa.managers.registry_manager import RegistryManager + +class RegistryManager(RegistryManager): def GetCredential(self, api, xrn, type, is_self=False): # convert xrn to hrn @@ -34,19 +36,19 @@ class RegistryManager(registry_manager.RegistryManager): auth_hrn = api.auth.get_authority(hrn) if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN: auth_hrn = hrn - # get record info auth_info = api.auth.get_auth_info(auth_hrn) - table = SfaTable() - records = table.findObjects({'type': type, 'hrn': hrn}) - if not records: - raise RecordNotFound(hrn) - record = records[0] + # get record info + record=dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first() + if not record: + raise RecordNotFound("hrn=%s, type=%s"%(hrn,type)) # verify_cancreate_credential requires that the member lists # (researchers, pis, etc) be filled in - self.driver.augment_records_with_testbed_info (record) - if not self.driver.is_enabled (record): - raise AccountNotEnabled(": PlanetLab account %s is not enabled. 
Please contact your site PI" %(record['email'])) + logger.debug("get credential before augment dict, keys=%s"%record.__dict__.keys()) + self.driver.augment_records_with_testbed_info (record.__dict__) + logger.debug("get credential after augment dict, keys=%s"%record.__dict__.keys()) + if not self.driver.is_enabled (record.__dict__): + raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record.email)) # get the callers gid # if this is a self cred the record's gid is the caller's gid @@ -58,12 +60,12 @@ class RegistryManager(registry_manager.RegistryManager): caller_hrn = caller_gid.get_hrn() object_hrn = record.get_gid_object().get_hrn() - rights = api.auth.determine_user_rights(caller_hrn, record) + rights = api.auth.determine_user_rights(caller_hrn, record.__dict__) # make sure caller has rights to this object if rights.is_empty(): - raise PermissionError(caller_hrn + " has no rights to " + record['name']) + raise PermissionError(caller_hrn + " has no rights to " + record.hrn) - object_gid = GID(string=record['gid']) + object_gid = GID(string=record.gid) new_cred = Credential(subject = object_gid.get_subject()) new_cred.set_gid_caller(caller_gid) new_cred.set_gid_object(object_gid) @@ -71,8 +73,8 @@ class RegistryManager(registry_manager.RegistryManager): #new_cred.set_pubkey(object_gid.get_pubkey()) new_cred.set_privileges(rights) new_cred.get_privileges().delegate_all_privileges(True) - if 'expires' in record: - date = utcparse(record['expires']) + if hasattr(record,'expires'): + date = utcparse(record.expires) expires = datetime_to_epoch(date) new_cred.set_expiration(int(expires)) auth_kind = "authority,ma,sa" @@ -87,21 +89,21 @@ class RegistryManager(registry_manager.RegistryManager): # subject_record describes the subject of the relationships # ref_record contains the target values for the various relationships we need to manage # (to begin with, this is just the slice x person relationship) - def update_relations 
(self, subject_record, ref_record): - type=subject_record['type'] + def update_relations (self, subject_obj, ref_obj): + type=subject_obj.type if type=='slice': - self.update_relation(subject_record, 'researcher', ref_record.get('researcher'), 'user') + self.update_relation(subject_obj, 'researcher', ref_obj.researcher, 'user') # field_key is the name of one field in the record, typically 'researcher' for a 'slice' record # hrns is the list of hrns that should be linked to the subject from now on # target_type would be e.g. 'user' in the 'slice' x 'researcher' example - def update_relation (self, sfa_record, field_key, hrns, target_type): + def update_relation (self, record_obj, field_key, hrns, target_type): # locate the linked objects in our db - subject_type=sfa_record['type'] - subject_id=sfa_record['pointer'] - table = SfaTable() - link_sfa_records = table.find ({'type':target_type, 'hrn': hrns}) - link_ids = [ rec.get('pointer') for rec in link_sfa_records ] + subject_type=record_obj.type + subject_id=record_obj.pointer + # get the 'pointer' field of all matching records + link_id_tuples = dbsession.query(RegRecord.pointer).filter_by(type=target_type).filter(RegRecord.hrn.in_(hrns)).all() + # sqlalchemy returns named tuples for columns + link_ids = [ tuple.pointer for tuple in link_id_tuples ] self.driver.update_relation (subject_type, target_type, subject_id, link_ids) - diff --git a/sfa/methods/CreateSliver.py b/sfa/methods/CreateSliver.py index 0e944ac7..27974891 100644 --- a/sfa/methods/CreateSliver.py +++ b/sfa/methods/CreateSliver.py @@ -40,7 +40,7 @@ class CreateSliver(Method): # make sure users info is specified if not users: - msg = "'users' musst be specified and cannot be null. You may need to update your client." + msg = "'users' must be specified and cannot be null. You may need to update your client." 
raise SfaInvalidArgument(name='users', extra=msg) # flter rspec through sfatables diff --git a/sfa/methods/GetSelfCredential.py b/sfa/methods/GetSelfCredential.py index fd87ca03..073ae94e 100644 --- a/sfa/methods/GetSelfCredential.py +++ b/sfa/methods/GetSelfCredential.py @@ -6,7 +6,6 @@ from sfa.util.method import Method from sfa.trust.certificate import Certificate from sfa.storage.parameter import Parameter, Mixed -from sfa.storage.record import SfaRecord class GetSelfCredential(Method): """ @@ -55,12 +54,24 @@ class GetSelfCredential(Method): self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name)) - # authenticate the gid + ### authenticate the gid + # import here so we can load this module at build-time for sfa2wsdl + #from sfa.storage.alchemy import dbsession + from sfa.storage.model import RegRecord + + # xxx-local - the current code runs Resolve, which would forward to + # another registry if needed + # I wonder if this is truly the intention, or shouldn't we instead + # only look in the local db ? 
records = self.api.manager.Resolve(self.api, xrn, type) if not records: raise RecordNotFound(hrn) - record = SfaRecord(dict=records[0]) - gid = record.get_gid_object() + + record_obj = RegRecord (dict=records[0]) + # xxx-local the local-only version would read + #record_obj = dbsession.query(RegRecord).filter_by(hrn=hrn).first() + #if not record_obj: raise RecordNotFound(hrn) + gid = record_obj.get_gid_object() gid_str = gid.save_to_string(save_parents=True) self.api.auth.authenticateGid(gid_str, [cert, type, hrn]) # authenticate the certificate against the gid in the db diff --git a/sfa/methods/List.py b/sfa/methods/List.py index bbd3e478..c023fa01 100644 --- a/sfa/methods/List.py +++ b/sfa/methods/List.py @@ -5,7 +5,6 @@ from sfa.util.method import Method from sfa.trust.credential import Credential from sfa.storage.parameter import Parameter, Mixed -from sfa.storage.record import SfaRecord class List(Method): """ @@ -23,7 +22,8 @@ class List(Method): Parameter(type([str]), "List of credentials")), ] - returns = [SfaRecord] + # xxx used to be [SfaRecord] + returns = [Parameter(dict, "registry record")] def call(self, xrn, creds): hrn, type = urn_to_hrn(xrn) diff --git a/sfa/methods/Resolve.py b/sfa/methods/Resolve.py index 9a6dd47c..7abc6cda 100644 --- a/sfa/methods/Resolve.py +++ b/sfa/methods/Resolve.py @@ -6,7 +6,6 @@ from sfa.util.method import Method from sfa.trust.credential import Credential from sfa.storage.parameter import Parameter, Mixed -from sfa.storage.record import SfaRecord class Resolve(Method): """ @@ -26,7 +25,8 @@ class Resolve(Method): Parameter(list, "List of credentials)")) ] - returns = [SfaRecord] + # xxx used to be [SfaRecord] + returns = [Parameter(dict, "registry record")] def call(self, xrns, creds): type = None diff --git a/sfa/openstack/openstack_driver.py b/sfa/openstack/openstack_driver.py index 67c5fede..3209f2b3 100644 --- a/sfa/openstack/openstack_driver.py +++ b/sfa/openstack/openstack_driver.py @@ -3,19 +3,24 @@ import 
datetime # from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \ RecordNotFound, SfaNotImplemented, SliverDoesNotExist + from sfa.util.sfalogging import logger from sfa.util.defaultdict import defaultdict from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id from sfa.util.cache import Cache + # one would think the driver should not need to mess with the SFA db, but.. -from sfa.storage.table import SfaTable + # used to be used in get_ticket #from sfa.trust.sfaticket import SfaTicket + from sfa.rspecs.version_manager import VersionManager from sfa.rspecs.rspec import RSpec + # the driver interface, mostly provides default behaviours from sfa.managers.driver import Driver + from sfa.openstack.openstack_shell import OpenstackShell import sfa.plc.peers as peers from sfa.plc.plaggregate import PlAggregate diff --git a/sfa/plc/pldriver.py b/sfa/plc/pldriver.py index 3f01e7f3..2b86f6cd 100644 --- a/sfa/plc/pldriver.py +++ b/sfa/plc/pldriver.py @@ -11,7 +11,8 @@ from sfa.util.xrn import hrn_to_urn, get_leaf, urn_to_sliver_id from sfa.util.cache import Cache # one would think the driver should not need to mess with the SFA db, but.. 
-from sfa.storage.table import SfaTable +from sfa.storage.alchemy import dbsession +from sfa.storage.model import RegRecord # used to be used in get_ticket #from sfa.trust.sfaticket import SfaTicket @@ -206,7 +207,7 @@ class PlDriver (Driver): ## - # Convert SFA fields to PLC fields for use when registering up updating + # Convert SFA fields to PLC fields for use when registering or updating # registry record in the PLC database # @@ -446,16 +447,15 @@ class PlDriver (Driver): # we'll replace pl ids (person_ids) with hrns from the sfa records # we obtain - # get the sfa records - table = SfaTable() + # get the registry records person_list, persons = [], {} - person_list = table.find({'type': 'user', 'pointer': person_ids}) + person_list = dbsession.query (RegRecord).filter(RegRecord.pointer.in_(person_ids)) # create a hrns keyed on the sfa record's pointer. # Its possible for multiple records to have the same pointer so # the dict's value will be a list of hrns. persons = defaultdict(list) for person in person_list: - persons[person['pointer']].append(person) + persons[person.pointer].append(person) # get the pl records pl_person_list, pl_persons = [], {} @@ -469,13 +469,14 @@ class PlDriver (Driver): # continue sfa_info = {} type = record['type'] + logger.info("fill_record_sfa_info - incoming record typed %s"%type) if (type == "slice"): # all slice users are researchers record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice') record['PI'] = [] record['researcher'] = [] for person_id in record.get('person_ids', []): - hrns = [person['hrn'] for person in persons[person_id]] + hrns = [person.hrn for person in persons[person_id]] record['researcher'].extend(hrns) # pis at the slice's site @@ -483,12 +484,13 @@ class PlDriver (Driver): pl_pis = site_pis[record['site_id']] pi_ids = [pi['person_id'] for pi in pl_pis] for person_id in pi_ids: - hrns = [person['hrn'] for person in persons[person_id]] + hrns = [person.hrn for person in persons[person_id]] 
record['PI'].extend(hrns) record['geni_creator'] = record['PI'] elif (type.startswith("authority")): record['url'] = None + logger.info("fill_record_sfa_info - authority xherex") if record['pointer'] != -1: record['PI'] = [] record['operator'] = [] @@ -497,7 +499,7 @@ class PlDriver (Driver): if pointer not in persons or pointer not in pl_persons: # this means there is not sfa or pl record for this user continue - hrns = [person['hrn'] for person in persons[pointer]] + hrns = [person.hrn for person in persons[pointer]] roles = pl_persons[pointer]['roles'] if 'pi' in roles: record['PI'].extend(hrns) @@ -511,6 +513,7 @@ class PlDriver (Driver): # xxx TODO: URI, LatLong, IP, DNS elif (type == "user"): + logger.info('setting user.email') sfa_info['email'] = record.get("email", "") sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user') sfa_info['geni_certificate'] = record['gid'] diff --git a/sfa/server/registry.py b/sfa/server/registry.py index bdad7df2..13a75fc7 100644 --- a/sfa/server/registry.py +++ b/sfa/server/registry.py @@ -20,6 +20,11 @@ class Registry(SfaServer): def __init__(self, ip, port, key_file, cert_file): SfaServer.__init__(self, ip, port, key_file, cert_file,'registry') + sfa_config=Config() + if Config().SFA_REGISTRY_ENABLED: + from sfa.storage.alchemy import engine + from sfa.storage.dbschema import DBSchema + DBSchema().init_or_upgrade() # # Registries is a dictionary of registry connections keyed on the registry hrn diff --git a/sfa/server/sfa-ca.py b/sfa/server/sfa-ca.py index b87e119c..9ab75e7e 100755 --- a/sfa/server/sfa-ca.py +++ b/sfa/server/sfa-ca.py @@ -27,7 +27,8 @@ from sfa.util.config import Config from sfa.trust.gid import GID, create_uuid from sfa.trust.hierarchy import Hierarchy -from sfa.storage.table import SfaTable +from sfa.storage.alchemy import dbsession +from sfa.storage.model import RegRecord def main(): args = sys.argv @@ -116,12 +117,10 @@ def export_gid(options): hrn = options.export type = options.type # check sfa table 
first - filter = {'hrn': hrn} - if type: - filter['type'] = type - table = SfaTable() - records = table.find(filter) - if not records: + request=dbsession.query(RegRecord).filter_by(hrn=hrn) + if type: request = request.filter_by(type=type) + record=request.first() + if not record: # check the authorities hierarchy hierarchy = Hierarchy() try: @@ -131,8 +130,7 @@ print "Record: %s not found" % hrn sys.exit(1) else: - record = records[0] - gid = GID(string=record['gid']) + gid = GID(string=record.gid) # get the outfile outfile = options.outfile @@ -163,16 +161,14 @@ sys.exit(1) # check if record exists in db - table = SfaTable() - records = table.find({'hrn': gid.get_hrn(), 'type': 'authority'}) - if not records: + record = dbsession.query(RegRecord).filter_by(type='authority',hrn=gid.get_hrn()).first() + if not record: print "%s not found in record database" % gid.get_hrn() sys.exit(1) # update the database record - record = records[0] - record['gid'] = gid.save_to_string(save_parents=True) - table.update(record) + record.gid = gid.save_to_string(save_parents=True) + dbsession.commit() if options.verbose: - print "Imported %s gid into db" % record['hrn'] + print "Imported %s gid into db" % record.hrn diff --git a/sfa/server/sfa-clean-peer-records.py b/sfa/server/sfa-clean-peer-records.py index a618e506..795c747f 100644 --- a/sfa/server/sfa-clean-peer-records.py +++ b/sfa/server/sfa-clean-peer-records.py @@ -12,7 +12,8 @@ from sfa.trust.certificate import Keypair from sfa.trust.hierarchy import Hierarchy from sfa.server.registry import Registries -from sfa.storage.table import SfaTable +from sfa.storage.alchemy import dbsession +from sfa.storage.model import RegRecord from sfa.client.sfaserverproxy import SfaServerProxy @@ -45,15 +46,14 @@ def main(): tree.load(registries.keys()) # get local peer records - table = SfaTable() - peer_records = table.find({'~peer_authority': None}) + peer_records=dbsession.query(RegRecord).filter (RegRecord.peer_authority != 
None).all() found_records = [] hrn_dict = {} for record in peer_records: - registry_hrn = tree.best_match(record['hrn']) + registry_hrn = tree.best_match(record.hrn) if registry_hrn not in hrn_dict: hrn_dict[registry_hrn] = [] - hrn_dict[registry_hrn].append(record['hrn']) + hrn_dict[registry_hrn].append(record.hrn) # attempt to resolve the record at the authoritative interface for registry_hrn in hrn_dict: @@ -75,8 +75,8 @@ def main(): # remove what wasnt found for peer_record in peer_records: - if peer_record['hrn'] not in found_records: - registries[sfa_api.hrn].Remove(peer_record['hrn'], credential, peer_record['type']) + if peer_record.hrn not in found_records: + registries[sfa_api.hrn].Remove(peer_record.hrn, credential, peer_record.type) if __name__ == '__main__': main() diff --git a/sfa/server/sfa-start.py b/sfa/server/sfa-start.py index a08191bf..e996f3b8 100755 --- a/sfa/server/sfa-start.py +++ b/sfa/server/sfa-start.py @@ -129,35 +129,35 @@ def update_cert_records(gids): Make sure there is a record in the registry for the specified gids. Removes old records from the db. 
""" - # import SfaTable here so this module can be loaded by PlcComponentApi - from sfa.storage.table import SfaTable - from sfa.storage.record import SfaRecord + # import db stuff here here so this module can be loaded by PlcComponentApi + from sfa.storage.alchemy import dbsession + from sfa.storage.model import RegRecord if not gids: return - table = SfaTable() # get records that actually exist in the db gid_urns = [gid.get_urn() for gid in gids] hrns_expected = [gid.get_hrn() for gid in gids] - records_found = table.find({'hrn': hrns_expected, 'pointer': -1}) + records_found = dbsession.query(RegRecord).\ + filter_by(pointer=-1).filter(RegRecord.hrn.in_(hrns_expected)).all() # remove old records for record in records_found: - if record['hrn'] not in hrns_expected and \ - record['hrn'] != self.api.config.SFA_INTERFACE_HRN: - table.remove(record) + if record.hrn not in hrns_expected and \ + record.hrn != self.api.config.SFA_INTERFACE_HRN: + dbsession.delete(record) # TODO: store urn in the db so we do this in 1 query for gid in gids: hrn, type = gid.get_hrn(), gid.get_type() - record = table.find({'hrn': hrn, 'type': type, 'pointer': -1}) + record = dbsession.query(RegRecord).filter_by(hrn=hrn, type=type,pointer=-1).first() if not record: - record = { - 'hrn': hrn, 'type': type, 'pointer': -1, - 'authority': get_authority(hrn), - 'gid': gid.save_to_string(save_parents=True), - } - record = SfaRecord(dict=record) - table.insert(record) + record = RegRecord (dict= {'type':type, + 'hrn': hrn, + 'authority': get_authority(hrn), + 'gid': gid.save_to_string(save_parents=True), + }) + dbsession.add(record) + dbsession.commit() def main(): # Generate command line parser diff --git a/sfa/server/sfaapi.py b/sfa/server/sfaapi.py index ad5cbe1a..2510ec4e 100644 --- a/sfa/server/sfaapi.py +++ b/sfa/server/sfaapi.py @@ -1,12 +1,12 @@ import os, os.path import datetime -from sfa.util.faults import SfaFault, SfaAPIError +from sfa.util.faults import SfaFault, SfaAPIError, 
RecordNotFound from sfa.util.genicode import GENICODE from sfa.util.config import Config from sfa.util.cache import Cache -from sfa.trust.auth import Auth +from sfa.trust.auth import Auth from sfa.trust.certificate import Keypair, Certificate from sfa.trust.credential import Credential from sfa.trust.rights import determine_rights @@ -109,9 +109,9 @@ class SfaApi (XmlrpcApi): # get a new credential if self.interface in ['registry']: - cred = self.__getCredentialRaw() + cred = self._getCredentialRaw() else: - cred = self.__getCredential() + cred = self._getCredential() cred.save_to_file(cred_filename, save_parents=True) return cred.save_to_string(save_parents=True) @@ -134,7 +134,7 @@ class SfaApi (XmlrpcApi): break return delegated_cred - def __getCredential(self): + def _getCredential(self): """ Get our credential from a remote registry """ @@ -148,7 +148,7 @@ class SfaApi (XmlrpcApi): cred = registry.GetCredential(self_cred, self.hrn, 'authority') return Credential(string=cred) - def __getCredentialRaw(self): + def _getCredentialRaw(self): """ Get our current credential directly from the local registry. 
""" @@ -160,15 +160,12 @@ class SfaApi (XmlrpcApi): if not auth_hrn or hrn == self.config.SFA_INTERFACE_HRN: auth_hrn = hrn auth_info = self.auth.get_auth_info(auth_hrn) - # xxx thgen fixme - use SfaTable hardwired for now - # thgen xxx fixme this is wrong all right, but temporary, will use generic - from sfa.storage.table import SfaTable - table = SfaTable() - records = table.findObjects({'hrn': hrn, 'type': 'authority+sa'}) - if not records: - raise RecordNotFound - record = records[0] - type = record['type'] + from sfa.storage.alchemy import dbsession + from sfa.storage.model import RegRecord + record = dbsession.query(RegRecord).filter_by(type='authority+sa', hrn=hrn).first() + if not record: + raise RecordNotFound(hrn) + type = record.type object_gid = record.get_gid_object() new_cred = Credential(subject = object_gid.get_subject()) new_cred.set_gid_caller(object_gid) diff --git a/sfa/storage/PostgreSQL.py b/sfa/storage/PostgreSQL.py deleted file mode 100644 index de6f9fab..00000000 --- a/sfa/storage/PostgreSQL.py +++ /dev/null @@ -1,272 +0,0 @@ -# -# PostgreSQL database interface. Sort of like DBI(3) (Database -# independent interface for Perl). -# -# - -import re -import traceback -import commands -from pprint import pformat -from types import StringTypes, NoneType - -import psycopg2 -import psycopg2.extensions -psycopg2.extensions.register_type(psycopg2.extensions.UNICODE) -# UNICODEARRAY not exported yet -psycopg2.extensions.register_type(psycopg2._psycopg.UNICODEARRAY) - -# allow to run sfa2wsdl if this is missing (for mac) -import sys -try: import pgdb -except: print >> sys.stderr, "WARNING, could not import pgdb" - -from sfa.util.faults import SfaDBError -from sfa.util.sfalogging import logger - -if not psycopg2: - is8bit = re.compile("[\x80-\xff]").search - - def unicast(typecast): - """ - pgdb returns raw UTF-8 strings. This function casts strings that - apppear to contain non-ASCII characters to unicode objects. 
- """ - - def wrapper(*args, **kwds): - value = typecast(*args, **kwds) - - # pgdb always encodes unicode objects as UTF-8 regardless of - # the DB encoding (and gives you no option for overriding - # the encoding), so always decode 8-bit objects as UTF-8. - if isinstance(value, str) and is8bit(value): - value = unicode(value, "utf-8") - - return value - - return wrapper - - pgdb.pgdbTypeCache.typecast = unicast(pgdb.pgdbTypeCache.typecast) - -def handle_exception(f): - def wrapper(*args, **kwds): - try: return f(*args, **kwds) - except Exception, fault: - raise SfaDBError(str(fault)) - return wrapper - -class PostgreSQL: - def __init__(self, config): - self.config = config - self.debug = False -# self.debug = True - self.connection = None - - @handle_exception - def cursor(self): - if self.connection is None: - # (Re)initialize database connection - if psycopg2: - try: - # Try UNIX socket first - self.connection = psycopg2.connect(user = self.config.SFA_DB_USER, - password = self.config.SFA_DB_PASSWORD, - database = self.config.SFA_DB_NAME) - except psycopg2.OperationalError: - # Fall back on TCP - self.connection = psycopg2.connect(user = self.config.SFA_DB_USER, - password = self.config.SFA_DB_PASSWORD, - database = self.config.SFA_DB_NAME, - host = self.config.SFA_DB_HOST, - port = self.config.SFA_DB_PORT) - self.connection.set_client_encoding("UNICODE") - else: - self.connection = pgdb.connect(user = self.config.SFA_DB_USER, - password = self.config.SFA_DB_PASSWORD, - host = "%s:%d" % (self.config.SFA_DB_HOST, self.config.SFA_DB_PORT), - database = self.config.SFA_DB_NAME) - - (self.rowcount, self.description, self.lastrowid) = \ - (None, None, None) - - return self.connection.cursor() - - def close(self): - if self.connection is not None: - self.connection.close() - self.connection = None - - def quote(self, value): - """ - Returns quoted version of the specified value. 
- """ - - # The pgdb._quote function is good enough for general SQL - # quoting, except for array types. - if isinstance(value, (list, tuple, set)): - return "ARRAY[%s]" % ", ".join(map, self.quote, value) - else: - return pgdb._quote(value) - - quote = classmethod(quote) - - def param(self, name, value): - # None is converted to the unquoted string NULL - if isinstance(value, NoneType): - conversion = "s" - # True and False are also converted to unquoted strings - elif isinstance(value, bool): - conversion = "s" - elif isinstance(value, float): - conversion = "f" - elif not isinstance(value, StringTypes): - conversion = "d" - else: - conversion = "s" - - return '%(' + name + ')' + conversion - - param = classmethod(param) - - def begin_work(self): - # Implicit in pgdb.connect() - pass - - def commit(self): - self.connection.commit() - - def rollback(self): - self.connection.rollback() - - def do(self, query, params = None): - cursor = self.execute(query, params) - cursor.close() - return self.rowcount - - def next_id(self, table_name, primary_key): - sequence = "%(table_name)s_%(primary_key)s_seq" % locals() - sql = "SELECT nextval('%(sequence)s')" % locals() - rows = self.selectall(sql, hashref = False) - if rows: - return rows[0][0] - return None - - def last_insert_id(self, table_name, primary_key): - if isinstance(self.lastrowid, int): - sql = "SELECT %s FROM %s WHERE oid = %d" % \ - (primary_key, table_name, self.lastrowid) - rows = self.selectall(sql, hashref = False) - if rows: - return rows[0][0] - - return None - - # modified for psycopg2-2.0.7 - # executemany is undefined for SELECT's - # see http://www.python.org/dev/peps/pep-0249/ - # accepts either None, a single dict, a tuple of single dict - in which case it execute's - # or a tuple of several dicts, in which case it executemany's - def execute(self, query, params = None): - - cursor = self.cursor() - try: - - # psycopg2 requires %()s format for all parameters, - # regardless of type. 
- # this needs to be done carefully though as with pattern-based filters - # we might have percents embedded in the query - # so e.g. GetPersons({'email':'*fake*'}) was resulting in .. LIKE '%sake%' - if psycopg2: - query = re.sub(r'(%\([^)]*\)|%)[df]', r'\1s', query) - # rewrite wildcards set by Filter.py as '***' into '%' - query = query.replace ('***','%') - - if not params: - if self.debug: - logger.debug('execute0 %r'%query) - cursor.execute(query) - elif isinstance(params,dict): - if self.debug: - logger.debug('execute-dict: params=[%r] query=[%r]'%(params,query%params)) - cursor.execute(query,params) - elif isinstance(params,tuple) and len(params)==1: - if self.debug: - logger.debug('execute-tuple %r'%(query%params[0])) - cursor.execute(query,params[0]) - else: - param_seq=(params,) - if self.debug: - for params in param_seq: - logger.debug('executemany %r'%(query%params)) - cursor.executemany(query, param_seq) - (self.rowcount, self.description, self.lastrowid) = \ - (cursor.rowcount, cursor.description, cursor.lastrowid) - except Exception, e: - try: - self.rollback() - except: - pass - uuid = commands.getoutput("uuidgen") - logger.error("Database error %s:" % uuid) - logger.error("Exception=%r"%e) - logger.error("Query=%r"%query) - logger.error("Params=%r"%pformat(params)) - logger.log_exc("PostgreSQL.execute caught exception") - raise SfaDBError("Please contact support: %s" % str(e)) - - return cursor - - def selectall(self, query, params = None, hashref = True, key_field = None): - """ - Return each row as a dictionary keyed on field name (like DBI - selectrow_hashref()). If key_field is specified, return rows - as a dictionary keyed on the specified field (like DBI - selectall_hashref()). - - If params is specified, the specified parameters will be bound - to the query. 
- """ - - cursor = self.execute(query, params) - rows = cursor.fetchall() - cursor.close() - self.commit() - if hashref or key_field is not None: - # Return each row as a dictionary keyed on field name - # (like DBI selectrow_hashref()). - labels = [column[0] for column in self.description] - rows = [dict(zip(labels, row)) for row in rows] - - if key_field is not None and key_field in labels: - # Return rows as a dictionary keyed on the specified field - # (like DBI selectall_hashref()). - return dict([(row[key_field], row) for row in rows]) - else: - return rows - - def fields(self, table, notnull = None, hasdef = None): - """ - Return the names of the fields of the specified table. - """ - - if hasattr(self, 'fields_cache'): - if self.fields_cache.has_key((table, notnull, hasdef)): - return self.fields_cache[(table, notnull, hasdef)] - else: - self.fields_cache = {} - - sql = "SELECT attname FROM pg_attribute, pg_class" \ - " WHERE pg_class.oid = attrelid" \ - " AND attnum > 0 AND relname = %(table)s" - - if notnull is not None: - sql += " AND attnotnull is %(notnull)s" - - if hasdef is not None: - sql += " AND atthasdef is %(hasdef)s" - - rows = self.selectall(sql, locals(), hashref = False) - - self.fields_cache[(table, notnull, hasdef)] = [row[0] for row in rows] - - return self.fields_cache[(table, notnull, hasdef)] diff --git a/sfa/storage/alchemy.py b/sfa/storage/alchemy.py new file mode 100644 index 00000000..7e001166 --- /dev/null +++ b/sfa/storage/alchemy.py @@ -0,0 +1,69 @@ +from types import StringTypes + +from sqlalchemy import create_engine +from sqlalchemy.orm import sessionmaker + +from sqlalchemy import Column, Integer, String +from sqlalchemy.orm import relationship, backref +from sqlalchemy import ForeignKey + +from sfa.util.sfalogging import logger + +# this module is designed to be loaded when the configured db server is reachable +# OTOH model can be loaded from anywhere including the client-side + +class Alchemy: + + def __init__ (self, 
config): + dbname="sfa" + # will be created lazily on-demand + self._session = None + # the former PostgreSQL.py used the psycopg2 directly and was doing + #self.connection.set_client_encoding("UNICODE") + # it's unclear how to achieve this in sqlalchemy, nor if it's needed at all + # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode + # we indeed have /var/lib/pgsql/data/postgresql.conf where + # this setting is unset, it might be an angle to tweak that if need be + # try a unix socket first - omitting the hostname does the trick + unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"%\ + (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_PORT,dbname) + # the TCP fallback method + tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\ + (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_HOST,config.SFA_DB_PORT,dbname) + for url in [ unix_url, tcp_url ] : + try: + self.engine = create_engine (url) + self.check() + self.url=url + return + except: + pass + self.engine=None + raise Exception,"Could not connect to database" + + + # expects boolean True: debug is ON or False: debug is OFF + def debug (self, echo): + self.engine.echo=echo + + def check (self): + self.engine.execute ("select 1").scalar() + + def session (self): + if self._session is None: + Session=sessionmaker () + self._session=Session(bind=self.engine) + return self._session + + def close_session (self): + if self._session is None: return + self._session.close() + self._session=None + +#################### +from sfa.util.config import Config + +alchemy=Alchemy (Config()) +engine=alchemy.engine +dbsession=alchemy.session() + diff --git a/sfa/storage/dbschema.py b/sfa/storage/dbschema.py new file mode 100644 index 00000000..79d3c677 --- /dev/null +++ b/sfa/storage/dbschema.py @@ -0,0 +1,124 @@ +import sys +import traceback + +from sqlalchemy import MetaData, Table +from sqlalchemy.exc import NoSuchTableError + +import migrate.versioning.api as migrate + +from sfa.util.sfalogging import 
logger +import sfa.storage.model as model + +########## this class takes care of database upgrades +### upgrade from a pre-2.1 db +# * 1.0 and up to 1.1-4: ('very old') +# was piggybacking the planetlab5 database +# this is kind of out of our scope here, we don't have the credentials +# to connect to planetlab5, but this is documented in +# https://svn.planet-lab.org/wiki/SFATutorialConfigureSFA#Upgradingnotes +# and essentially this is seamless to users +# * from 1.1-5 up to 2.0-x: ('old') +# uses the 'sfa' db and essentially the 'records' table, +# as well as record_types +# together with an 'sfa_db_version' table (version, subversion) +# * from 2.1: +# we have an 'records' table, plus 'users' and the like +# and once migrate has kicked in there is a table named (see migrate.cfg) +# migrate_db_version (repository_id, repository_path, version) +### after 2.1 +# Starting with 2.1, we use sqlalchemy-migrate scripts in a standard way +# Note that the model defined in sfa.storage.model needs to be maintained +# as the 'current/latest' version, and newly installed deployments will +# then 'jump' to the latest version number without going through the migrations +### +# An initial attempt to run this as a 001_*.py migrate script +# did not quite work out (essentially we need to set the current version +# number out of the migrations logic) +# also this approach has less stuff in the initscript, which seems just right + +class DBSchema: + + header="Upgrading to 2.1 or higher" + + def __init__ (self): + from sfa.storage.alchemy import alchemy + self.url=alchemy.url + self.engine=alchemy.engine + self.repository="/usr/share/sfa/migrations" + self.meta=MetaData (bind=self.engine) + + def current_version (self): + try: + return migrate.db_version (self.url, self.repository) + except: + return None + + def table_exists (self, tablename): + try: + table=Table (tablename, self.meta, autoload=True) + return True + except NoSuchTableError: + return False + + def drop_table (self, 
tablename): + if self.table_exists (tablename): + print >>sys.stderr, "%s: Dropping table %s"%(DBSchema.header,tablename) + self.engine.execute ("drop table %s cascade"%tablename) + else: + print >>sys.stderr, "%s: no need to drop table %s"%(DBSchema.header,tablename) + + def handle_old_releases (self): + try: + # try to find out which old version this can be + if not self.table_exists ('records'): + # this likely means we've just created the db, so it's either a fresh install + # or we come from a 'very old' depl. + # in either case, an import is required but there's nothing to clean up + print >> sys.stderr,"%s: make sure to run import"%(DBSchema.header,) + elif self.table_exists ('sfa_db_version'): + # we come from an 'old' version + self.drop_table ('records') + self.drop_table ('record_types') + self.drop_table ('sfa_db_version') + else: + # we should be good here + pass + except: + print >> sys.stderr, "%s: unknown exception"%(DBSchema.header,) + traceback.print_exc () + + # after this call the db schema and the version as known by migrate should + # reflect the current data model and the latest known version + def init_or_upgrade (self): + # check if under version control, and initialize it otherwise + if self.current_version() is None: + before="Unknown" + # can be either a very old version, or a fresh install + # for very old versions: + self.handle_old_releases() + # in any case, initialize db from current code and reflect in migrate + model.init_tables(self.engine) + code_version = migrate.version (self.repository) + migrate.version_control (self.url, self.repository, code_version) + after="%s"%self.current_version() + logger.info("DBSchema : jumped to version %s"%(after)) + else: + # use migrate in the usual way + before="%s"%self.current_version() + migrate.upgrade (self.url, self.repository) + after="%s"%self.current_version() + if before != after: + logger.info("DBSchema : upgraded version from %s to %s"%(before,after)) + + # this trashes the db 
altogether, from the current model in sfa.storage.model + # I hope this won't collide with ongoing migrations and all + # actually, now that sfa uses its own db, this is essentially equivalent to + # dropping the db entirely, modulo a 'service sfa start' + def nuke (self): + model.drop_tables(self.engine) + # so in this case it's like we haven't initialized the db at all + migrate.drop_version_control (self.url, self.repository) + + +if __name__ == '__main__': + DBSchema().init_or_upgrade() diff --git a/sfa/storage/filter.py b/sfa/storage/filter.py deleted file mode 100644 index afe425d8..00000000 --- a/sfa/storage/filter.py +++ /dev/null @@ -1,208 +0,0 @@ -from types import StringTypes -import pgdb - -from sfa.util.faults import SfaInvalidArgument - -from sfa.storage.parameter import Parameter, Mixed, python_type - -class Filter(Parameter, dict): - """ - A type of parameter that represents a filter on one or more - columns of a database table. - Special features provide support for negation, upper and lower bounds, - as well as sorting and clipping. - - - fields should be a dictionary of field names and types - Only filters on non-sequence type fields are supported. - example : fields = {'node_id': Parameter(int, "Node identifier"), - 'hostname': Parameter(int, "Fully qualified hostname", max = 255), - ...} - - - filter should be a dictionary of field names and values - representing the criteria for filtering. - example : filter = { 'hostname' : '*.edu' , site_id : [34,54] } - Whether the filter represents an intersection (AND) or a union (OR) - of these criteria is determined by the join_with argument - provided to the sql method below - - Special features: - - * a field starting with the ~ character means negation. 
- example : filter = { '~peer_id' : None } - - * a field starting with < [ ] or > means lower than or greater than - < > uses strict comparison - [ ] is for using <= or >= instead - example : filter = { ']event_id' : 2305 } - example : filter = { '>time' : 1178531418 } - in this example the integer value denotes a unix timestamp - - * if a value is a sequence type, then it should represent - a list of possible values for that field - example : filter = { 'node_id' : [12,34,56] } - - * a (string) value containing either a * or a % character is - treated as a (sql) pattern; * are replaced with % that is the - SQL wildcard character. - example : filter = { 'hostname' : '*.jp' } - - * fields starting with - are special and relate to row selection, i.e. sorting and clipping - * '-SORT' : a field name, or an ordered list of field names that are used for sorting - these fields may start with + (default) or - for denoting increasing or decreasing order - example : filter = { '-SORT' : [ '+node_id', '-hostname' ] } - * '-OFFSET' : the number of first rows to be ommitted - * '-LIMIT' : the amount of rows to be returned - example : filter = { '-OFFSET' : 100, '-LIMIT':25} - - A realistic example would read - GetNodes ( { 'node_type' : 'regular' , 'hostname' : '*.edu' , '-SORT' : 'hostname' , '-OFFSET' : 30 , '-LIMIT' : 25 } ) - and that would return regular (usual) nodes matching '*.edu' in alphabetical order from 31th to 55th - """ - - def __init__(self, fields = {}, filter = {}, doc = "Attribute filter"): - # Store the filter in our dict instance - valid_fields = {} - for field in filter: - if field in fields: - valid_fields[field] = filter[field] - dict.__init__(self, valid_fields) - - # Declare ourselves as a type of parameter that can take - # either a value or a list of values for each of the specified - # fields. 
- self.fields = dict ( [ ( field, Mixed (expected, [expected])) - for (field,expected) in fields.iteritems() - if python_type(expected) not in (list, tuple, set) ] ) - - # Null filter means no filter - Parameter.__init__(self, self.fields, doc = doc, nullok = True) - - def quote(self, value): - """ - Returns quoted version of the specified value. - """ - - # The pgdb._quote function is good enough for general SQL - # quoting, except for array types. - if isinstance(value, (list, tuple, set)): - return "ARRAY[%s]" % ", ".join(map(self.quote, value)) - else: - return pgdb._quote(value) - - def sql(self, join_with = "AND"): - """ - Returns a SQL conditional that represents this filter. - """ - - # So that we always return something - if join_with == "AND": - conditionals = ["True"] - elif join_with == "OR": - conditionals = ["False"] - else: - assert join_with in ("AND", "OR") - - # init - sorts = [] - clips = [] - - for field, value in self.iteritems(): - # handle negation, numeric comparisons - # simple, 1-depth only mechanism - - modifiers={'~' : False, - '<' : False, '>' : False, - '[' : False, ']' : False, - '-' : False, - } - - for char in modifiers.keys(): - if field[0] == char: - modifiers[char]=True - field = field[1:] - break - - # filter on fields - if not modifiers['-']: - if field not in self.fields: - raise SfaInvalidArgument, "Invalid filter field '%s'" % field - - if isinstance(value, (list, tuple, set)): - # handling filters like '~slice_id':[] - # this should return true, as it's the opposite of 'slice_id':[] which is false - # prior to this fix, 'slice_id':[] would have returned ``slice_id IN (NULL) '' which is unknown - # so it worked by coincidence, but the negation '~slice_ids':[] would return false too - if not value: - field="" - operator="" - value = "FALSE" - else: - operator = "IN" - value = map(str, map(self.quote, value)) - value = "(%s)" % ", ".join(value) - else: - if value is None: - operator = "IS" - value = "NULL" - elif 
isinstance(value, StringTypes) and \ - (value.find("*") > -1 or value.find("%") > -1): - operator = "LIKE" - # insert *** in pattern instead of either * or % - # we dont use % as requests are likely to %-expansion later on - # actual replacement to % done in PostgreSQL.py - value = value.replace ('*','***') - value = value.replace ('%','***') - value = str(self.quote(value)) - else: - operator = "=" - if modifiers['<']: - operator='<' - if modifiers['>']: - operator='>' - if modifiers['[']: - operator='<=' - if modifiers[']']: - operator='>=' - else: - value = str(self.quote(value)) - - clause = "%s %s %s" % (field, operator, value) - - if modifiers['~']: - clause = " ( NOT %s ) " % (clause) - - conditionals.append(clause) - # sorting and clipping - else: - if field not in ('SORT','OFFSET','LIMIT'): - raise SfaInvalidArgument, "Invalid filter, unknown sort and clip field %r"%field - # sorting - if field == 'SORT': - if not isinstance(value,(list,tuple,set)): - value=[value] - for field in value: - order = 'ASC' - if field[0] == '+': - field = field[1:] - elif field[0] == '-': - field = field[1:] - order = 'DESC' - if field not in self.fields: - raise SfaInvalidArgument, "Invalid field %r in SORT filter"%field - sorts.append("%s %s"%(field,order)) - # clipping - elif field == 'OFFSET': - clips.append("OFFSET %d"%value) - # clipping continued - elif field == 'LIMIT' : - clips.append("LIMIT %d"%value) - - where_part = (" %s " % join_with).join(conditionals) - clip_part = "" - if sorts: - clip_part += " ORDER BY " + ",".join(sorts) - if clips: - clip_part += " " + " ".join(clips) - return (where_part,clip_part) diff --git a/sfa/storage/migrations/__init__.py b/sfa/storage/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sfa/storage/migrations/migrate.cfg b/sfa/storage/migrations/migrate.cfg new file mode 100644 index 00000000..c570dd90 --- /dev/null +++ b/sfa/storage/migrations/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to 
identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=sqlalchemy-migrate repository for SFA-2.1 and on + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_db_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. +# This must be a list; example: ['postgres','sqlite'] +required_dbs=['postgres'] diff --git a/sfa/storage/migrations/versions/__init__.py b/sfa/storage/migrations/versions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sfa/storage/model.py b/sfa/storage/model.py new file mode 100644 index 00000000..7bb12aa1 --- /dev/null +++ b/sfa/storage/model.py @@ -0,0 +1,258 @@ +from types import StringTypes +from datetime import datetime + +from sqlalchemy import Column, Integer, String, DateTime +from sqlalchemy import Table, Column, MetaData, join, ForeignKey +from sqlalchemy.orm import relationship, backref +from sqlalchemy.orm import column_property +from sqlalchemy.orm import object_mapper +from sqlalchemy.orm import validates +from sqlalchemy.ext.declarative import declarative_base + +from sfa.util.sfalogging import logger +from sfa.util.xml import XML + +from sfa.trust.gid import GID + +############################## +Base=declarative_base() + +#################### +# dicts vs objects +#################### +# 
historically the front end to the db dealt with dicts, so the code was only dealing with dicts +# sqlalchemy however offers an object interface, meaning that you write obj.id instead of obj['id'] +# which is admittedly much nicer +# however we still need to deal with dictionaries if only for the xmlrpc layer +# +# here are a few utilities for this +# +# (*) first off, when an old pieve of code needs to be used as-is, if only temporarily, the simplest trick +# is to use obj.__dict__ +# this behaves exactly like required, i.e. obj.__dict__['field']='new value' does change obj.field +# however this depends on sqlalchemy's implementation so it should be avoided +# +# (*) second, when an object needs to be exposed to the xmlrpc layer, we need to convert it into a dict +# remember though that writing the resulting dictionary won't change the object +# essentially obj.__dict__ would be fine too, except that we want to discard alchemy private keys starting with '_' +# 2 ways are provided for that: +# . dict(obj) +# . 
obj.todict() +# the former dict(obj) relies on __iter__() and next() below, and does not rely on the fields names +# although it seems to work fine, I've found cases where it issues a weird python error that I could not get right +# so the latter obj.todict() seems more reliable but more hacky as is relies on the form of fields, so this can probably be improved +# +# (*) finally for converting a dictionary into an sqlalchemy object, we provide +# obj.load_from_dict(dict) + +class AlchemyObj: + def __iter__(self): + self._i = iter(object_mapper(self).columns) + return self + def next(self): + n = self._i.next().name + return n, getattr(self, n) + def todict (self): + d=self.__dict__ + keys=[k for k in d.keys() if not k.startswith('_')] + return dict ( [ (k,d[k]) for k in keys ] ) + def load_from_dict (self, d): + for (k,v) in d.iteritems(): + # experimental + if isinstance(v, StringTypes) and v.lower() in ['true']: v=True + if isinstance(v, StringTypes) and v.lower() in ['false']: v=False + setattr(self,k,v) + + # in addition we provide convenience for converting to and from xml records + # for this purpose only, we need the subclasses to define 'fields' as either + # a list or a dictionary + def xml_fields (self): + fields=self.fields + if isinstance(fields,dict): fields=fields.keys() + return fields + + def save_as_xml (self): + # xxx not sure about the scope here + input_dict = dict( [ (key, getattr(self.key), ) for key in self.xml_fields() if getattr(self,key,None) ] ) + xml_record=XML("") + xml_record.parse_dict (input_dict) + return xml_record.toxml() + + def dump(self, dump_parents=False): + for key in self.fields: + if key == 'gid' and self.gid: + gid = GID(string=self.gid) + print " %s:" % key + gid.dump(8, dump_parents) + elif getattr(self,key,None): + print " %s: %s" % (key, getattr(self,key)) + +# # only intended for debugging +# def inspect (self, logger, message=""): +# logger.info("%s -- Inspecting AlchemyObj -- attrs"%message) +# for k in dir(self): 
+# if not k.startswith('_'): +# logger.info (" %s: %s"%(k,getattr(self,k))) +# logger.info("%s -- Inspecting AlchemyObj -- __dict__"%message) +# d=self.__dict__ +# for (k,v) in d.iteritems(): +# logger.info("[%s]=%s"%(k,v)) + + +############################## +# various kinds of records are implemented as an inheritance hierarchy +# RegRecord is the base class for all actual variants +# a first draft was using 'type' as the discriminator for the inheritance +# but we had to define another more internal column (classtype) so we +# accomodate variants in types like authority+am and the like + +class RegRecord (Base,AlchemyObj): + __tablename__ = 'records' + record_id = Column (Integer, primary_key=True) + # this is the discriminator that tells which class to use + classtype = Column (String) + type = Column (String) + hrn = Column (String) + gid = Column (String) + authority = Column (String) + peer_authority = Column (String) + pointer = Column (Integer, default=-1) + date_created = Column (DateTime) + last_updated = Column (DateTime) + # use the 'type' column to decide which subclass the object is of + __mapper_args__ = { 'polymorphic_on' : classtype } + + fields = [ 'type', 'hrn', 'gid', 'authority', 'peer_authority' ] + def __init__ (self, type=None, hrn=None, gid=None, authority=None, peer_authority=None, + pointer=None, dict=None): + if type: self.type=type + if hrn: self.hrn=hrn + if gid: + if isinstance(gid, StringTypes): self.gid=gid + else: self.gid=gid.save_to_string(save_parents=True) + if authority: self.authority=authority + if peer_authority: self.peer_authority=peer_authority + if pointer: self.pointer=pointer + if dict: self.load_from_dict (dict) + + def __repr__(self): + result="[Record id=%s, type=%s, hrn=%s, authority=%s, pointer=%s" % \ + (self.record_id, self.type, self.hrn, self.authority, self.pointer) + # skip the uniform '--- BEGIN CERTIFICATE --' stuff + if self.gid: result+=" gid=%s..."%self.gid[28:36] + else: result+=" nogid" + result += 
"]" + return result + + @validates ('gid') + def validate_gid (self, key, gid): + if gid is None: return + elif isinstance(gid, StringTypes): return gid + else: return gid.save_to_string(save_parents=True) + + # xxx - there might be smarter ways to handle get/set'ing gid using validation hooks + def get_gid_object (self): + if not self.gid: return None + else: return GID(string=self.gid) + + def just_created (self): + now=datetime.now() + self.date_created=now + self.last_updated=now + + def just_updated (self): + now=datetime.now() + self.last_updated=now + +############################## +class RegUser (RegRecord): + __tablename__ = 'users' + # these objects will have type='user' in the records table + __mapper_args__ = { 'polymorphic_identity' : 'user' } + record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True) + email = Column ('email', String) + + # append stuff at the end of the record __repr__ + def __repr__ (self): + result = RegRecord.__repr__(self).replace("Record","User") + result.replace ("]"," email=%s"%self.email) + return result + + @validates('email') + def validate_email(self, key, address): + assert '@' in address + return address + +class RegAuthority (RegRecord): + __tablename__ = 'authorities' + __mapper_args__ = { 'polymorphic_identity' : 'authority' } + record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True) + + # no proper data yet, just hack the typename + def __repr__ (self): + return RegRecord.__repr__(self).replace("Record","Authority") + +class RegSlice (RegRecord): + __tablename__ = 'slices' + __mapper_args__ = { 'polymorphic_identity' : 'slice' } + record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True) + + def __repr__ (self): + return RegRecord.__repr__(self).replace("Record","Slice") + +class RegNode (RegRecord): + __tablename__ = 'nodes' + __mapper_args__ = { 'polymorphic_identity' : 'node' } + record_id = Column (Integer, ForeignKey ("records.record_id"), 
primary_key=True) + + def __repr__ (self): + return RegRecord.__repr__(self).replace("Record","Node") + +############################## +# although the db needs of course to be reachable, +# the schema management functions are here and not in alchemy +# because the actual details of the classes need to be known +# migrations: this code has no notion of the previous versions +# of the data model nor of migrations +# sfa.storage.migrations.db_init uses this when starting from +# a fresh db only +def init_tables(engine): + logger.info("Initializing db schema from current/latest model") + Base.metadata.create_all(engine) + +def drop_tables(engine): + logger.info("Dropping tables from current/latest model") + Base.metadata.drop_all(engine) + +############################## +# create a record of the right type from either a dict or an xml string +def make_record (dict={}, xml=""): + if dict: return make_record_dict (dict) + elif xml: return make_record_xml (xml) + else: raise Exception("make_record has no input") + +# convert an incoming record - typically from xmlrpc - into an object +def make_record_dict (record_dict): + assert ('type' in record_dict) + type=record_dict['type'].split('+')[0] + if type=='authority': + result=RegAuthority (dict=record_dict) + elif type=='user': + result=RegUser (dict=record_dict) + elif type=='slice': + result=RegSlice (dict=record_dict) + elif type=='node': + result=RegNode (dict=record_dict) + else: + result=RegRecord (dict=record_dict) + logger.info ("converting dict into Reg* with type=%s"%type) + logger.info ("returning=%s"%result) + # xxx todo + # register non-db attributes in an extensions field + return result + +def make_record_xml (xml): + xml_record = XML(xml) + xml_dict = xml_record.todict() + logger.info("load from xml, keys=%s"%xml_dict.keys()) + return make_record_dict (xml_dict) diff --git a/sfa/storage/record.py b/sfa/storage/record.py deleted file mode 100644 index f9f807e9..00000000 --- a/sfa/storage/record.py +++ 
/dev/null @@ -1,463 +0,0 @@ -## -# Implements support for SFA records -# -# TODO: Use existing PLC database methods? or keep this separate? -## - -from types import StringTypes -from sfa.trust.gid import GID -from sfa.storage.parameter import Parameter -from sfa.util.xrn import get_authority -from sfa.storage.row import Row -from sfa.util.xml import XML -from sfa.util.sfalogging import logger - -class SfaRecord(Row): - """ - The SfaRecord class implements an SFA Record. A SfaRecord is a tuple - (Hrn, GID, Type, Info). - - Hrn specifies the Human Readable Name of the object - GID is the GID of the object - Type is user | authority | slice | component - - Info is comprised of the following sub-fields - pointer = a pointer to the record in the PL database - - The pointer is interpreted depending on the type of the record. For example, - if the type=="user", then pointer is assumed to be a person_id that indexes - into the persons table. - - A given HRN may have more than one record, provided that the records are - of different types. 
- """ - -# table_name = 'sfa' -# primary_key = 'record_id' - - ### the wsdl generator assumes this is named 'fields' - internal_fields = { - 'record_id': Parameter(int, "An id that uniquely identifies this record", ro=True), - 'pointer': Parameter(int, "An id that uniquely identifies this record in an external database") - } - - fields = { - 'authority': Parameter(str, "The authority for this record"), - 'peer_authority': Parameter(str, "The peer authority for this record"), - 'hrn': Parameter(str, "Human readable name of object"), - 'gid': Parameter(str, "GID of the object"), - 'type': Parameter(str, "Record type"), - 'last_updated': Parameter(int, "Date and time of last update", ro=True), - 'date_created': Parameter(int, "Date and time this record was created", ro=True), - } - all_fields = dict(fields.items() + internal_fields.items()) - ## - # Create an SFA Record - # - # @param name if !=None, assign the name of the record - # @param gid if !=None, assign the gid of the record - # @param type one of user | authority | slice | component - # @param pointer is a pointer to a PLC record - # @param dict if !=None, then fill in this record from the dictionary - - def __init__(self, hrn=None, gid=None, type=None, pointer=None, authority=None, - peer_authority=None, dict=None, string=None): - self.dirty = True - self.hrn = None - self.gid = None - self.type = None - self.pointer = None - self.set_peer_auth(peer_authority) - self.set_authority(authority) - if hrn: - self.set_name(hrn) - if gid: - self.set_gid(gid) - if type: - self.set_type(type) - if pointer: - self.set_pointer(pointer) - if dict: - self.load_from_dict(dict) - if string: - self.load_from_string(string) - - - def validate_last_updated(self, last_updated): - return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) - - def update(self, new_dict): - if isinstance(new_dict, list): - new_dict = new_dict[0] - - # Convert any boolean strings to real bools - for key in new_dict: - if isinstance(new_dict[key], 
StringTypes): - if new_dict[key].lower() in ["true"]: - new_dict[key] = True - elif new_dict[key].lower() in ["false"]: - new_dict[key] = False - dict.update(self, new_dict) - - ## - # Set the name of the record - # - # @param hrn is a string containing the HRN - - def set_name(self, hrn): - """ - Set the name of the record - """ - self.hrn = hrn - self['hrn'] = hrn - self.dirty = True - - def set_authority(self, authority): - """ - Set the authority - """ - if not authority: - authority = "" - self.authority = authority - self['authority'] = authority - self.dirty = True - - - ## - # Set the GID of the record - # - # @param gid is a GID object or the string representation of a GID object - - def set_gid(self, gid): - """ - Set the GID of the record - """ - - if isinstance(gid, StringTypes): - self.gid = gid - self['gid'] = gid - else: - self.gid = gid.save_to_string(save_parents=True) - self['gid'] = gid.save_to_string(save_parents=True) - self.dirty = True - - ## - # Set the type of the record - # - # @param type is a string: user | authority | slice | component - - def set_type(self, type): - """ - Set the type of the record - """ - self.type = type - self['type'] = type - self.dirty = True - - ## - # Set the pointer of the record - # - # @param pointer is an integer containing the ID of a PLC record - - def set_pointer(self, pointer): - """ - Set the pointer of the record - """ - self.pointer = pointer - self['pointer'] = pointer - self.dirty = True - - - def set_peer_auth(self, peer_authority): - self.peer_authority = peer_authority - self['peer_authority'] = peer_authority - self.dirty = True - - ## - # Return the name (HRN) of the record - - def get_name(self): - """ - Return the name (HRN) of the record - """ - return self.hrn - - ## - # Return the type of the record - - def get_type(self): - """ - Return the type of the record - """ - return self.type - - ## - # Return the pointer of the record. 
The pointer is an integer that may be - # used to look up the record in the PLC database. The evaluation of pointer - # depends on the type of the record - - def get_pointer(self): - """ - Return the pointer of the record. The pointer is an integer that may be - used to look up the record in the PLC database. The evaluation of pointer - depends on the type of the record - """ - return self.pointer - - ## - # Return the GID of the record, in the form of a GID object - # TODO: not the best name for the function, because we have things called - # gidObjects in the Cred - - def get_gid_object(self): - """ - Return the GID of the record, in the form of a GID object - """ - return GID(string=self.gid) - - ## - # Returns the value of a field - - def get_field(self, fieldname, default=None): - # sometimes records act like classes, and sometimes they act like dicts - try: - return getattr(self, fieldname) - except AttributeError: - try: - return self[fieldname] - except KeyError: - if default != None: - return default - else: - raise - - ## - # Returns a list of field names in this record. - - def get_field_names(self): - """ - Returns a list of field names in this record. - """ - return self.fields.keys() - - ## - # Given a field name ("hrn", "gid", ...) return the value of that field. - # - # @param fieldname is the name of field to be returned - - def get_field_value_string(self, fieldname): - """ - Given a field name ("hrn", "gid", ...) return the value of that field. - """ - if fieldname == "authority": - val = get_authority(self['hrn']) - else: - try: - val = getattr(self, fieldname) - except: - val = self[fieldname] - if isinstance(val, str): - return "'" + str(val) + "'" - else: - return str(val) - - ## - # Given a list of field names, return a list of values for those public. - # - # @param fieldnames is a list of field names - - def get_field_value_strings(self, fieldnames): - """ - Given a list of field names, return a list of values for those public. 
- """ - return [ self.get_field_value_string (fieldname) for fieldname in fieldnames ] - - ## - # Return the record in the form of a dictionary - - def as_dict(self): - """ - Return the record in the form of a dictionary - """ - return dict(self) - - ## - # Load the record from a dictionary - # - # @param dict dictionary to load record public from - - def load_from_dict(self, dict): - """ - Load the record from a dictionary - """ - - self.set_name(dict['hrn']) - gidstr = dict.get("gid", None) - if gidstr: - self.set_gid(dict['gid']) - - if "pointer" in dict: - self.set_pointer(dict['pointer']) - - self.set_type(dict['type']) - self.update(dict) - - ## - # Save the record to a string. The string contains an XML representation of - # the record. - - def save_to_string(self): - """ - Save the record to a string. The string contains an XML representation of - the record. - """ - recorddict = self.as_dict() - filteredDict = dict([(key, val) for (key, val) in recorddict.iteritems() if key in self.fields.keys()]) - xml_record = XML('') - xml_record.parse_dict(filteredDict) - str = xml_record.toxml() - return str - - ## - # Load the record from a string. The string is assumed to contain an XML - # representation of the record. - - def load_from_string(self, str): - """ - Load the record from a string. The string is assumed to contain an XML - representation of the record. - """ - #dict = xmlrpclib.loads(str)[0][0] - - xml_record = XML(str) - self.load_from_dict(xml_record.todict()) - - ## - # Dump the record to stdout - # - # @param dump_parents if true, then the parents of the GID will be dumped - - def dump(self, dump_parents=False): - """ - Walk tree and dump records. 
- """ - #print "RECORD", self.name - #print " hrn:", self.name - #print " type:", self.type - #print " gid:" - #if (not self.gid): - # print " None" - #else: - # self.get_gid_object().dump(8, dump_parents) - #print " pointer:", self.pointer - - order = SfaRecord.fields.keys() - for key in self.keys(): - if key not in order: - order.append(key) - for key in order: - if key in self and key in self.fields: - if key in 'gid' and self[key]: - gid = GID(string=self[key]) - print " %s:" % key - gid.dump(8, dump_parents) - else: - print " %s: %s" % (key, self[key]) - - def summary_string(self): - return "Record(record_id=%s, hrn=%s, type=%s, authority=%s, pointer=%s)" % \ - (self.get('record_id'), self.get('hrn'), self.get('type'), self.get('authority'), \ - self.get('pointer')) - - def getdict(self): - return dict(self) - - def sync(self): - """ - Sync this record with the database. - """ - from sfa.storage.table import SfaTable - table = SfaTable() - filter = {} - if self.get('record_id'): - filter['record_id'] = self.get('record_id') - if self.get('hrn') and self.get('type'): - filter['hrn'] = self.get('hrn') - filter['type'] = self.get('type') - if self.get('pointer'): - filter['pointer'] = self.get('pointer') - existing_records = table.find(filter) - if not existing_records: - table.insert(self) - else: - existing_record = existing_records[0] - self['record_id'] = existing_record['record_id'] - table.update(self) - - def delete(self): - """ - Remove record from the database. 
- """ - from sfa.storage.table import SfaTable - table = SfaTable() - filter = {} - if self.get('record_id'): - filter['record_id'] = self.get('record_id') - if self.get('hrn') and self.get('type'): - filter['hrn'] = self.get('hrn') - filter['type'] = self.get('type') - if self.get('pointer'): - filter['pointer'] = self.get('pointer') - if filter: - existing_records = table.find(filter) - for record in existing_records: - table.remove(record) - -class UserRecord(SfaRecord): - - fields = { - 'email': Parameter(str, 'email'), - 'first_name': Parameter(str, 'First name'), - 'last_name': Parameter(str, 'Last name'), - 'phone': Parameter(str, 'Phone Number'), - 'keys': Parameter(str, 'Public key'), - 'slices': Parameter([str], 'List of slices this user belongs to'), - } - fields.update(SfaRecord.fields) - -class SliceRecord(SfaRecord): - fields = { - 'name': Parameter(str, 'Slice name'), - 'url': Parameter(str, 'Slice url'), - 'expires': Parameter(int, 'Date and time this slice exipres'), - 'researcher': Parameter([str], 'List of users for this slice'), - 'PI': Parameter([str], 'List of PIs responsible for this slice'), - 'description': Parameter([str], 'Description of this slice'), - } - fields.update(SfaRecord.fields) - - -class NodeRecord(SfaRecord): - fields = { - 'hostname': Parameter(str, 'This nodes dns name'), - 'node_type': Parameter(str, 'Type of node this is'), - 'latitude': Parameter(str, 'latitude'), - 'longitude': Parameter(str, 'longitude'), - } - fields.update(SfaRecord.fields) - - -class AuthorityRecord(SfaRecord): - fields = { - 'name': Parameter(str, 'Name'), - 'login_base': Parameter(str, 'login base'), - 'enabled': Parameter(bool, 'Is this site enabled'), - 'url': Parameter(str, 'URL'), - 'nodes': Parameter([str], 'List of nodes at this site'), - 'operator': Parameter([str], 'List of operators'), - 'researcher': Parameter([str], 'List of researchers'), - 'PI': Parameter([str], 'List of Principal Investigators'), - } - fields.update(SfaRecord.fields) 
- - diff --git a/sfa/storage/row.py b/sfa/storage/row.py deleted file mode 100644 index ef668445..00000000 --- a/sfa/storage/row.py +++ /dev/null @@ -1,58 +0,0 @@ - -class Row(dict): - - # Set this to the name of the table that stores the row. - # e.g. table_name = "nodes" - table_name = None - - # Set this to the name of the primary key of the table. It is - # assumed that the this key is a sequence if it is not set when - # sync() is called. - # e.g. primary_key="record_id" - primary_key = None - - # Set this to the names of tables that reference this table's - # primary key. - join_tables = [] - - def validate(self): - """ - Validates values. Will validate a value with a custom function - if a function named 'validate_[key]' exists. - """ - # Warn about mandatory fields - # XX TODO: Support checking for mandatory fields later - #mandatory_fields = self.db.fields(self.table_name, notnull = True, hasdef = False) - #for field in mandatory_fields: - # if not self.has_key(field) or self[field] is None: - # raise SfaInvalidArgument, field + " must be specified and cannot be unset in class %s"%self.__class__.__name__ - - # Validate values before committing - for (key, value) in self.iteritems(): - if value is not None and hasattr(self, 'validate_' + key): - validate = getattr(self, 'validate_' + key) - self[key] = validate(value) - - - def validate_timestamp(self, timestamp, check_future = False): - """ - Validates the specified GMT timestamp string (must be in - %Y-%m-%d %H:%M:%S format) or number (seconds since UNIX epoch, - i.e., 1970-01-01 00:00:00 GMT). If check_future is True, - raises an exception if timestamp is not in the future. Returns - a GMT timestamp string. 
- """ - - time_format = "%Y-%m-%d %H:%M:%S" - if isinstance(timestamp, StringTypes): - # calendar.timegm() is the inverse of time.gmtime() - timestamp = calendar.timegm(time.strptime(timestamp, time_format)) - - # Human readable timestamp string - human = time.strftime(time_format, time.gmtime(timestamp)) - - if check_future and timestamp < time.time(): - raise SfaInvalidArgument, "'%s' not in the future" % human - - return human - diff --git a/sfa/storage/sfa.sql b/sfa/storage/sfa.sql deleted file mode 100644 index 9a2792c8..00000000 --- a/sfa/storage/sfa.sql +++ /dev/null @@ -1,68 +0,0 @@ --- --- SFA database schema --- - -SET client_encoding = 'UNICODE'; - --------------------------------------------------------------------------------- --- Version --------------------------------------------------------------------------------- - --- Database version -CREATE TABLE sfa_db_version ( - version integer NOT NULL, - subversion integer NOT NULL DEFAULT 0 -) WITH OIDS; - --- the migration scripts do not use the major 'version' number --- so 5.0 sets subversion at 100 --- in case your database misses the site and persons tags feature, --- you might wish to first upgrade to 4.3-rc16 before moving to some 5.0 --- or run the up script here --- http://svn.planet-lab.org/svn/PLCAPI/branches/4.3/migrations/ - -INSERT INTO sfa_db_version (version, subversion) VALUES (1, 1); - --------------------------------------------------------------------------------- --- Aggregates and store procedures --------------------------------------------------------------------------------- - --- Like MySQL GROUP_CONCAT(), this function aggregates values into a --- PostgreSQL array. 
-CREATE AGGREGATE array_accum ( - sfunc = array_append, - basetype = anyelement, - stype = anyarray, - initcond = '{}' -); - --- Valid record types -CREATE TABLE record_types ( - record_type text PRIMARY KEY -) WITH OIDS; -INSERT INTO record_types (record_type) VALUES ('authority'); -INSERT INTO record_types (record_type) VALUES ('authority+sa'); -INSERT INTO record_types (record_type) VALUES ('authority+am'); -INSERT INTO record_types (record_type) VALUES ('authority+sm'); -INSERT INTO record_types (record_type) VALUES ('user'); -INSERT INTO record_types (record_type) VALUES ('slice'); -INSERT INTO record_types (record_type) VALUES ('node'); - - --- main table -CREATE TABLE records ( - record_id serial PRIMARY KEY , - hrn text NOT NULL, - authority text NOT NULL, - peer_authority text, - gid text, - type text REFERENCES record_types, - pointer integer, - date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP, - last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP -); -CREATE INDEX sfa_hrn_ids on records (hrn); -CREATE INDEX sfa_type_ids on records (type); -CREATE INDEX sfa_authority_ids on records (authority); -CREATE INDEX sfa_peer_authority_ids on records (peer_authority); -CREATE INDEX sfa_pointer_ids on records (pointer); diff --git a/sfa/storage/table.py b/sfa/storage/table.py deleted file mode 100644 index 602bcabe..00000000 --- a/sfa/storage/table.py +++ /dev/null @@ -1,151 +0,0 @@ -# -# implements support for SFA records stored in db tables -# -# TODO: Use existing PLC database methods? or keep this separate? 
- -from types import StringTypes - -from sfa.util.config import Config - -from sfa.storage.parameter import Parameter -from sfa.storage.filter import Filter -from sfa.storage.PostgreSQL import PostgreSQL -from sfa.storage.record import SfaRecord, AuthorityRecord, NodeRecord, SliceRecord, UserRecord - -class SfaTable(list): - - SFA_TABLE_PREFIX = "records" - - def __init__(self, record_filter = None): - - # pgsql doesn't like table names with "." in them, to replace it with "$" - self.tablename = SfaTable.SFA_TABLE_PREFIX - self.config = Config() - self.db = PostgreSQL(self.config) - - if record_filter: - records = self.find(record_filter) - for record in records: - self.append(record) - - def db_fields(self, obj=None): - - db_fields = self.db.fields(self.SFA_TABLE_PREFIX) - return dict( [ (key,value) for (key, value) in obj.iteritems() \ - if key in db_fields and - self.is_writable(key, value, SfaRecord.fields)] ) - - @staticmethod - def is_writable (key,value,dict): - # if not mentioned, assume it's writable (e.g. deleted ...) 
- if key not in dict: return True - # if mentioned but not linked to a Parameter object, idem - if not isinstance(dict[key], Parameter): return True - # if not marked ro, it's writable - if not dict[key].ro: return True - - return False - - - def clear (self): - self.db.do("DELETE from %s"%self.tablename) - self.db.commit() - - # what sfa-nuke does - def nuke (self): - self.clear() - - def remove(self, record): - params = {'record_id': record['record_id']} - template = "DELETE FROM %s " % self.tablename - sql = template + "WHERE record_id = %(record_id)s" - self.db.do(sql, params) - - # if this is a site, remove all records where 'authority' == the - # site's hrn - if record['type'] == 'authority': - params = {'authority': record['hrn']} - sql = template + "WHERE authority = %(authority)s" - self.db.do(sql, params) - self.db.commit() - - def insert(self, record): - db_fields = self.db_fields(record) - keys = db_fields.keys() - values = [self.db.param(key, value) for (key, value) in db_fields.iteritems()] - query_str = "INSERT INTO " + self.tablename + \ - "(" + ",".join(keys) + ") " + \ - "VALUES(" + ",".join(values) + ")" - self.db.do(query_str, db_fields) - self.db.commit() - result = self.find({'hrn': record['hrn'], 'type': record['type'], 'peer_authority': record['peer_authority']}) - if not result: - record_id = None - elif isinstance(result, list): - record_id = result[0]['record_id'] - else: - record_id = result['record_id'] - - return record_id - - def update(self, record): - db_fields = self.db_fields(record) - keys = db_fields.keys() - values = [self.db.param(key, value) for (key, value) in db_fields.iteritems()] - columns = ["%s = %s" % (key, value) for (key, value) in zip(keys, values)] - query_str = "UPDATE %s SET %s WHERE record_id = %s" % \ - (self.tablename, ", ".join(columns), record['record_id']) - self.db.do(query_str, db_fields) - self.db.commit() - - def quote_string(self, value): - return str(self.db.quote(value)) - - def quote(self, value): - 
return self.db.quote(value) - - def find(self, record_filter = None, columns=None): - if not columns: - columns = "*" - else: - columns = ",".join(columns) - sql = "SELECT %s FROM %s WHERE True " % (columns, self.tablename) - - if isinstance(record_filter, (list, tuple, set)): - ints = filter(lambda x: isinstance(x, (int, long)), record_filter) - strs = filter(lambda x: isinstance(x, StringTypes), record_filter) - record_filter = Filter(SfaRecord.all_fields, {'record_id': ints, 'hrn': strs}) - sql += "AND (%s) %s " % record_filter.sql("OR") - elif isinstance(record_filter, dict): - record_filter = Filter(SfaRecord.all_fields, record_filter) - sql += " AND (%s) %s" % record_filter.sql("AND") - elif isinstance(record_filter, StringTypes): - record_filter = Filter(SfaRecord.all_fields, {'hrn':[record_filter]}) - sql += " AND (%s) %s" % record_filter.sql("AND") - elif isinstance(record_filter, int): - record_filter = Filter(SfaRecord.all_fields, {'record_id':[record_filter]}) - sql += " AND (%s) %s" % record_filter.sql("AND") - - results = self.db.selectall(sql) - if isinstance(results, dict): - results = [results] - return results - - def findObjects(self, record_filter = None, columns=None): - - results = self.find(record_filter, columns) - result_rec_list = [] - for result in results: - if result['type'] in ['authority']: - result_rec_list.append(AuthorityRecord(dict=result)) - elif result['type'] in ['node']: - result_rec_list.append(NodeRecord(dict=result)) - elif result['type'] in ['slice']: - result_rec_list.append(SliceRecord(dict=result)) - elif result['type'] in ['user']: - result_rec_list.append(UserRecord(dict=result)) - else: - result_rec_list.append(SfaRecord(dict=result)) - return result_rec_list - - diff --git a/sfa/trust/credential.py b/sfa/trust/credential.py index c04b1236..c4e6982d 100644 --- a/sfa/trust/credential.py +++ b/sfa/trust/credential.py @@ -278,6 +278,7 @@ class Credential(object): self.decode() return 
self.gidObject.get_printable_subject() + # sounds like this should be __repr__ instead ?? def get_summary_tostring(self): if not self.gidObject: self.decode() diff --git a/sfa/trust/rights.py b/sfa/trust/rights.py index db881237..eb0bb747 100644 --- a/sfa/trust/rights.py +++ b/sfa/trust/rights.py @@ -73,6 +73,7 @@ def determine_rights(type, name): rl.add("bind") rl.add("control") rl.add("info") +# wouldn't that be authority+cm instead ? elif type == "component": rl.add("operator") return rl diff --git a/sfa/util/sfatablesRuntime.py b/sfa/util/sfatablesRuntime.py index 24437772..0bc88f6c 100644 --- a/sfa/util/sfatablesRuntime.py +++ b/sfa/util/sfatablesRuntime.py @@ -41,7 +41,7 @@ try: except: - from sfa.util.logging import logger + from sfa.util.sfalogging import logger def run_sfatables (_,__,___, rspec, ____=None): logger.warning("Cannot import sfatables.runtime, please install package sfa-sfatables") return rspec diff --git a/sfa/util/xrn.py b/sfa/util/xrn.py index 1f506289..fb9e864f 100644 --- a/sfa/util/xrn.py +++ b/sfa/util/xrn.py @@ -130,6 +130,13 @@ class Xrn: # if not type: # debug_logger.debug("type-less Xrn's are not safe") + def __repr__ (self): + result="