python setup.py install --root=$(DESTDIR)
chmod 444 $(DESTDIR)/etc/sfa/default_config.xml
rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/*egg-info
- rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/sfa/storage/sfa.sql
+ rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/sfa/storage/migrations
(cd $(DESTDIR)/usr/bin ; ln -s sfi.py sfi; ln -s sfascan.py sfascan)
python-clean: version-clean
CLIENTS = $(shell ls sfa/clientbin/*.py)
BINS = ./config/sfa-config-tty ./config/gen-sfa-cm-config.py \
- ./sfa/importer/sfa-import-plc.py ./sfa/importer/sfa-nuke-plc.py ./sfa/server/sfa-start.py \
+ ./sfa/server/sfa-start.py \
+ ./sfa/importer/sfa-import.py ./sfa/importer/sfa-nuke.py \
$(CLIENTS)
synccheck:
@exit 1
endif
-sync: synccheck
- +$(RSYNC) --relative ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/
- +$(RSYNC) ./tests/ $(SSHURL)/root/tests-sfa
+
+synclib: synccheck
+ +$(RSYNC) --relative ./sfa/ --exclude migrations $(SSHURL)/usr/lib\*/python2.\*/site-packages/
+syncbin: synccheck
+$(RSYNC) $(BINS) $(SSHURL)/usr/bin/
+syncinit: synccheck
+$(RSYNC) ./init.d/sfa $(SSHURL)/etc/init.d/
+syncconfig:
+$(RSYNC) ./config/default_config.xml $(SSHURL)/etc/sfa/
- +$(RSYNC) ./sfa/storage/sfa.sql $(SSHURL)/usr/share/sfa/
+synctest: synccheck
+ +$(RSYNC) ./tests/ $(SSHURL)/root/tests-sfa
+syncrestart: synccheck
$(SSHCOMMAND) exec service sfa restart
+syncmig:
+ +$(RSYNC) ./sfa/storage/migrations $(SSHURL)/usr/share/sfa/
+
+
+# full-fledged
+sync: synclib syncbin syncinit syncconfig syncrestart
# 99% of the time this is enough
-fastsync: synccheck
- +$(RSYNC) --relative ./sfa/ $(SSHURL)/usr/lib\*/python2.\*/site-packages/
- $(SSHCOMMAND) exec service sfa restart
+syncfast: synclib syncrestart
-clientsync: synccheck
- +$(RSYNC) $(BINS) $(SSHURL)/usr/bin/
+.PHONY: synccheck synclib syncbin syncinit syncconfig synctest syncrestart syncmig sync syncfast
-ricasync: synccheck
- +$(RSYNC) --relative ./sfa/fd ./sfa/generic/fd.py ./sfa/rspecs/versions/federica.py $(SSHURL)/usr/lib\*/python2.\*/site-packages/
+syncrica: synccheck
+ +$(RSYNC) --relative ./sfa/federica ./sfa/generic/fd.py ./sfa/rspecs/versions/federica.py $(SSHURL)/usr/lib\*/python2.\*/site-packages/
$(SSHCOMMAND) exec service sfa restart
-.PHONY: synccheck sync fastsync clientsync ricasync
+.PHONY: syncrica
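+
+# usage sketch (illustrative, assuming SSHURL/SSHCOMMAND point at your test box):
+#   make syncfast      # push the python library and restart the sfa service
+#   make syncmig       # push the migration scripts under /usr/share/sfa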
##########
CLIENTLIBFILES= \
</variablelist>
</category>
+ <!-- ======================================== -->
+ <category id="sfa_flashpolicy">
+ <name>SFA Flash Policy</name>
+ <description>The settings that affect the flash policy server that will run
+ as part of this SFA instance.</description>
+
+ <variablelist>
+ <variable id="enabled" type="boolean">
+ <name>Enable Flash Policy Server</name>
+ <value>false</value>
+ <description>Allows this local SFA instance to run a
+ flash policy server.</description>
+ </variable>
+ <variable id="config_file" type="string">
+ <name>Flash policy config file</name>
+ <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
+ <description>The path to where the flash policy config file can be reached.</description>
+ </variable>
+ <variable id="port" type="int">
+ <name>Flash policy port</name>
+ <value>843</value>
+ <description>The flash policy server port.</description>
+ </variable>
+ </variablelist>
+ </category>
+
<!-- ======================================== -->
<category id="sfa_plc">
<name></name>
</variablelist>
</category>
-
<!-- ======================================== -->
- <category id="sfa_flashpolicy">
- <name>SFA Flash Policy</name>
- <description>The settings that affect the flash policy server that will run
- as part of this SFA instance.</description>
+ <category id="sfa_federica">
+ <name></name>
+ <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
<variablelist>
- <variable id="enabled" type="boolean">
- <name>Enable Flash Policy Server</name>
- <value>false</value>
- <description>Allows this local SFA instance to run a
- flash policy server.</description>
- </variable>
- <variable id="config_file" type="string">
- <name>Flash policy config file</name>
- <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
- <description>The path to where the flash policy config file can be reached.</description>
- </variable>
- <variable id="port" type="int">
- <name>Flash policy port</name>
- <value>843</value>
- <description>The flash policy server port.</description>
- </variable>
+ <variable id="user" type="string">
+          <name>User</name>
+          <value>root</value>
+          <description>FEDERICA login name for an admin-level user; SFA will carry out its operations under this account.</description>
+ </variable>
+
+ <variable id="password" type="string">
+ <name>Password</name>
+ <value>rootpassword</value>
+          <description>The FEDERICA password for SFA_FEDERICA_USER.</description>
+ </variable>
+
+ <variable id="hostname" type="string">
+ <name>XMLRPC hostname</name>
+ <value>federica.net</value>
+ <description>Hostname for the federica xmlrpc interface.</description>
+ </variable>
+
+ <variable id="port" type="string">
+ <name>XMLRPC port number</name>
+ <value>10000</value>
+ <description>Port number for the federica xmlrpc interface.</description>
+ </variable>
+
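+        <!-- note: these are exposed as SFA_FEDERICA_USER, SFA_FEDERICA_PASSWORD,
+             SFA_FEDERICA_HOSTNAME and SFA_FEDERICA_PORT, which
+             sfa/federica/fdshell.py reads to build its XMLRPC URL -->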
</variablelist>
</category>
-
<!-- ======================================== -->
<category id="sfa_nova">
<name>SFA Flash Policy</name>
# raise plc_config.ConfigurationException(errStr)
usual_variables = [
+ "SFA_GENERIC_FLAVOUR",
"SFA_INTERFACE_HRN",
"SFA_REGISTRY_ROOT_AUTH",
"SFA_REGISTRY_HOST",
"SFA_AGGREGATE_HOST",
"SFA_SM_HOST",
+ "SFA_DB_HOST",
"SFA_PLC_URL",
"SFA_PLC_USER",
"SFA_PLC_PASSWORD",
- "SFA_DB_HOST",
]
configuration={ \
#
# minute hour day-of-month month day-of-week user command
# once or twice an hour makes sense
-0 * * * * root /usr/bin/sfa-import-plc.py >> /var/log/sfa_import.log 2>&1
+0 * * * * root /usr/bin/sfa-import.py >> /var/log/sfa_import.log 2>&1
# this is needed only if you run RefreshPeer
#0 0 * * * root /usr/bin/sfa-clean-peer-records.py >> /var/log/sfa_import.log 2>&1
return 1
}
+# use a single timestamp for this script invocation in the dump_*_db functions.
+DATE=$(date +"%Y-%m-%d-%H-%M-%S")
+
+# Dumps the database - optional argument to specify filename suffix
+function dump_sfa_db() {
+ if [ -n "$1" ] ; then suffix="-$1" ; else suffix="" ; fi
+ mkdir -p /usr/share/sfa/backups
+    dumpfile=/usr/share/sfa/backups/${SFA_DB_NAME}.${DATE}${suffix}.sql
+ pg_dump -U $SFA_DB_USER $SFA_DB_NAME > $dumpfile
+ echo "Saved sfa database in $dumpfile"
+ check
+}
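+# illustrative only: "dump_sfa_db pre-2.1" would write something like
+#   /usr/share/sfa/backups/<SFA_DB_NAME>.<DATE>-pre-2.1.sql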
# Regenerate configuration files - almost verbatim from plc.init
function reload () {
if ! psql -U $SFA_DB_USER -c "" $SFA_DB_NAME >/dev/null 2>&1 ; then
createdb -U postgres --template=template0 --encoding=UNICODE --owner=$SFA_DB_USER $SFA_DB_NAME
check
- # install db schema
- psql -U $SFA_DB_USER -f /usr/share/sfa/sfa.sql $SFA_DB_NAME
- check
fi
check
reload
db_start
+ # migrations are now handled in the code by sfa.storage.dbschema
# install peer certs
action $"SFA installing peer certs" daemon /usr/bin/sfa-start.py -t -d $OPTIONS
status sfa-start.py
RETVAL=$?
;;
+ dbdump)
+ dump_sfa_db
+ ;;
*)
- echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}"
+ echo $"Usage: $0 {start|stop|reload|restart|condrestart|status|dbdump}"
exit 1
;;
esac
[
'config/sfa-config-tty',
'config/gen-sfa-cm-config.py',
- 'sfa/importer/sfa-import-plc.py',
- 'sfa/importer/sfa-nuke-plc.py',
+ 'sfa/importer/sfa-import.py',
+ 'sfa/importer/sfa-nuke.py',
'sfa/server/sfa-ca.py',
'sfa/server/sfa-start.py',
'sfa/server/sfa_component_setup.py',
('/etc/sfatables/matches/', glob('sfatables/matches/*.xml')),
('/etc/sfatables/targets/', glob('sfatables/targets/*.xml')),
('/etc/init.d/', [ "init.d/%s"%x for x in initscripts ]),
- ('/usr/share/sfa/', [ 'sfa/storage/sfa.sql' ] ),
+ ('/usr/share/sfa/migrations', glob('sfa/storage/migrations/*.*') ),
+ ('/usr/share/sfa/migrations/versions', glob('sfa/storage/migrations/versions/*') ),
('/usr/share/sfa/examples/', glob('sfa/examples/*' ) + [ 'cron.d/sfa.cron' ] ),
]
%define name sfa
-%define version 2.0
-%define taglevel 10
+%define version 2.1
+%define taglevel 2
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
Summary: the SFA python libraries
Group: Applications/System
BuildRequires: make
+
+Requires: myplc-config
Requires: python >= 2.5
+Requires: pyOpenSSL >= 0.7
Requires: m2crypto
Requires: xmlsec1-openssl-devel
Requires: libxslt-python
Requires: postgresql >= 8.2, postgresql-server >= 8.2
Requires: postgresql-python
Requires: python-psycopg2
-Requires: pyOpenSSL >= 0.7
-Requires: myplc-config
+# python-sqlalchemy version per fedora release: f8=0.4 - f12=0.5 f14=0.6 f16=0.7
+Requires: python-sqlalchemy
+Requires: python-migrate
+# the eucalyptus aggregate uses this module
Requires: python-xmlbuilder
# python 2.5 has uuid module added, for python 2.4 we still need it.
%config /etc/sfa/default_config.xml
%config (noreplace) /etc/sfa/aggregates.xml
%config (noreplace) /etc/sfa/registries.xml
-/usr/share/sfa/sfa.sql
+/usr/share/sfa/migrations
/usr/share/sfa/examples
/var/www/html/wsdl/*.wsdl
/etc/sfa/xml.xsd
/etc/sfa/protogeni-rspec-common.xsd
/etc/sfa/topology
-%{_bindir}/sfa-import-plc.py*
-%{_bindir}/sfa-nuke-plc.py*
+%{_bindir}/sfa-import.py*
+%{_bindir}/sfa-nuke.py*
%{_bindir}/gen-sfa-cm-config.py*
%{_bindir}/sfa-ca.py*
%files tests
%{_datadir}/sfa/tests
-### sfa-plc installs the 'sfa' service
-%post plc
+### sfa installs the 'sfa' service
+%post
chkconfig --add sfa
-%preun plc
+%preun
if [ "$1" = 0 ] ; then
/sbin/service sfa stop || :
/sbin/chkconfig --del sfa || :
fi
-%postun plc
-[ "$1" -ge "1" ] && service sfa restart
+%postun
+[ "$1" -ge "1" ] && { service sfa dbdump ; service sfa restart ; }
### sfa-cm installs the 'sfa-cm' service
%post cm
[ "$1" -ge "1" ] && service sfa-cm restart || :
%changelog
+* Wed Feb 08 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-2
+- registry database has users' keys and mail (known as v0 for migrate)
+- pl importer properly maintains users' keys and mail
+- pl driver now handles 'role' when adding a person record (experimental)
+- first draft of federica driver with config section
+- SFA_GENERIC_FLAVOUR in usual variables for sfa-config-tty
+- plus, from master as of tag merged-in-sfa-2.1-2:
+- disk_image revisited
+- new nova_shell nova_driver & various tweaks for openstack
+
+* Fri Jan 27 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-1
+- uses sqlalchemy and related migrate
+- thorough migration and upgrade scheme
+- sfa-import.py and sfa-nuke.py (no more -plc), uses FLAVOUR
+- trashed dbinfo stuff in auth hierarchy
+- data model still has little more than plain records
+- checkpoint tag, not yet intended for release
+
* Wed Jan 25 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-10
- client: added -R --raw sfi cmdline option that displays raw server response.
- client: request GENI RSpec by default.
def authority_credential_filename (self, hrn):
return self.credential_filename(hrn,'authority')
def my_gid_filename (self):
- return self.gid_filename ("user", self.hrn)
+ return self.gid_filename (self.hrn, "user")
def gid_filename (self, hrn, type):
return self.fullpath ("%s.%s.gid"%(hrn,type))
from sfa.util.version import version_core
from sfa.util.cache import Cache
-from sfa.storage.record import SfaRecord, UserRecord, SliceRecord, NodeRecord, AuthorityRecord
+from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode
+from sfa.storage.model import make_record
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.rspec_converter import RSpecConverter
f.close()
return
-def save_records_to_file(filename, recordList, format="xml"):
+def save_records_to_file(filename, record_dicts, format="xml"):
if format == "xml":
index = 0
- for record in recordList:
+ for record_dict in record_dicts:
if index > 0:
- save_record_to_file(filename + "." + str(index), record)
+ save_record_to_file(filename + "." + str(index), record_dict)
else:
- save_record_to_file(filename, record)
+ save_record_to_file(filename, record_dict)
index = index + 1
elif format == "xmllist":
f = open(filename, "w")
f.write("<recordlist>\n")
- for record in recordList:
- record = SfaRecord(dict=record)
- f.write('<record hrn="' + record.get_name() + '" type="' + record.get_type() + '" />\n')
+ for record_dict in record_dicts:
+ record_obj=make_record (dict=record_dict)
+ f.write('<record hrn="' + record_obj.hrn + '" type="' + record_obj.type + '" />\n')
f.write("</recordlist>\n")
f.close()
elif format == "hrnlist":
f = open(filename, "w")
- for record in recordList:
- record = SfaRecord(dict=record)
- f.write(record.get_name() + "\n")
+ for record_dict in record_dicts:
+ record_obj=make_record (dict=record_dict)
+ f.write(record_obj.hrn + "\n")
f.close()
else:
# this should never happen
print "unknown output format", format
-def save_record_to_file(filename, record):
- if record['type'] in ['user']:
- record = UserRecord(dict=record)
- elif record['type'] in ['slice']:
- record = SliceRecord(dict=record)
- elif record['type'] in ['node']:
- record = NodeRecord(dict=record)
- elif record['type'] in ['authority', 'ma', 'sa']:
- record = AuthorityRecord(dict=record)
- else:
- record = SfaRecord(dict=record)
+def save_record_to_file(filename, record_dict):
+    record = make_record (dict=record_dict)
str = record.save_to_string()
f=codecs.open(filename, encoding='utf-8',mode="w")
f.write(str)
# load methods
def load_record_from_file(filename):
f=codecs.open(filename, encoding="utf-8", mode="r")
- str = f.read()
+ xml_string = f.read()
f.close()
- record = SfaRecord(string=str)
- return record
+ return make_record (xml=xml_string)
import uuid
self.print_help()
sys.exit(1)
hrn = args[0]
- records = self.registry().Resolve(hrn, self.my_credential_string)
- records = filter_records(options.type, records)
- if not records:
+ record_dicts = self.registry().Resolve(hrn, self.my_credential_string)
+ record_dicts = filter_records(options.type, record_dicts)
+ if not record_dicts:
self.logger.error("No record of type %s"% options.type)
+ records = [ make_record (dict=record_dict) for record_dict in record_dicts ]
for record in records:
- if record['type'] in ['user']:
- record = UserRecord(dict=record)
- elif record['type'] in ['slice']:
- record = SliceRecord(dict=record)
- elif record['type'] in ['node']:
- record = NodeRecord(dict=record)
- elif record['type'].startswith('authority'):
- record = AuthorityRecord(dict=record)
- else:
- record = SfaRecord(dict=record)
- if (options.format == "text"):
- record.dump()
- else:
- print record.save_to_string()
+ if (options.format == "text"): record.dump()
+ else: print record.save_as_xml()
if options.file:
- save_records_to_file(options.file, records, options.fileformat)
+ save_records_to_file(options.file, record_dicts, options.fileformat)
return
def add(self, options, args):
sys.exit(1)
record_filepath = args[0]
rec_file = self.get_record_file(record_filepath)
- record = load_record_from_file(rec_file).as_dict()
+ record = load_record_from_file(rec_file).todict()
return self.registry().Register(record, auth_cred)
def update(self, options, args):
sys.exit(1)
rec_file = self.get_record_file(args[0])
record = load_record_from_file(rec_file)
- if record['type'] == "user":
- if record.get_name() == self.user:
+ if record.type == "user":
+ if record.hrn == self.user:
cred = self.my_credential_string
else:
cred = self.my_authority_credential_string()
- elif record['type'] in ["slice"]:
+ elif record.type in ["slice"]:
try:
- cred = self.slice_credential_string(record.get_name())
+ cred = self.slice_credential_string(record.hrn)
except ServerException, e:
# XXX smbaker -- once we have better error return codes, update this
# to do something better than a string compare
cred = self.my_authority_credential_string()
else:
raise
- elif record.get_type() in ["authority"]:
+ elif record.type in ["authority"]:
cred = self.my_authority_credential_string()
- elif record.get_type() == 'node':
+ elif record.type == 'node':
cred = self.my_authority_credential_string()
else:
- raise "unknown record type" + record.get_type()
- record = record.as_dict()
- return self.registry().Update(record, cred)
+            raise Exception("unknown record type: " + record.type)
+ record_dict = record.todict()
+ return self.registry().Update(record_dict, cred)
def remove(self, options, args):
"remove registry record by name (Remove)"
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.faults import SfaFault
+
+# this is probably too big to swallow but for a starting point..
+from sfa.plc.pldriver import PlDriver
+
+from sfa.federica.fdshell import FdShell
+
+# hardwired for now
+# this could/should be obtained by issuing getRSpecVersion
+federica_version_string="RSpecV2"
+
+#### avail. methods on the federica side as of 2012/02/13
+# listAvailableResources(String credentials, String rspecVersion)
+# listSliceResources(String credentials, String rspecVersion, String sliceUrn)
+# createSlice(String credentials, String sliceUrn, String rspecVersion, String rspecString)
+# deleteSlice(String credentials, String sliceUrn)
+# listSlices()
+# getRSpecVersion()
+##### all return
+# Result: {'code': 0, 'value': RSpec} if success
+# {'code': code_id, 'output': Error message} if error
+
+class FdDriver (PlDriver):
+
+ def __init__ (self,config):
+ PlDriver.__init__ (self, config)
+ self.shell=FdShell(config)
+
+ # the agreement with the federica driver is for them to expose results in a way
+    # compliant with the api v2 return code, i.e. a dict with 'code' 'value' 'output'
+ # essentially, either 'code'==0, then 'value' is set to the actual result
+ # otherwise, 'code' is set to an error code and 'output' holds an error message
+ def response (self, from_xmlrpc):
+ if isinstance (from_xmlrpc, dict) and 'code' in from_xmlrpc:
+ if from_xmlrpc['code']==0:
+ return from_xmlrpc['value']
+ else:
+ raise SfaFault(from_xmlrpc['code'],from_xmlrpc['output'])
+ else:
+ logger.warning("unexpected result from federica xmlrpc api")
+ return from_xmlrpc
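+    # illustrative only:
+    #   response({'code': 0, 'value': 'some rspec'})  returns 'some rspec'
+    #   response({'code': 3, 'output': 'no slice'})   raises SfaFault(3, 'no slice')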
+
+ def aggregate_version (self):
+        result={}
+ federica_version_string_api = self.shell.getRSpecVersion()
+ result ['federica_version_string_api']=federica_version_string_api
+ if federica_version_string_api != federica_version_string:
+            result['WARNING']="hard-wired rspec version %s differs from what the API currently exposes"%\
+                federica_version_string
+ return result
+
+ def testbed_name (self):
+ return "federica"
+
+ def list_slices (self, creds, options):
+ return self.response(self.shell.listSlices())
+
+ def sliver_status (self, slice_urn, slice_hrn):
+ return "fddriver.sliver_status: undefined/todo for slice %s"%slice_hrn
+
+ def list_resources (self, slice_urn, slice_hrn, creds, options):
+ # right now rspec_version is ignored on the federica side
+ # we normally derive it from options
+ # look in cache if client has requested so
+ cached_requested = options.get('cached', True)
+ # global advertisement
+ if not slice_hrn:
+ # self.cache is initialized unless the global config has it turned off
+ if cached_requested and self.cache:
+ # using federica_version_string as the key into the cache
+ rspec = self.cache.get(federica_version_string)
+ if rspec:
+ logger.debug("FdDriver.ListResources: returning cached advertisement")
+ return self.response(rspec)
+ # otherwise, need to get it
+ rspec = self.shell.listAvailableResources (creds, federica_version_string)
+# rspec = self.shell.listAvailableResources (federica_version_string)
+ # cache it for future use
+ if self.cache:
+ logger.debug("FdDriver.ListResources: stores advertisement in cache")
+ self.cache.add(federica_version_string, rspec)
+ return self.response(rspec)
+ # about a given slice : don't cache
+ else:
+ return self.response(self.shell.listSliceResources(creds, federica_version_string, slice_urn))
+
+ def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+ # right now version_string is ignored on the federica side
+ # we normally derive it from options
+ return self.response(self.shell.createSlice(creds, slice_urn, federica_version_string, rspec_string))
+
+ def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+ # right now version_string is ignored on the federica side
+ # we normally derive it from options
+        # xxx not sure if that's currently supported at all
+ return self.response(self.shell.deleteSlice(creds, slice_urn))
+
+    # for the following methods we use what is provided by the default driver class
+ #def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
+ #def start_slice (self, slice_urn, slice_xrn, creds):
+ #def stop_slice (self, slice_urn, slice_xrn, creds):
+ #def reset_slice (self, slice_urn, slice_xrn, creds):
+ #def get_ticket (self, slice_urn, slice_xrn, creds, rspec, options):
--- /dev/null
+import xmlrpclib
+
+from sfa.util.sfalogging import logger
+
+class FdShell:
+ """
+ A simple xmlrpc shell to a federica API server
+ This class can receive the XMLRPC calls to the federica testbed
+ For safety this is limited to a set of hard-coded calls
+ """
+
+ direct_calls = [ 'listAvailableResources',
+ 'listSliceResources',
+ 'createSlice',
+ 'deleteSlice',
+ 'getRSpecVersion',
+ 'listSlices',
+ ]
+
+ def __init__ ( self, config ) :
+ # xxx to be configurable
+ SFA_FEDERICA_URL = "http://%s:%s@%s:%s/"%\
+ (config.SFA_FEDERICA_USER,config.SFA_FEDERICA_PASSWORD,
+ config.SFA_FEDERICA_HOSTNAME,config.SFA_FEDERICA_PORT)
+ url=SFA_FEDERICA_URL
+ # xxx not sure if java xmlrpc has support for None
+ # self.proxy = xmlrpclib.Server(url, verbose = False, allow_none = True)
+ # xxx turn on verbosity
+ self.proxy = xmlrpclib.Server(url, verbose = True)
+
+ def __getattr__(self, name):
+ def func(*args, **kwds):
+ if name not in FdShell.direct_calls:
+ raise Exception, "Illegal method call %s for FEDERICA driver"%(name)
+ # xxx get credentials from the config ?
+ # right now basic auth data goes into the URL
+ # the API still provides for a first credential arg though
+ credential='xxx-unused-xxx'
+ logger.info("Issuing %s args=%s kwds=%s to federica"%\
+ (name,args,kwds))
+ result=getattr(self.proxy, "AggregateManager.%s"%name)(credential, *args, **kwds)
+ logger.debug('FdShell %s (%s) returned ... '%(name,name))
+ return result
+ return func
+
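+# usage sketch (illustrative): FdShell(config).listSlices() ends up issuing
+# AggregateManager.listSlices(credential) over xmlrpc on the URL built in __init__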
except:
logger.log_exc("Cannot locate generic instance with flavour=%s"%flavour)
+ # provide default for importer_class
+ def importer_class (self):
+ return None
+
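+    # flavours that need an import step (e.g. pl and openstack in this changeset)
+    # override importer_class to return the class that sfa-import.py instantiates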
# in the simplest case these can be redefined to the class/module objects to be used
# see pl.py for an example
# some descendant of SfaApi
except:
logger.log_exc_critical(message)
-
--- /dev/null
+#
+from sfa.generic.pl import pl
+
+import sfa.federica.fddriver
+
+class fd (pl):
+
+# the max flavour behaves like pl, except for
+# the aggregate
+ def driver_class (self) :
+ return sfa.federica.fddriver.FdDriver
#
from sfa.generic.pl import pl
-import sfa.managers.aggregate_manager_max
-
class max (pl):
# the max flavour behaves like pl, except for
# the aggregate
def aggregate_manager_class (self) :
+ import sfa.managers.aggregate_manager_max
return sfa.managers.aggregate_manager_max.AggregateManagerMax
# I believe the component stuff is not implemented
import sfa.managers.aggregate_manager
import sfa.managers.slice_manager
-class openstack (Generic):
-
- # use the standard api class
- def api_class (self):
- return sfa.server.sfaapi.SfaApi
+# use pl as a model so we only redefine what's different
+from sfa.generic.pl import pl
+class openstack (pl):
+
+ # the importer class
+ def importer_class (self):
+ import sfa.importer.openstackimporter
+ return sfa.importer.openstackimporter.OpenstackImporter
+
# the manager classes for the server-side services
def registry_manager_class (self) :
return sfa.managers.registry_manager_openstack.RegistryManager
- def slicemgr_manager_class (self) :
- return sfa.managers.slice_manager.SliceManager
def aggregate_manager_class (self) :
return sfa.managers.aggregate_manager.AggregateManager
def driver_class (self):
return sfa.openstack.nova_driver.NovaDriver
- # for the component mode, to be run on board planetlab nodes
- # manager class
- def component_manager_class (self):
- return sfa.managers.component_manager_pl
- # driver_class
- def component_driver_class (self):
- return sfa.plc.plcomponentdriver.PlComponentDriver
from sfa.generic import Generic
-
class pl (Generic):
+ # the importer class
+ def importer_class (self):
+ import sfa.importer.plimporter
+ return sfa.importer.plimporter.PlImporter
+
# use the standard api class
def api_class (self):
import sfa.server.sfaapi
# for the component mode, to be run on board planetlab nodes
# manager class
def component_manager_class (self):
+ import sfa.managers
return sfa.managers.component_manager_pl
# driver_class
def component_driver_class (self):
+ import sfa.plc.plcomponentdriver
return sfa.plc.plcomponentdriver.PlComponentDriver
-
--- /dev/null
+import os
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
+
+from sfa.trust.gid import create_uuid
+from sfa.trust.certificate import convert_public_key, Keypair
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode
+
+from sfa.openstack.nova_shell import NovaShell
+
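+# person_keys.py is a plain python snippet of the form "keys = {...}":
+# load_keys exec's it, save_keys rewrites it wholesale (same scheme as the legacy importers)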
+def load_keys(filename):
+ keys = {}
+ tmp_dict = {}
+ try:
+ execfile(filename, tmp_dict)
+ if 'keys' in tmp_dict:
+ keys = tmp_dict['keys']
+ return keys
+ except:
+ return keys
+
+def save_keys(filename, keys):
+ f = open(filename, 'w')
+ f.write("keys = %s" % str(keys))
+ f.close()
+
+class OpenstackImporter:
+
+ def __init__ (self, auth_hierarchy, logger):
+ self.auth_hierarchy = auth_hierarchy
+ self.logger=logger
+
+ def add_options (self, parser):
+ self.logger.debug ("OpenstackImporter: no options yet")
+ pass
+
+ def run (self, options):
+ # we don't have any options for now
+        self.logger.info ("OpenstackImporter.run : to do")
+
+ config = Config ()
+ interface_hrn = config.SFA_INTERFACE_HRN
+ root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ shell = NovaShell (config)
+
+ # create dict of all existing sfa records
+ existing_records = {}
+ existing_hrns = []
+ key_ids = []
+ for record in dbsession.query(RegRecord):
+ existing_records[ (record.hrn, record.type,) ] = record
+ existing_hrns.append(record.hrn)
+
+ # Get all users
+ persons = shell.user_get_all()
+ persons_dict = {}
+ keys_filename = config.config_path + os.sep + 'person_keys.py'
+ old_person_keys = load_keys(keys_filename)
+ person_keys = {}
+ for person in persons:
+ hrn = config.SFA_INTERFACE_HRN + "." + person.id
+ persons_dict[hrn] = person
+ old_keys = old_person_keys.get(person.id, [])
+ keys = [k.public_key for k in shell.key_pair_get_all_by_user(person.id)]
+ person_keys[person.id] = keys
+ update_record = False
+ if old_keys != keys:
+ update_record = True
+ if hrn not in existing_hrns or \
+ (hrn, 'user') not in existing_records or update_record:
+ urn = hrn_to_urn(hrn, 'user')
+
+ if keys:
+ try:
+ pkey = convert_public_key(keys[0])
+ except:
+                    self.logger.log_exc('unable to convert public key for %s' % hrn)
+                    pkey = Keypair(create=True)
+            else:
+                self.logger.warn("OpenstackImporter: person %s does not have a PL public key"%hrn)
+                pkey = Keypair(create=True)
+            person_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ person_record = RegUser ()
+ person_record.type='user'
+ person_record.hrn=hrn
+ person_record.gid=person_gid
+ person_record.authority=get_authority(hrn)
+ dbsession.add(person_record)
+ dbsession.commit()
+            self.logger.info("OpenstackImporter: imported person %s" % person_record)
+
+ # Get all projects
+ projects = shell.project_get_all()
+ projects_dict = {}
+ for project in projects:
+ hrn = config.SFA_INTERFACE_HRN + '.' + project.id
+ projects_dict[hrn] = project
+ if hrn not in existing_hrns or \
+ (hrn, 'slice') not in existing_records:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(hrn, 'slice')
+            project_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ project_record = RegSlice ()
+ project_record.type='slice'
+ project_record.hrn=hrn
+ project_record.gid=project_gid
+ project_record.authority=get_authority(hrn)
+ dbsession.add(project_record)
+ dbsession.commit()
+            self.logger.info("OpenstackImporter: imported slice: %s" % project_record)
+
+ # remove stale records
+ system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+ for (record_hrn, type) in existing_records.keys():
+ if record_hrn in system_records:
+ continue
+
+ record = existing_records[(record_hrn, type)]
+ if record.peer_authority:
+ continue
+
+ if type == 'user':
+ if record_hrn in persons_dict:
+ continue
+ elif type == 'slice':
+ if record_hrn in projects_dict:
+ continue
+ else:
+ continue
+
+ record_object = existing_records[ (record_hrn, type) ]
+        self.logger.info("OpenstackImporter: removing %s " % record)
+ dbsession.delete(record_object)
+ dbsession.commit()
+
+ # save pub keys
+        self.logger.info('OpenstackImporter: saving current pub keys')
+ save_keys(keys_filename, person_keys)
+
--- /dev/null
+#
+# PlanetLab importer
+#
+# requirements
+#
+# read the planetlab database and update the local registry database accordingly
+# (in other words, with this testbed, the SFA registry is *not* authoritative)
+# so we update the following collections
+# . authorities (from pl sites)
+# . node (from pl nodes)
+# . users+keys (from pl persons and attached keys)
+# known limitation : *one* of the ssh keys is chosen at random here
+# xxx todo/check xxx at the very least, when a key is known to the registry
+# and is still current in plc
+# then we should definitely make sure to keep that one in sfa...
+# . slice+researchers (from pl slices and attached users)
+#
+
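+# rough flow of run() below: load all existing RegRecords and mark them stale,
+# walk the PLC sites/nodes/persons/slices creating or refreshing the matching
+# records (clearing their stale flag), then delete whatever is still stale
+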
+import os
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
+
+from sfa.trust.gid import create_uuid
+from sfa.trust.certificate import convert_public_key, Keypair
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
+
+from sfa.plc.plshell import PlShell
+
+def _get_site_hrn(interface_hrn, site):
+ # Hardcode 'internet2' into the hrn for sites hosting
+ # internet2 nodes. This is a special operation for some vini
+ # sites only
+ hrn = ".".join([interface_hrn, site['login_base']])
+ if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
+ if site['login_base'].startswith("i2") or site['login_base'].startswith("nlr"):
+ hrn = ".".join([interface_hrn, "internet2", site['login_base']])
+ return hrn
+
+
+class PlImporter:
+
+ def __init__ (self, auth_hierarchy, logger):
+ self.auth_hierarchy = auth_hierarchy
+ self.logger=logger
+
+ def add_options (self, parser):
+ # we don't have any options for now
+ pass
+
+ # hrn hash is initialized from current db
+ # remember just-created records as we go
+ # xxx might make sense to add a UNIQUE constraint in the db itself
+ def remember_record_by_hrn (self, record):
+ tuple = (record.type, record.hrn)
+ if tuple in self.records_by_type_hrn:
+ self.logger.warning ("PlImporter.remember_record_by_hrn: duplicate (%s,%s)"%tuple)
+ return
+ self.records_by_type_hrn [ tuple ] = record
+
+ # ditto for pointer hash
+ def remember_record_by_pointer (self, record):
+ if record.pointer == -1:
+ self.logger.warning ("PlImporter.remember_record_by_pointer: pointer is void")
+ return
+ tuple = (record.type, record.pointer)
+ if tuple in self.records_by_type_pointer:
+ self.logger.warning ("PlImporter.remember_record_by_pointer: duplicate (%s,%s)"%tuple)
+ return
+ self.records_by_type_pointer [ ( record.type, record.pointer,) ] = record
+
+ def remember_record (self, record):
+ self.remember_record_by_hrn (record)
+ self.remember_record_by_pointer (record)
+
+ def locate_by_type_hrn (self, type, hrn):
+ return self.records_by_type_hrn.get ( (type, hrn), None)
+
+ def locate_by_type_pointer (self, type, pointer):
+ return self.records_by_type_pointer.get ( (type, pointer), None)
+
+ # convenience : try to locate first based on type+pointer
+ # if so, the record was created already even if e.g. its hrn has changed meanwhile
+ # otherwise we try by type+hrn (is this truly useful ?)
+ def locate (self, type, hrn=None, pointer=-1):
+ if pointer!=-1:
+ attempt = self.locate_by_type_pointer (type, pointer)
+ if attempt : return attempt
+ if hrn is not None:
+ attempt = self.locate_by_type_hrn (type, hrn,)
+ if attempt : return attempt
+ return None
+
+    # keeping this out of the way - it would otherwise make the run method a bit abstruse
+ def create_special_vini_record (self, interface_hrn):
+ # special case for vini
+ if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
+ # create a fake internet2 site first
+ i2site = {'name': 'Internet2', 'login_base': 'internet2', 'site_id': -1}
+ site_hrn = _get_site_hrn(interface_hrn, i2site)
+ # import if hrn is not in list of existing hrns or if the hrn exists
+            # but it's not a site record
+ if ( 'authority', site_hrn, ) not in self.records_by_type_hrn:
+ urn = hrn_to_urn(site_hrn, 'authority')
+ if not self.auth_hierarchy.auth_exists(urn):
+ self.auth_hierarchy.create_auth(urn)
+ auth_info = self.auth_hierarchy.get_auth_info(urn)
+ auth_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+                                           pointer=i2site['site_id'],
+ authority=get_authority(site_hrn))
+ auth_record.just_created()
+ dbsession.add(auth_record)
+ dbsession.commit()
+ self.logger.info("PlImporter: Imported authority (vini site) %s"%auth_record)
+            self.remember_record ( auth_record )
+
+ def run (self, options):
+ config = Config ()
+ interface_hrn = config.SFA_INTERFACE_HRN
+ root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ shell = PlShell (config)
+
+ ######## retrieve all existing SFA objects
+ all_records = dbsession.query(RegRecord).all()
+
+ # create hash by (type,hrn)
+ # we essentially use this to know if a given record is already known to SFA
+ self.records_by_type_hrn = \
+ dict ( [ ( (record.type, record.hrn) , record ) for record in all_records ] )
+ # create hash by (type,pointer)
+ self.records_by_type_pointer = \
+ dict ( [ ( (record.type, record.pointer) , record ) for record in all_records
+ if record.pointer != -1] )
+
+ # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+ for record in all_records: record.stale=True
+
+ ######## retrieve PLC data
+ # Get all plc sites
+        # retrieve only required stuff
+ sites = shell.GetSites({'peer_id': None, 'enabled' : True},
+ ['site_id','login_base','node_ids','slice_ids','person_ids',])
+ # create a hash of sites by login_base
+# sites_by_login_base = dict ( [ ( site['login_base'], site ) for site in sites ] )
+ # Get all plc users
+ persons = shell.GetPersons({'peer_id': None, 'enabled': True},
+ ['person_id', 'email', 'key_ids', 'site_ids'])
+ # create a hash of persons by person_id
+ persons_by_id = dict ( [ ( person['person_id'], person) for person in persons ] )
+ # Get all plc public keys
+ # accumulate key ids for keys retrieval
+ key_ids = []
+ for person in persons:
+ key_ids.extend(person['key_ids'])
+ keys = shell.GetKeys( {'peer_id': None, 'key_id': key_ids} )
+ # create a hash of keys by key_id
+ keys_by_id = dict ( [ ( key['key_id'], key ) for key in keys ] )
+ # create a dict person_id -> [ (plc)keys ]
+ keys_by_person_id = {}
+ for person in persons:
+ pubkeys = []
+ for key_id in person['key_ids']:
+ pubkeys.append(keys_by_id[key_id])
+ keys_by_person_id[person['person_id']] = pubkeys
+ # Get all plc nodes
+ nodes = shell.GetNodes( {'peer_id': None}, ['node_id', 'hostname', 'site_id'])
+ # create hash by node_id
+ nodes_by_id = dict ( [ ( node['node_id'], node, ) for node in nodes ] )
+ # Get all plc slices
+ slices = shell.GetSlices( {'peer_id': None}, ['slice_id', 'name', 'person_ids'])
+ # create hash by slice_id
+ slices_by_id = dict ( [ (slice['slice_id'], slice ) for slice in slices ] )
+
+ # isolate special vini case in separate method
+ self.create_special_vini_record (interface_hrn)
+
+ # start importing
+ for site in sites:
+ site_hrn = _get_site_hrn(interface_hrn, site)
+ # import if hrn is not in list of existing hrns or if the hrn exists
+            # but it's not a site record
+ site_record=self.locate ('authority', site_hrn, site['site_id'])
+ if not site_record:
+ try:
+ urn = hrn_to_urn(site_hrn, 'authority')
+ if not self.auth_hierarchy.auth_exists(urn):
+ self.auth_hierarchy.create_auth(urn)
+ auth_info = self.auth_hierarchy.get_auth_info(urn)
+ site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+ pointer=site['site_id'],
+ authority=get_authority(site_hrn))
+ site_record.just_created()
+ dbsession.add(site_record)
+ dbsession.commit()
+ self.logger.info("PlImporter: imported authority (site) : %s" % site_record)
+ self.remember_record (site_record)
+ except:
+ # if the site import fails then there is no point in trying to import the
+ # site's child records (node, slices, persons), so skip them.
+ self.logger.log_exc("PlImporter: failed to import site. Skipping child records")
+ continue
+ else:
+ # xxx update the record ...
+ pass
+ site_record.stale=False
+
+ # import node records
+ for node_id in site['node_ids']:
+ try:
+ node = nodes_by_id[node_id]
+ except:
+ self.logger.warning ("PlImporter: cannot find node_id %s - ignored"%node_id)
+ continue
+ site_auth = get_authority(site_hrn)
+ site_name = site['login_base']
+ hrn = hostname_to_hrn(site_auth, site_name, node['hostname'])
+ # xxx this sounds suspicious
+ if len(hrn) > 64: hrn = hrn[:64]
+ node_record = self.locate ( 'node', hrn , node['node_id'] )
+ if not node_record:
+ try:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(hrn, 'node')
+ node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ node_record = RegNode (hrn=hrn, gid=node_gid,
+ pointer =node['node_id'],
+ authority=get_authority(hrn))
+ node_record.just_created()
+ dbsession.add(node_record)
+ dbsession.commit()
+ self.logger.info("PlImporter: imported node: %s" % node_record)
+ self.remember_record (node_record)
+ except:
+ self.logger.log_exc("PlImporter: failed to import node")
+ else:
+ # xxx update the record ...
+ pass
+ node_record.stale=False
+
+ # import persons
+ for person_id in site['person_ids']:
+ try:
+ person = persons_by_id[person_id]
+ except:
+                self.logger.warning ("PlImporter: cannot locate person_id %s - ignored"%person_id)
+                continue
+ person_hrn = email_to_hrn(site_hrn, person['email'])
+ # xxx suspicious again
+ if len(person_hrn) > 64: person_hrn = person_hrn[:64]
+ person_urn = hrn_to_urn(person_hrn, 'user')
+
+ user_record = self.locate ( 'user', person_hrn, person['person_id'])
+
+ # return a tuple pubkey (a plc key object) and pkey (a Keypair object)
+            # returns a tuple (pubkey, pkey): pubkey is a plc key object, pkey a Keypair object
+ pubkey=None
+ if person['key_ids']:
+                    # arbitrarily pick the first key in the set
+ pubkey = plc_keys[0]
+ try:
+ pkey = convert_public_key(pubkey['key'])
+ except:
+ self.logger.warn('PlImporter: unable to convert public key for %s' % person_hrn)
+ pkey = Keypair(create=True)
+ else:
+ # the user has no keys. Creating a random keypair for the user's gid
+ self.logger.warn("PlImporter: person %s does not have a PL public key"%person_hrn)
+ pkey = Keypair(create=True)
+ return (pubkey, pkey)
+
+ # new person
+ try:
+ plc_keys = keys_by_person_id.get(person['person_id'],[])
+ if not user_record:
+ (pubkey,pkey) = init_person_key (person, plc_keys )
+ person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+ user_record = RegUser (hrn=person_hrn, gid=person_gid,
+ pointer=person['person_id'],
+ authority=get_authority(person_hrn),
+ email=person['email'])
+ if pubkey:
+ user_record.reg_keys=[RegKey (pubkey['key'], pubkey['key_id'])]
+ else:
+ self.logger.warning("No key found for user %s"%user_record)
+ user_record.just_created()
+ dbsession.add (user_record)
+ dbsession.commit()
+ self.logger.info("PlImporter: imported person: %s" % user_record)
+ self.remember_record ( user_record )
+ else:
+ # update the record ?
+ # if user's primary key has changed then we need to update the
+                    # user's gid by forcing an update here
+ sfa_keys = user_record.reg_keys
+ def key_in_list (key,sfa_keys):
+ for reg_key in sfa_keys:
+ if reg_key.key==key['key']: return True
+ return False
+ # is there a new key in myplc ?
+ new_keys=False
+ for key in plc_keys:
+ if not key_in_list (key,sfa_keys):
+ new_keys = True
+ if new_keys:
+ (pubkey,pkey) = init_person_key (person, plc_keys)
+ person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+ if not pubkey:
+ user_record.reg_keys=[]
+ else:
+ user_record.reg_keys=[ RegKey (pubkey['key'], pubkey['key_id'])]
+ self.logger.info("PlImporter: updated person: %s" % user_record)
+ user_record.email = person['email']
+ dbsession.commit()
+ user_record.stale=False
+ except:
+ self.logger.log_exc("PlImporter: failed to import person %d %s"%(person['person_id'],person['email']))
+
+ # import slices
+ for slice_id in site['slice_ids']:
+ try:
+ slice = slices_by_id[slice_id]
+ except:
+                    self.logger.warning ("PlImporter: cannot locate slice_id %s - ignored"%slice_id)
+                    continue
+ slice_hrn = slicename_to_hrn(interface_hrn, slice['name'])
+ slice_record = self.locate ('slice', slice_hrn, slice['slice_id'])
+ if not slice_record:
+ try:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(slice_hrn, 'slice')
+ slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid,
+ pointer=slice['slice_id'],
+ authority=get_authority(slice_hrn))
+ slice_record.just_created()
+ dbsession.add(slice_record)
+ dbsession.commit()
+ self.logger.info("PlImporter: imported slice: %s" % slice_record)
+ self.remember_record ( slice_record )
+ except:
+ self.logger.log_exc("PlImporter: failed to import slice")
+ else:
+ # xxx update the record ...
+ self.logger.warning ("Slice update not yet implemented")
+ pass
+ # record current users affiliated with the slice
+ slice_record.reg_researchers = \
+ [ self.locate_by_type_pointer ('user',user_id) for user_id in slice['person_ids'] ]
+ dbsession.commit()
+ slice_record.stale=False
+
+ ### remove stale records
+ # special records must be preserved
+ system_hrns = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+ for record in all_records:
+ if record.hrn in system_hrns:
+ record.stale=False
+ if record.peer_authority:
+ record.stale=False
+ if ".vini" in interface_hrn and interface_hrn.endswith('vini') and \
+ record.hrn.endswith("internet2"):
+ record.stale=False
+
+ for record in all_records:
+ try: stale=record.stale
+ except:
+ stale=True
+ self.logger.warning("stale not found with %s"%record)
+ if stale:
+ self.logger.info("PlImporter: deleting stale record: %s" % record)
+ dbsession.delete(record)
+ dbsession.commit()
+++ /dev/null
-#!/usr/bin/python
-#
-##
-# Import PLC records into the SFA database. It is indended that this tool be
-# run once to create SFA records that reflect the current state of the
-# planetlab database.
-#
-# The import tool assumes that the existing PLC hierarchy should all be part
-# of "planetlab.us" (see the root_auth and level1_auth variables below).
-#
-# Public keys are extracted from the users' SSH keys automatically and used to
-# create GIDs. This is relatively experimental as a custom tool had to be
-# written to perform conversion from SSH to OpenSSL format. It only supports
-# RSA keys at this time, not DSA keys.
-##
-
-import os
-import getopt
-import sys
-
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
-from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
-from sfa.storage.table import SfaTable
-from sfa.storage.record import SfaRecord
-from sfa.trust.certificate import convert_public_key, Keypair
-from sfa.trust.gid import create_uuid
-from sfa.importer.sfaImport import sfaImport, _cleanup_string
-from sfa.util.sfalogging import logger
-from sfa.openstack.nova_shell import NovaShell
-
-def process_options():
-
- (options, args) = getopt.getopt(sys.argv[1:], '', [])
- for opt in options:
- name = opt[0]
- val = opt[1]
-
-
-def load_keys(filename):
- keys = {}
- tmp_dict = {}
- try:
- execfile(filename, tmp_dict)
- if 'keys' in tmp_dict:
- keys = tmp_dict['keys']
- return keys
- except:
- return keys
-
-def save_keys(filename, keys):
- f = open(filename, 'w')
- f.write("keys = %s" % str(keys))
- f.close()
-
-def main():
-
- process_options()
- config = Config()
- sfaImporter = sfaImport()
- logger=sfaImporter.logger
- logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL)
- if not config.SFA_REGISTRY_ENABLED:
- sys.exit(0)
- root_auth = config.SFA_REGISTRY_ROOT_AUTH
- interface_hrn = config.SFA_INTERFACE_HRN
- shell = NovaShell(config)
- sfaImporter.create_top_level_records()
-
- # create dict of all existing sfa records
- existing_records = {}
- existing_hrns = []
- key_ids = []
- table = SfaTable()
- results = table.find()
- for result in results:
- existing_records[(result['hrn'], result['type'])] = result
- existing_hrns.append(result['hrn'])
-
-
- # Get all users
- persons = shell.auth_manager.get_users()
- persons_dict = {}
- keys_filename = config.config_path + os.sep + 'person_keys.py'
- old_person_keys = load_keys(keys_filename)
- person_keys = {}
- for person in persons:
- hrn = config.SFA_INTERFACE_HRN + "." + person.id
- persons_dict[hrn] = person
- old_keys = old_person_keys.get(person.id, [])
- keys = [k.public_key for k in shell.db.key_pair_get_all_by_user(person.id)]
- person_keys[person.id] = keys
- update_record = False
- if old_keys != keys:
- update_record = True
- if hrn not in existing_hrns or \
- (hrn, 'user') not in existing_records or update_record:
- urn = hrn_to_urn(hrn, 'user')
-
- if keys:
- try:
- pkey = convert_public_key(keys[0])
- except:
- logger.log_exc('unable to convert public key for %s' % hrn)
- pkey = Keypair(create=True)
- else:
- logger.warn("Import: person %s does not have a PL public key"%hrn)
- pkey = Keypair(create=True)
- person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
- person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", \
- authority=get_authority(hrn))
- logger.info("Import: importing %s " % person_record.summary_string())
- person_record.sync()
-
- # Get all projects
- projects = shell.auth_manager.get_projects()
- projects_dict = {}
- for project in projects:
- hrn = config.SFA_INTERFACE_HRN + '.' + project.id
- projects_dict[hrn] = project
- if hrn not in existing_hrns or \
- (hrn, 'slice') not in existing_records:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(hrn, 'slice')
- project_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
- project_record = SfaRecord(hrn=hrn, gid=project_gid, type="slice",
- authority=get_authority(hrn))
- projects_dict[project_record['hrn']] = project_record
- logger.info("Import: importing %s " % project_record.summary_string())
- project_record.sync()
-
- # remove stale records
- system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
- for (record_hrn, type) in existing_records.keys():
- if record_hrn in system_records:
- continue
-
- record = existing_records[(record_hrn, type)]
- if record['peer_authority']:
- continue
-
- if type == 'user':
- if record_hrn in persons_dict:
- continue
- elif type == 'slice':
- if record_hrn in projects_dict:
- continue
- else:
- continue
-
- record_object = existing_records[(record_hrn, type)]
- record = SfaRecord(dict=record_object)
- logger.info("Import: removing %s " % record.summary_string())
- record.delete()
-
- # save pub keys
- logger.info('Import: saving current pub keys')
- save_keys(keys_filename, person_keys)
-
-if __name__ == "__main__":
- main()
+++ /dev/null
-#!/usr/bin/python
-#
-##
-# Import PLC records into the SFA database. It is indended that this tool be
-# run once to create SFA records that reflect the current state of the
-# planetlab database.
-#
-# The import tool assumes that the existing PLC hierarchy should all be part
-# of "planetlab.us" (see the root_auth and level1_auth variables below).
-#
-# Public keys are extracted from the users' SSH keys automatically and used to
-# create GIDs. This is relatively experimental as a custom tool had to be
-# written to perform conversion from SSH to OpenSSL format. It only supports
-# RSA keys at this time, not DSA keys.
-##
-
-import os
-import getopt
-import sys
-
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
-from sfa.util.plxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_pl_slicename
-from sfa.storage.table import SfaTable
-from sfa.storage.record import SfaRecord
-from sfa.trust.gid import create_uuid
-from sfa.trust.certificate import convert_public_key, Keypair
-from sfa.importer.sfaImport import sfaImport, _cleanup_string
-from sfa.plc.plshell import PlShell
-
-def process_options():
-
- (options, args) = getopt.getopt(sys.argv[1:], '', [])
- for opt in options:
- name = opt[0]
- val = opt[1]
-
-
-def load_keys(filename):
- keys = {}
- tmp_dict = {}
- try:
- execfile(filename, tmp_dict)
- if 'keys' in tmp_dict:
- keys = tmp_dict['keys']
- return keys
- except:
- return keys
-
-def save_keys(filename, keys):
- f = open(filename, 'w')
- f.write("keys = %s" % str(keys))
- f.close()
-
-def _get_site_hrn(interface_hrn, site):
- # Hardcode 'internet2' into the hrn for sites hosting
- # internet2 nodes. This is a special operation for some vini
- # sites only
- hrn = ".".join([interface_hrn, site['login_base']])
- if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
- if site['login_base'].startswith("i2") or site['login_base'].startswith("nlr"):
- hrn = ".".join([interface_hrn, "internet2", site['login_base']])
- return hrn
-
-def main():
-
- process_options()
- config = Config()
- if not config.SFA_REGISTRY_ENABLED:
- sys.exit(0)
- root_auth = config.SFA_REGISTRY_ROOT_AUTH
- interface_hrn = config.SFA_INTERFACE_HRN
- keys_filename = config.config_path + os.sep + 'person_keys.py'
- sfaImporter = sfaImport()
- sfaImporter.create_top_level_records()
- logger=sfaImporter.logger
- logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL)
- shell = PlShell (config)
-
- # create dict of all existing sfa records
- existing_records = {}
- existing_hrns = []
- key_ids = []
- person_keys = {}
- table = SfaTable()
- results = table.find()
- for result in results:
- existing_records[(result['hrn'], result['type'])] = result
- existing_hrns.append(result['hrn'])
-
- # Get all plc sites
- sites = shell.GetSites({'peer_id': None})
- sites_dict = {}
- for site in sites:
- sites_dict[site['login_base']] = site
-
- # Get all plc users
- persons = shell.GetPersons({'peer_id': None, 'enabled': True},
- ['person_id', 'email', 'key_ids', 'site_ids'])
- persons_dict = {}
- for person in persons:
- persons_dict[person['person_id']] = person
- key_ids.extend(person['key_ids'])
-
- # Get all public keys
- keys = shell.GetKeys( {'peer_id': None, 'key_id': key_ids})
- keys_dict = {}
- for key in keys:
- keys_dict[key['key_id']] = key['key']
-
- # create a dict of person keys keyed on key_id
- old_person_keys = load_keys(keys_filename)
- for person in persons:
- pubkeys = []
- for key_id in person['key_ids']:
- pubkeys.append(keys_dict[key_id])
- person_keys[person['person_id']] = pubkeys
-
- # Get all plc nodes
- nodes = shell.GetNodes( {'peer_id': None}, ['node_id', 'hostname', 'site_id'])
- nodes_dict = {}
- for node in nodes:
- nodes_dict[node['node_id']] = node
-
- # Get all plc slices
- slices = shell.GetSlices( {'peer_id': None}, ['slice_id', 'name'])
- slices_dict = {}
- for slice in slices:
- slices_dict[slice['slice_id']] = slice
-
- # special case for vini
- if ".vini" in interface_hrn and interface_hrn.endswith('vini'):
- # create a fake internet2 site first
- i2site = {'name': 'Internet2', 'abbreviated_name': 'I2',
- 'login_base': 'internet2', 'site_id': -1}
- site_hrn = _get_site_hrn(interface_hrn, i2site)
- logger.info("Importing site: %s" % site_hrn)
- # import if hrn is not in list of existing hrns or if the hrn exists
- # but its not a site record
- if site_hrn not in existing_hrns or \
- (site_hrn, 'authority') not in existing_records:
- logger.info("Import: site %s " % site_hrn)
- urn = hrn_to_urn(site_hrn, 'authority')
- if not sfaImporter.AuthHierarchy.auth_exists(urn):
- sfaImporter.AuthHierarchy.create_auth(urn)
- auth_info = sfaImporter.AuthHierarchy.get_auth_info(urn)
- auth_record = SfaRecord(hrn=site_hrn, gid=auth_info.get_gid_object(), \
- type="authority", pointer=site['site_id'],
- authority=get_authority(site_hrn))
- auth_record.sync(verbose=True)
-
- # start importing
- for site in sites:
- site_hrn = _get_site_hrn(interface_hrn, site)
- logger.info("Importing site: %s" % site_hrn)
-
- # import if hrn is not in list of existing hrns or if the hrn exists
- # but its not a site record
- if site_hrn not in existing_hrns or \
- (site_hrn, 'authority') not in existing_records:
- try:
- logger.info("Import: site %s " % site_hrn)
- urn = hrn_to_urn(site_hrn, 'authority')
- if not sfaImporter.AuthHierarchy.auth_exists(urn):
- sfaImporter.AuthHierarchy.create_auth(urn)
- auth_info = sfaImporter.AuthHierarchy.get_auth_info(urn)
- auth_record = SfaRecord(hrn=site_hrn, gid=auth_info.get_gid_object(), \
- type="authority", pointer=site['site_id'],
- authority=get_authority(site_hrn))
- logger.info("Import: importing site: %s" % auth_record.summary_string())
- auth_record.sync()
- except:
- # if the site import fails then there is no point in trying to import the
- # site's child records (node, slices, persons), so skip them.
- logger.log_exc("Import: failed to import site. Skipping child records")
- continue
-
- # import node records
- for node_id in site['node_ids']:
- if node_id not in nodes_dict:
- continue
- node = nodes_dict[node_id]
- site_auth = get_authority(site_hrn)
- site_name = get_leaf(site_hrn)
- hrn = hostname_to_hrn(site_auth, site_name, node['hostname'])
- if len(hrn) > 64:
- hrn = hrn[:64]
- if hrn not in existing_hrns or \
- (hrn, 'node') not in existing_records:
- try:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(hrn, 'node')
- node_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
- node_record = SfaRecord(hrn=hrn, gid=node_gid, type="node", pointer=node['node_id'], authority=get_authority(hrn))
- logger.info("Import: importing node: %s" % node_record.summary_string())
- node_record.sync()
- except:
- logger.log_exc("Import: failed to import node")
-
-
- # import slices
- for slice_id in site['slice_ids']:
- if slice_id not in slices_dict:
- continue
- slice = slices_dict[slice_id]
- hrn = slicename_to_hrn(interface_hrn, slice['name'])
- #slicename = slice['name'].split("_",1)[-1]
- #slicename = _cleanup_string(slicename)
- if hrn not in existing_hrns or \
- (hrn, 'slice') not in existing_records:
- try:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(hrn, 'slice')
- slice_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
- slice_record = SfaRecord(hrn=hrn, gid=slice_gid, type="slice", pointer=slice['slice_id'],
- authority=get_authority(hrn))
- logger.info("Import: importing slice: %s" % slice_record.summary_string())
- slice_record.sync()
- except:
- logger.log_exc("Import: failed to import slice")
-
- # import persons
- for person_id in site['person_ids']:
- if person_id not in persons_dict:
- continue
- person = persons_dict[person_id]
- hrn = email_to_hrn(site_hrn, person['email'])
- if len(hrn) > 64:
- hrn = hrn[:64]
-
- # if user's primary key has chnaged then we need to update the
- # users gid by forcing a update here
- old_keys = []
- new_keys = []
- if person_id in old_person_keys:
- old_keys = old_person_keys[person_id]
- if person_id in person_keys:
- new_keys = person_keys[person_id]
- update_record = False
- for key in new_keys:
- if key not in old_keys:
- update_record = True
-
- if hrn not in existing_hrns or \
- (hrn, 'user') not in existing_records or update_record:
- try:
- if 'key_ids' in person and person['key_ids']:
- key = new_keys[0]
- try:
- pkey = convert_public_key(key)
- except:
- logger.warn('unable to convert public key for %s' % hrn)
- pkey = Keypair(create=True)
- else:
- # the user has no keys. Creating a random keypair for the user's gid
- logger.warn("Import: person %s does not have a PL public key"%hrn)
- pkey = Keypair(create=True)
- urn = hrn_to_urn(hrn, 'user')
- person_gid = sfaImporter.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
- person_record = SfaRecord(hrn=hrn, gid=person_gid, type="user", \
- pointer=person['person_id'], authority=get_authority(hrn))
- logger.info("Import: importing person: %s" % person_record.summary_string())
- person_record.sync()
- except:
- logger.log_exc("Import: failed to import person.")
-
- # remove stale records
- system_records = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
- for (record_hrn, type) in existing_records.keys():
- if record_hrn in system_records:
- continue
-
- record = existing_records[(record_hrn, type)]
- if record['peer_authority']:
- continue
-
- # dont delete vini's internet2 placeholdder record
- # normally this would be deleted becuase it does not have a plc record
- if ".vini" in interface_hrn and interface_hrn.endswith('vini') and \
- record_hrn.endswith("internet2"):
- continue
-
- found = False
-
- if type == 'authority':
- for site in sites:
- site_hrn = interface_hrn + "." + site['login_base']
- if site_hrn == record_hrn and site['site_id'] == record['pointer']:
- found = True
- break
-
- elif type == 'user':
- login_base = get_leaf(get_authority(record_hrn))
- username = get_leaf(record_hrn)
- if login_base in sites_dict:
- site = sites_dict[login_base]
- for person in persons:
- tmp_username = person['email'].split("@")[0]
- alt_username = person['email'].split("@")[0].replace(".", "_").replace("+", "_")
- if username in [tmp_username, alt_username] and \
- site['site_id'] in person['site_ids'] and \
- person['person_id'] == record['pointer']:
- found = True
- break
-
- elif type == 'slice':
- slicename = hrn_to_pl_slicename(record_hrn)
- for slice in slices:
- if slicename == slice['name'] and \
- slice['slice_id'] == record['pointer']:
- found = True
- break
-
- elif type == 'node':
- login_base = get_leaf(get_authority(record_hrn))
- nodename = Xrn.unescape(get_leaf(record_hrn))
- if login_base in sites_dict:
- site = sites_dict[login_base]
- for node in nodes:
- tmp_nodename = node['hostname']
- if tmp_nodename == nodename and \
- node['site_id'] == site['site_id'] and \
- node['node_id'] == record['pointer']:
- found = True
- break
- else:
- continue
-
- if not found:
- try:
- record_object = existing_records[(record_hrn, type)]
- record = SfaRecord(dict=record_object)
- logger.info("Import: deleting record: %s" % record.summary_string())
- record.delete()
- except:
- logger.log_exc("Import: failded to delete record")
- # save pub keys
- logger.info('Import: saving current pub keys')
- save_keys(keys_filename, person_keys)
-
-if __name__ == "__main__":
- main()
--- /dev/null
+#!/usr/bin/python
+
+import sys
+
+from optparse import OptionParser
+
+from sfa.generic import Generic
+
+from sfa.util.config import Config
+from sfa.util.sfalogging import _SfaLogger
+
+from sfa.trust.hierarchy import Hierarchy
+
+from sfa.importer.sfaimporter import SfaImporter
+
+COMMAND=sys.argv[0]
+
+def main ():
+
+ config = Config()
+ logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog')
+ logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL)
+ if not config.SFA_REGISTRY_ENABLED:
+        logger.critical("%s: need SFA_REGISTRY_ENABLED to run import" % COMMAND)
+
+ # testbed-neutral : create local certificates and the like
+ auth_hierarchy = Hierarchy ()
+ sfa_importer = SfaImporter(auth_hierarchy, logger)
+ # testbed-specific
+ testbed_importer = None
+ generic=Generic.the_flavour()
+ importer_class = generic.importer_class()
+ if importer_class:
+ logger.info ("Using flavour %s for importing (class %s)"%\
+ (generic.flavour,importer_class.__name__))
+ testbed_importer = importer_class (auth_hierarchy, logger)
+
+ parser = OptionParser ()
+ sfa_importer.add_options (parser)
+ if testbed_importer:
+ testbed_importer.add_options (parser)
+
+ (options, args) = parser.parse_args ()
+ # no args supported ?
+ if args:
+ parser.print_help()
+ sys.exit(1)
+
+ sfa_importer.run (options)
+ if testbed_importer:
+        testbed_importer.run (options)
+
+
+if __name__ == '__main__':
+ main()
+
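A note on the flavour hook used by sfa-import.py above: Generic.the_flavour() hands back a testbed-specific object whose importer_class() returns the class that performs the testbed-specific import pass (or None to skip it). A hedged sketch of the protocol such a class has to honour, with purely illustrative names (the real flavours live under sfa/generic/ and are not shown here):

    # hypothetical importer and flavour - names are made up for illustration
    class ExampleImporter:
        # the only protocol sfa-import.py relies on: __init__, add_options, run
        def __init__ (self, auth_hierarchy, logger):
            self.auth_hierarchy, self.logger = auth_hierarchy, logger
        def add_options (self, parser):
            parser.add_option ("--example-dry-run", action="store_true", default=False)
        def run (self, options):
            self.logger.info ("ExampleImporter.run")

    class ExampleGeneric:
        flavour = "example"
        def importer_class (self):
            # returning None disables the testbed-specific pass
            return ExampleImporter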
from sfa.util.sfalogging import logger
-from sfa.storage.table import SfaTable
+from sfa.storage.alchemy import engine
+from sfa.storage.dbschema import DBSchema
def main():
- usage="%prog: trash the registry DB (the 'sfa' table in the 'planetlab5' database)"
+ usage="%prog: trash the registry DB"
parser = OptionParser(usage=usage)
- parser.add_option('-f','--file-system',dest='clean_fs',action='store_true',default=False,
- help='Clean up the /var/lib/sfa/authorities area as well')
- parser.add_option('-c','--certs',dest='clean_certs',action='store_true',default=False,
- help='Remove all cached certs/gids found in /var/lib/sfa/authorities area as well')
+ parser.add_option("-f","--file-system",dest='clean_fs',action='store_true',default=False,
+ help="Clean up the /var/lib/sfa/authorities area as well")
+ parser.add_option("-c","--certs",dest='clean_certs',action='store_true',default=False,
+ help="Remove all cached certs/gids found in /var/lib/sfa/authorities area as well")
+ parser.add_option("-0","--no-reinit",dest='reinit',action='store_false',default=True,
+ help="By default a new DB schema is installed after the cleanup; this option prevents that")
(options,args)=parser.parse_args()
if args:
parser.print_help()
sys.exit(1)
+ dbschema=DBSchema()
logger.info("Purging SFA records from database")
- table = SfaTable()
- table.nuke()
+ dbschema.nuke()
+ # for convenience we re-create the schema here, so there's no need for an explicit
+ # service sfa restart
+ # however in some (upgrade) scenarios this might be wrong
+ if options.reinit:
+ logger.info("re-creating empty schema")
+ dbschema.init_or_upgrade()
if options.clean_certs:
# remove the server certificate and all gids found in /var/lib/sfa/authorities
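The nuke-then-reinit behaviour added above boils down to two DBSchema calls; a minimal sketch of that sequence, assuming a reachable SFA database:

    from sfa.storage.dbschema import DBSchema
    dbschema = DBSchema()
    dbschema.nuke()              # drop the SFA tables
    dbschema.init_or_upgrade()   # recreate an empty, up-to-date schema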
+++ /dev/null
-#
-# The import tool assumes that the existing PLC hierarchy should all be part
-# of "planetlab.us" (see the root_auth and level1_auth variables below).
-#
-# Public keys are extracted from the users' SSH keys automatically and used to
-# create GIDs. This is relatively experimental as a custom tool had to be
-# written to perform conversion from SSH to OpenSSL format. It only supports
-# RSA keys at this time, not DSA keys.
-##
-
-from sfa.util.sfalogging import _SfaLogger
-from sfa.util.xrn import get_authority, hrn_to_urn
-from sfa.util.plxrn import email_to_hrn
-from sfa.util.config import Config
-from sfa.trust.certificate import convert_public_key, Keypair
-from sfa.trust.trustedroots import TrustedRoots
-from sfa.trust.hierarchy import Hierarchy
-from sfa.trust.gid import create_uuid
-from sfa.storage.table import SfaTable
-from sfa.storage.record import SfaRecord
-
-
-def _un_unicode(str):
- if isinstance(str, unicode):
- return str.encode("ascii", "ignore")
- else:
- return str
-
-def _cleanup_string(str):
- # pgsql has a fit with strings that have high ascii in them, so filter it
- # out when generating the hrns.
- tmp = ""
- for c in str:
- if ord(c) < 128:
- tmp = tmp + c
- str = tmp
-
- str = _un_unicode(str)
- str = str.replace(" ", "_")
- str = str.replace(".", "_")
- str = str.replace("(", "_")
- str = str.replace("'", "_")
- str = str.replace(")", "_")
- str = str.replace('"', "_")
- return str
-
-class sfaImport:
-
- def __init__(self):
- self.logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog')
- self.AuthHierarchy = Hierarchy()
-# self.table = SfaTable()
- self.config = Config()
- self.TrustedRoots = TrustedRoots(Config.get_trustedroots_dir(self.config))
- self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
-
- def create_top_level_records(self):
- """
- Create top level and interface records
- """
- # create root authority
- interface_hrn = self.config.SFA_INTERFACE_HRN
- self.create_top_level_auth_records(interface_hrn)
-
- # create s user record for the slice manager
- self.create_sm_client_record()
-
- # create interface records
- self.logger.info("Import: creating interface records")
- self.create_interface_records()
-
- # add local root authority's cert to trusted list
- self.logger.info("Import: adding " + interface_hrn + " to trusted list")
- authority = self.AuthHierarchy.get_auth_info(interface_hrn)
- self.TrustedRoots.add_gid(authority.get_gid_object())
-
- def create_top_level_auth_records(self, hrn):
- """
- Create top level db records (includes root and sub authorities (local/remote)
- """
- # make sure parent exists
- parent_hrn = get_authority(hrn)
- if not parent_hrn:
- parent_hrn = hrn
- if not parent_hrn == hrn:
- self.create_top_level_auth_records(parent_hrn)
-
- # enxure key and cert exists:
- self.AuthHierarchy.create_top_level_auth(hrn)
- # create the db record if it doesnt already exist
- auth_info = self.AuthHierarchy.get_auth_info(hrn)
- auth_record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), type="authority", pointer=-1, authority=get_authority(hrn))
- self.logger.info("Import: importing %s " % auth_record.summary_string())
- auth_record.sync()
-
- def create_sm_client_record(self):
- """
- Create a user record for the Slicemanager service.
- """
- hrn = self.config.SFA_INTERFACE_HRN + '.slicemanager'
- urn = hrn_to_urn(hrn, 'user')
- if not self.AuthHierarchy.auth_exists(urn):
- self.logger.info("Import: creating Slice Manager user")
- self.AuthHierarchy.create_auth(urn)
-
- auth_info = self.AuthHierarchy.get_auth_info(hrn)
- record = SfaRecord(hrn=hrn, gid=auth_info.get_gid_object(), \
- type="user", pointer=-1, authority=get_authority(hrn))
- self.logger.info("Import: importing %s " % record.summary_string())
- record.sync()
-
- def create_interface_records(self):
- """
- Create a record for each SFA interface
- """
- # just create certs for all sfa interfaces even if they
- # arent enabled
- hrn = self.config.SFA_INTERFACE_HRN
- interfaces = ['authority+sa', 'authority+am', 'authority+sm']
- table = SfaTable()
- auth_info = self.AuthHierarchy.get_auth_info(hrn)
- pkey = auth_info.get_pkey_object()
- for interface in interfaces:
- urn = hrn_to_urn(hrn, interface)
- gid = self.AuthHierarchy.create_gid(urn, create_uuid(), pkey)
- interface_record = SfaRecord(hrn=hrn, type=interface, pointer=-1,
- gid = gid, authority=get_authority(hrn))
- self.logger.info("Import: importing %s " % interface_record.summary_string())
- interface_record.sync()
-
- def delete_record(self, hrn, type):
- # delete the record
- table = SfaTable()
- record_list = table.find({'type': type, 'hrn': hrn})
- for record in record_list:
- self.logger.info("Import: removing record %s %s" % (type, hrn))
- table.remove(record)
--- /dev/null
+#
+# Public keys are extracted from the users' SSH keys automatically and used to
+# create GIDs. This is relatively experimental as a custom tool had to be
+# written to perform conversion from SSH to OpenSSL format. It only supports
+# RSA keys at this time, not DSA keys.
+##
+
+from sfa.util.xrn import get_authority, hrn_to_urn
+from sfa.util.plxrn import email_to_hrn
+from sfa.util.config import Config
+from sfa.trust.certificate import convert_public_key, Keypair
+from sfa.trust.trustedroots import TrustedRoots
+from sfa.trust.gid import create_uuid
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegUser
+
+def _un_unicode(str):
+ if isinstance(str, unicode):
+ return str.encode("ascii", "ignore")
+ else:
+ return str
+
+def _cleanup_string(str):
+ # pgsql has a fit with strings that have high ascii in them, so filter it
+ # out when generating the hrns.
+ tmp = ""
+ for c in str:
+ if ord(c) < 128:
+ tmp = tmp + c
+ str = tmp
+
+ str = _un_unicode(str)
+ str = str.replace(" ", "_")
+ str = str.replace(".", "_")
+ str = str.replace("(", "_")
+ str = str.replace("'", "_")
+ str = str.replace(")", "_")
+ str = str.replace('"', "_")
+ return str
+
+class SfaImporter:
+
+ def __init__(self, auth_hierarchy, logger):
+ self.logger=logger
+ self.auth_hierarchy = auth_hierarchy
+ config = Config()
+ self.TrustedRoots = TrustedRoots(Config.get_trustedroots_dir(config))
+ self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ self.interface_hrn = config.SFA_INTERFACE_HRN
+
+ # check before creating a RegRecord entry as we run this over and over
+ def record_exists (self, type, hrn):
+ return dbsession.query(RegRecord).filter_by(hrn=hrn,type=type).count()!=0
+
+ # record options into an OptionParser
+ def add_options (self, parser):
+ # no generic option
+ pass
+
+ def run (self, options):
+ self.logger.info ("SfaImporter.run : no options used")
+ self.create_top_level_records()
+
+ def create_top_level_records(self):
+ """
+ Create top level and interface records
+ """
+ # create root authority
+ self.create_top_level_auth_records(self.interface_hrn)
+
+        # create a user record for the slice manager
+ self.create_sm_client_record()
+
+ # create interface records
+        # xxx we considered turning off the creation of the authority+* records,
+        # but in fact this is required - SfaApi._getCredentialRaw relies on
+        # locating 'authority+sa'
+ self.create_interface_records()
+
+ # add local root authority's cert to trusted list
+ self.logger.info("SfaImporter: adding " + self.interface_hrn + " to trusted list")
+ authority = self.auth_hierarchy.get_auth_info(self.interface_hrn)
+ self.TrustedRoots.add_gid(authority.get_gid_object())
+
+ def create_top_level_auth_records(self, hrn):
+ """
+        Create top level db records (includes root and sub authorities (local/remote))
+ """
+ # make sure parent exists
+ parent_hrn = get_authority(hrn)
+ if not parent_hrn:
+ parent_hrn = hrn
+ if not parent_hrn == hrn:
+ self.create_top_level_auth_records(parent_hrn)
+
+        # ensure key and cert exist:
+        self.auth_hierarchy.create_top_level_auth(hrn)
+        # create the db record if it doesn't already exist
+ if self.record_exists ('authority',hrn): return
+ auth_info = self.auth_hierarchy.get_auth_info(hrn)
+ auth_record = RegAuthority(hrn=hrn, gid=auth_info.get_gid_object(),
+ authority=get_authority(hrn))
+ auth_record.just_created()
+ dbsession.add (auth_record)
+ dbsession.commit()
+ self.logger.info("SfaImporter: imported authority (parent) %s " % auth_record)
+
+ def create_sm_client_record(self):
+ """
+ Create a user record for the Slicemanager service.
+ """
+ hrn = self.interface_hrn + '.slicemanager'
+ urn = hrn_to_urn(hrn, 'user')
+ if not self.auth_hierarchy.auth_exists(urn):
+ self.logger.info("SfaImporter: creating Slice Manager user")
+ self.auth_hierarchy.create_auth(urn)
+
+ if self.record_exists ('user',hrn): return
+ auth_info = self.auth_hierarchy.get_auth_info(hrn)
+ user_record = RegUser(hrn=hrn, gid=auth_info.get_gid_object(),
+ authority=get_authority(hrn))
+ user_record.just_created()
+ dbsession.add (user_record)
+ dbsession.commit()
+ self.logger.info("SfaImporter: importing user (slicemanager) %s " % user_record)
+
+ def create_interface_records(self):
+ """
+ Create a record for each SFA interface
+ """
+ # just create certs for all sfa interfaces even if they
+ # aren't enabled
+ auth_info = self.auth_hierarchy.get_auth_info(self.interface_hrn)
+ pkey = auth_info.get_pkey_object()
+ hrn=self.interface_hrn
+ for type in [ 'authority+sa', 'authority+am', 'authority+sm', ]:
+ urn = hrn_to_urn(hrn, type)
+ gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ # for now we have to preserve the authority+<> stuff
+ if self.record_exists (type,hrn): continue
+ interface_record = RegAuthority(type=type, hrn=hrn, gid=gid,
+ authority=get_authority(hrn))
+ interface_record.just_created()
+ dbsession.add (interface_record)
+ dbsession.commit()
+ self.logger.info("SfaImporter: imported authority (%s) %s " % (type,interface_record))
+
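The RegRecord/RegAuthority/RegUser/RegSlice/RegKey classes used throughout this changeset come from sfa/storage/model.py, which is not part of this excerpt. As a rough, hedged sketch, the usage above suggests a single-table-inheritance declarative mapping along these lines (the table name and any column beyond type/hrn/gid/authority/pointer are guesses):

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class RegRecord (Base):
        __tablename__ = 'records'          # assumed table name
        record_id = Column (Integer, primary_key=True)
        type      = Column (String)
        hrn       = Column (String)
        gid       = Column (String)
        authority = Column (String)
        pointer   = Column (Integer, default=-1)
        __mapper_args__ = { 'polymorphic_on': type, 'polymorphic_identity': 'record' }

    class RegAuthority (RegRecord):
        __mapper_args__ = { 'polymorphic_identity': 'authority' }

    class RegUser (RegRecord):
        __mapper_args__ = { 'polymorphic_identity': 'user' }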
import types
-import time
# for get_key_from_incoming_ip
import tempfile
import os
from sfa.trust.certificate import Certificate, Keypair, convert_public_key
from sfa.trust.gid import create_uuid
-from sfa.storage.record import SfaRecord
-from sfa.storage.table import SfaTable
+from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey
+from sfa.storage.alchemy import dbsession
class RegistryManager:
auth_hrn = api.auth.get_authority(hrn)
if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
auth_hrn = hrn
- # get record info
auth_info = api.auth.get_auth_info(auth_hrn)
- table = SfaTable()
- records = table.findObjects({'type': type, 'hrn': hrn})
- if not records:
- raise RecordNotFound(hrn)
- record = records[0]
+ # get record info
+ record=dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first()
+ if not record:
+ raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
# verify_cancreate_credential requires that the member lists
# (researchers, pis, etc) be filled in
- self.driver.augment_records_with_testbed_info (record)
- if not self.driver.is_enabled (record):
- raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))
+ logger.debug("get credential before augment dict, keys=%s"%record.__dict__.keys())
+ self.driver.augment_records_with_testbed_info (record.__dict__)
+ logger.debug("get credential after augment dict, keys=%s"%record.__dict__.keys())
+ if not self.driver.is_enabled (record.__dict__):
+ raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record.email))
# get the callers gid
# if this is a self cred the record's gid is the caller's gid
caller_hrn = caller_gid.get_hrn()
object_hrn = record.get_gid_object().get_hrn()
- rights = api.auth.determine_user_rights(caller_hrn, record)
+ rights = api.auth.determine_user_rights(caller_hrn, record.__dict__)
# make sure caller has rights to this object
if rights.is_empty():
raise PermissionError("%s has no rights to %s (%s)" % \
(caller_hrn, object_hrn, xrn))
- object_gid = GID(string=record['gid'])
+ object_gid = GID(string=record.gid)
new_cred = Credential(subject = object_gid.get_subject())
new_cred.set_gid_caller(caller_gid)
new_cred.set_gid_object(object_gid)
#new_cred.set_pubkey(object_gid.get_pubkey())
new_cred.set_privileges(rights)
new_cred.get_privileges().delegate_all_privileges(True)
- if 'expires' in record:
- date = utcparse(record['expires'])
+ if hasattr(record,'expires'):
+ date = utcparse(record.expires)
expires = datetime_to_epoch(date)
new_cred.set_expiration(int(expires))
auth_kind = "authority,ma,sa"
type = Xrn(xrns).get_type()
xrns = [xrns]
hrns = [urn_to_hrn(xrn)[0] for xrn in xrns]
+
# load all known registry names into a prefix tree and attempt to find
# the longest matching prefix
- # create a dict where key is a registry hrn and its value is a
- # hrns at that registry (determined by the known prefix tree).
+ # create a dict where key is a registry hrn and its value is a list
+ # of hrns at that registry (determined by the known prefix tree).
xrn_dict = {}
registries = api.registries
tree = prefixTree()
credential = api.getCredential()
interface = api.registries[registry_hrn]
server_proxy = api.server_proxy(interface, credential)
- peer_records = server_proxy.Resolve(xrns, credential)
- records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
+ peer_records = server_proxy.Resolve(xrns, credential,type)
+ # pass foreign records as-is
+ # previous code used to read
+ # records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
+ # not sure why the records coming through xmlrpc had to be processed at all
+ records.extend(peer_records)
# try resolving the remaining unfound records at the local registry
local_hrns = list ( set(hrns).difference([record['hrn'] for record in records]) )
#
- table = SfaTable()
- local_records = table.findObjects({'hrn': local_hrns})
+ local_records = dbsession.query(RegRecord).filter(RegRecord.hrn.in_(local_hrns))
+ if type:
+ local_records = local_records.filter_by(type=type)
+ local_records=local_records.all()
+ logger.info("Resolve: local_records=%s (type=%s)"%(local_records,type))
+ local_dicts = [ record.__dict__ for record in local_records ]
if full:
# in full mode we get as much info as we can, which involves contacting the
# testbed for getting implementation details about the record
- self.driver.augment_records_with_testbed_info(local_records)
+ self.driver.augment_records_with_testbed_info(local_dicts)
# also we fill the 'url' field for known authorities
# used to be in the driver code, sounds like a poorman thing though
def solve_neighbour_url (record):
- if not record['type'].startswith('authority'): return
- hrn=record['hrn']
+ if not record.type.startswith('authority'): return
+ hrn=record.hrn
for neighbour_dict in [ api.aggregates, api.registries ]:
if hrn in neighbour_dict:
- record['url']=neighbour_dict[hrn].get_url()
+ record.url=neighbour_dict[hrn].get_url()
return
- [ solve_neighbour_url (record) for record in local_records ]
-
-
+ for record in local_records: solve_neighbour_url (record)
- # convert local record objects to dicts
- records.extend([dict(record) for record in local_records])
- if type:
- records = filter(lambda rec: rec['type'] in [type], records)
-
+ # convert local record objects to dicts for xmlrpc
+ # xxx somehow here calling dict(record) issues a weird error
+ # however record.todict() seems to work fine
+ # records.extend( [ dict(record) for record in local_records ] )
+ records.extend( [ record.todict() for record in local_records ] )
if not records:
raise RecordNotFound(str(hrns))
return records
- def List(self, api, xrn, origin_hrn=None):
+ def List (self, api, xrn, origin_hrn=None):
hrn, type = urn_to_hrn(xrn)
# load all know registry names into a prefix tree and attempt to find
# the longest matching prefix
- records = []
registries = api.registries
registry_hrns = registries.keys()
tree = prefixTree()
raise MissingAuthority(xrn)
# if the best match (longest matching hrn) is not the local registry,
# forward the request
- records = []
+ record_dicts = []
if registry_hrn != api.hrn:
credential = api.getCredential()
interface = api.registries[registry_hrn]
server_proxy = api.server_proxy(interface, credential)
record_list = server_proxy.List(xrn, credential)
- records = [SfaRecord(dict=record).as_dict() for record in record_list]
+            # same as above, no need to process what comes through xmlrpc
+ # pass foreign records as-is
+ record_dicts = record_list
# if we still have not found the record yet, try the local registry
- if not records:
+ if not record_dicts:
if not api.auth.hierarchy.auth_exists(hrn):
raise MissingAuthority(hrn)
+ records = dbsession.query(RegRecord).filter_by(authority=hrn)
+ record_dicts=[ record.todict() for record in records ]
- table = SfaTable()
- records = table.find({'authority': hrn})
-
- return records
+ return record_dicts
def CreateGid(self, api, xrn, cert):
# subject_record describes the subject of the relationships
# ref_record contains the target values for the various relationships we need to manage
# (to begin with, this is just the slice x person relationship)
- def update_relations (self, subject_record, ref_record):
- type=subject_record['type']
+ def update_relations (self, subject_obj, ref_obj):
+ type=subject_obj.type
if type=='slice':
- self.update_relation(subject_record, 'researcher', ref_record.get('researcher'), 'user')
+ self.update_relation(subject_obj, 'researcher', ref_obj.researcher, 'user')
# field_key is the name of one field in the record, typically 'researcher' for a 'slice' record
# hrns is the list of hrns that should be linked to the subject from now on
# target_type would be e.g. 'user' in the 'slice' x 'researcher' example
- def update_relation (self, sfa_record, field_key, hrns, target_type):
+ def update_relation (self, record_obj, field_key, hrns, target_type):
# locate the linked objects in our db
- subject_type=sfa_record['type']
- subject_id=sfa_record['pointer']
- table = SfaTable()
- link_sfa_records = table.find ({'type':target_type, 'hrn': hrns})
- link_ids = [ rec.get('pointer') for rec in link_sfa_records ]
+ subject_type=record_obj.type
+ subject_id=record_obj.pointer
+ # get the 'pointer' field of all matching records
+ link_id_tuples = dbsession.query(RegRecord.pointer).filter_by(type=target_type).filter(RegRecord.hrn.in_(hrns)).all()
+ # sqlalchemy returns named tuples for columns
+ link_ids = [ tuple.pointer for tuple in link_id_tuples ]
self.driver.update_relation (subject_type, target_type, subject_id, link_ids)
-
- def Register(self, api, record):
+ def Register(self, api, record_dict):
- hrn, type = record['hrn'], record['type']
+ hrn, type = record_dict['hrn'], record_dict['type']
urn = hrn_to_urn(hrn,type)
# validate the type
if type not in ['authority', 'slice', 'node', 'user']:
raise UnknownSfaType(type)
- # check if record already exists
- table = SfaTable()
- existing_records = table.find({'type': type, 'hrn': hrn})
+ # check if record_dict already exists
+ existing_records = dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).all()
if existing_records:
raise ExistingRecord(hrn)
- record = SfaRecord(dict = record)
- record['authority'] = get_authority(record['hrn'])
- auth_info = api.auth.get_auth_info(record['authority'])
+ assert ('type' in record_dict)
+ # returns the right type of RegRecord according to type in record
+ record = make_record(dict=record_dict)
+ record.just_created()
+ record.authority = get_authority(record.hrn)
+ auth_info = api.auth.get_auth_info(record.authority)
pub_key = None
# make sure record has a gid
- if 'gid' not in record:
+ if not record.gid:
uuid = create_uuid()
pkey = Keypair(create=True)
- if 'keys' in record and record['keys']:
- pub_key=record['keys']
+ if getattr(record,'keys',None):
+ pub_key=record.keys
# use only first key in record
- if isinstance(record['keys'], types.ListType):
- pub_key = record['keys'][0]
+ if isinstance(record.keys, types.ListType):
+ pub_key = record.keys[0]
pkey = convert_public_key(pub_key)
gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
gid = gid_object.save_to_string(save_parents=True)
- record['gid'] = gid
- record.set_gid(gid)
+ record.gid = gid
- if type in ["authority"]:
+ if isinstance (record, RegAuthority):
# update the tree
if not api.auth.hierarchy.auth_exists(hrn):
api.auth.hierarchy.create_auth(hrn_to_urn(hrn,'authority'))
# get the GID from the newly created authority
gid = auth_info.get_gid_object()
- record.set_gid(gid.save_to_string(save_parents=True))
+ record.gid=gid.save_to_string(save_parents=True)
+ elif isinstance (record, RegSlice):
+ # locate objects for relationships
+ if hasattr (record, 'researcher'):
+ # we get the list of researcher hrns as
+ researcher_hrns = record.researcher
+ # strip that in case we have <researcher> words </researcher>
+ researcher_hrns = [ x.strip() for x in researcher_hrns ]
+ logger.info ("incoming researchers %s"%researcher_hrns)
+ request = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns))
+ logger.info ("%d incoming hrns, %d matches found"%(len(researcher_hrns),request.count()))
+ researchers = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns)).all()
+ record.reg_researchers = researchers
+
+ elif isinstance (record, RegUser):
+ # create RegKey objects for incoming keys
+ if hasattr(record,'keys'):
+ logger.debug ("creating %d keys for user %s"%(len(record.keys),record.hrn))
+ record.reg_keys = [ RegKey (key) for key in record.keys ]
+
# update testbed-specific data if needed
- pointer = self.driver.register (record, hrn, pub_key)
+ pointer = self.driver.register (record.__dict__, hrn, pub_key)
- record.set_pointer(pointer)
- record_id = table.insert(record)
- record['record_id'] = record_id
+ record.pointer=pointer
+ dbsession.add(record)
+ dbsession.commit()
# update membership for researchers, pis, owners, operators
self.update_relations (record, record)
return record.get_gid_object().save_to_string(save_parents=True)
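For reference, a hedged sketch of what the incoming record_dict for Register above might look like for a slice; registry_manager and api stand for the live RegistryManager instance and API object, and all hrns are illustrative:

    record_dict = {
        'type'      : 'slice',
        'hrn'       : 'plc.princeton.test_slice',
        'researcher': ['plc.princeton.alice', 'plc.princeton.bob'],
    }
    gid_string = registry_manager.Register (api, record_dict)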
def Update(self, api, record_dict):
- new_record = SfaRecord(dict = record_dict)
- type = new_record['type']
- hrn = new_record['hrn']
- urn = hrn_to_urn(hrn,type)
- table = SfaTable()
+ assert ('type' in record_dict)
+ new_record=RegRecord(dict=record_dict)
+ type = new_record.type
+ hrn = new_record.hrn
+
# make sure the record exists
- records = table.findObjects({'type': type, 'hrn': hrn})
- if not records:
- raise RecordNotFound(hrn)
- record = records[0]
- record['last_updated'] = time.gmtime()
+ record = dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first()
+ if not record:
+ raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
+ record.just_updated()
# validate the type
if type not in ['authority', 'slice', 'node', 'user']:
# Use the pointer from the existing record, not the one that the user
# gave us. This prevents the user from inserting a forged pointer
- pointer = record['pointer']
+ pointer = record.pointer
# is the a change in keys ?
new_key=None
if type=='user':
- if 'keys' in new_record and new_record['keys']:
- new_key=new_record['keys']
+            if getattr(new_record,'keys',None):
+ new_key=new_record.keys
if isinstance (new_key,types.ListType):
new_key=new_key[0]
# update the PLC information that was specified with the record
- if not self.driver.update (record, new_record, hrn, new_key):
+ if not self.driver.update (record.__dict__, new_record.__dict__, hrn, new_key):
logger.warning("driver.update failed")
# take new_key into account
# update the openssl key and gid
pkey = convert_public_key(new_key)
uuid = create_uuid()
+ urn = hrn_to_urn(hrn,type)
gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
gid = gid_object.save_to_string(save_parents=True)
- record['gid'] = gid
- record = SfaRecord(dict=record)
- table.update(record)
+ record.gid = gid
+            dbsession.commit()
# update membership for researchers, pis, owners, operators
self.update_relations (record, new_record)
# expecting an Xrn instance
def Remove(self, api, xrn, origin_hrn=None):
-
- table = SfaTable()
- filter = {'hrn': xrn.get_hrn()}
hrn=xrn.get_hrn()
type=xrn.get_type()
+ request=dbsession.query(RegRecord).filter_by(hrn=hrn)
if type and type not in ['all', '*']:
- filter['type'] = type
+ request=request.filter_by(type=type)
- records = table.find(filter)
- if not records: raise RecordNotFound(hrn)
- record = records[0]
- type = record['type']
-
+ record = request.first()
+ if not record:
+ msg="Could not find hrn %s"%hrn
+ if type: msg += " type=%s"%type
+ raise RecordNotFound(msg)
+
+ type = record.type
if type not in ['slice', 'user', 'node', 'authority'] :
raise UnknownSfaType(type)
# call testbed callback first
# IIUC this is done on the local testbed TOO because of the refreshpeer link
- if not self.driver.remove(record):
+ if not self.driver.remove(record.__dict__):
logger.warning("driver.remove failed")
# delete from sfa db
- table.remove(record)
+ dbsession.delete(record)
+ dbsession.commit()
return 1
- # This is a PLC-specific thing...
+ # This is a PLC-specific thing, won't work with other platforms
def get_key_from_incoming_ip (self, api):
# verify that the callers's ip address exist in the db and is an interface
# for a node in the db
node = nodes[0]
# look up the sfa record
- table = SfaTable()
- records = table.findObjects({'type': 'node', 'pointer': node['node_id']})
- if not records:
- raise RecordNotFound("pointer:" + str(node['node_id']))
- record = records[0]
+ record=dbsession.query(RegRecord).filter_by(type='node',pointer=node['node_id']).first()
+ if not record:
+ raise RecordNotFound("node with pointer %s"%node['node_id'])
# generate a new keypair and gid
uuid = create_uuid()
pkey = Keypair(create=True)
- urn = hrn_to_urn(record['hrn'], record['type'])
+ urn = hrn_to_urn(record.hrn, record.type)
gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
gid = gid_object.save_to_string(save_parents=True)
- record['gid'] = gid
- record.set_gid(gid)
+ record.gid = gid
# update the record
- table.update(record)
+ dbsession.commit()
# attempt the scp the key
# and gid onto the node
import types
-import time
# for get_key_from_incoming_ip
import tempfile
import os
from sfa.util.plxrn import hrn_to_pl_login_base
from sfa.util.version import version_core
from sfa.util.sfalogging import logger
+
from sfa.trust.gid import GID
from sfa.trust.credential import Credential
from sfa.trust.certificate import Certificate, Keypair, convert_public_key
from sfa.trust.gid import create_uuid
-from sfa.storage.record import SfaRecord
-from sfa.storage.table import SfaTable
-from sfa.managers import registry_manager
-class RegistryManager(registry_manager.RegistryManager):
+from sfa.storage.model import make_record,RegRecord
+from sfa.storage.alchemy import dbsession
+
+from sfa.managers.registry_manager import RegistryManager
+
+class RegistryManager(RegistryManager):
def GetCredential(self, api, xrn, type, is_self=False):
# convert xrn to hrn
auth_hrn = api.auth.get_authority(hrn)
if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
auth_hrn = hrn
- # get record info
auth_info = api.auth.get_auth_info(auth_hrn)
- table = SfaTable()
- records = table.findObjects({'type': type, 'hrn': hrn})
- if not records:
- raise RecordNotFound(hrn)
- record = records[0]
+ # get record info
+ record=dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first()
+ if not record:
+ raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
# verify_cancreate_credential requires that the member lists
# (researchers, pis, etc) be filled in
- self.driver.augment_records_with_testbed_info (record)
- if not self.driver.is_enabled (record):
- raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record['email']))
+ logger.debug("get credential before augment dict, keys=%s"%record.__dict__.keys())
+ self.driver.augment_records_with_testbed_info (record.__dict__)
+ logger.debug("get credential after augment dict, keys=%s"%record.__dict__.keys())
+ if not self.driver.is_enabled (record.__dict__):
+ raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record.email))
# get the callers gid
# if this is a self cred the record's gid is the caller's gid
caller_hrn = caller_gid.get_hrn()
object_hrn = record.get_gid_object().get_hrn()
- rights = api.auth.determine_user_rights(caller_hrn, record)
+ rights = api.auth.determine_user_rights(caller_hrn, record.__dict__)
# make sure caller has rights to this object
if rights.is_empty():
- raise PermissionError(caller_hrn + " has no rights to " + record['name'])
+ raise PermissionError(caller_hrn + " has no rights to " + record.hrn)
- object_gid = GID(string=record['gid'])
+ object_gid = GID(string=record.gid)
new_cred = Credential(subject = object_gid.get_subject())
new_cred.set_gid_caller(caller_gid)
new_cred.set_gid_object(object_gid)
#new_cred.set_pubkey(object_gid.get_pubkey())
new_cred.set_privileges(rights)
new_cred.get_privileges().delegate_all_privileges(True)
- if 'expires' in record:
- date = utcparse(record['expires'])
+ if hasattr(record,'expires'):
+ date = utcparse(record.expires)
expires = datetime_to_epoch(date)
new_cred.set_expiration(int(expires))
auth_kind = "authority,ma,sa"
# subject_record describes the subject of the relationships
# ref_record contains the target values for the various relationships we need to manage
# (to begin with, this is just the slice x person relationship)
- def update_relations (self, subject_record, ref_record):
- type=subject_record['type']
+ def update_relations (self, subject_obj, ref_obj):
+ type=subject_obj.type
if type=='slice':
- self.update_relation(subject_record, 'researcher', ref_record.get('researcher'), 'user')
+ self.update_relation(subject_obj, 'researcher', ref_obj.researcher, 'user')
# field_key is the name of one field in the record, typically 'researcher' for a 'slice' record
# hrns is the list of hrns that should be linked to the subject from now on
# target_type would be e.g. 'user' in the 'slice' x 'researcher' example
- def update_relation (self, sfa_record, field_key, hrns, target_type):
+ def update_relation (self, record_obj, field_key, hrns, target_type):
# locate the linked objects in our db
- subject_type=sfa_record['type']
- subject_id=sfa_record['pointer']
- table = SfaTable()
- link_sfa_records = table.find ({'type':target_type, 'hrn': hrns})
- link_ids = [ rec.get('pointer') for rec in link_sfa_records ]
+ subject_type=record_obj.type
+ subject_id=record_obj.pointer
+ # get the 'pointer' field of all matching records
+ link_id_tuples = dbsession.query(RegRecord.pointer).filter_by(type=target_type).filter(RegRecord.hrn.in_(hrns)).all()
+ # sqlalchemy returns named tuples for columns
+ link_ids = [ tuple.pointer for tuple in link_id_tuples ]
self.driver.update_relation (subject_type, target_type, subject_id, link_ids)
-
# make sure users info is specified
if not users:
- msg = "'users' musst be specified and cannot be null. You may need to update your client."
+ msg = "'users' must be specified and cannot be null. You may need to update your client."
raise SfaInvalidArgument(name='users', extra=msg)
# flter rspec through sfatables
from sfa.trust.certificate import Certificate
from sfa.storage.parameter import Parameter, Mixed
-from sfa.storage.record import SfaRecord
class GetSelfCredential(Method):
"""
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name))
- # authenticate the gid
+ ### authenticate the gid
+ # import here so we can load this module at build-time for sfa2wsdl
+ #from sfa.storage.alchemy import dbsession
+ from sfa.storage.model import RegRecord
+
+ # xxx-local - the current code runs Resolve, which would forward to
+ # another registry if needed
+ # I wonder if this is truly the intention, or shouldn't we instead
+ # only look in the local db ?
records = self.api.manager.Resolve(self.api, xrn, type)
if not records:
raise RecordNotFound(hrn)
- record = SfaRecord(dict=records[0])
- gid = record.get_gid_object()
+
+ record_obj = RegRecord (dict=records[0])
+ # xxx-local the local-only version would read
+ #record_obj = dbsession.query(RegRecord).filter_by(hrn=hrn).first()
+ #if not record_obj: raise RecordNotFound(hrn)
+ gid = record_obj.get_gid_object()
gid_str = gid.save_to_string(save_parents=True)
self.api.auth.authenticateGid(gid_str, [cert, type, hrn])
# authenticate the certificate against the gid in the db
from sfa.trust.credential import Credential
from sfa.storage.parameter import Parameter, Mixed
-from sfa.storage.record import SfaRecord
class List(Method):
"""
Parameter(type([str]), "List of credentials")),
]
- returns = [SfaRecord]
+ # xxx used to be [SfaRecord]
+ returns = [Parameter(dict, "registry record")]
def call(self, xrn, creds):
hrn, type = urn_to_hrn(xrn)
from sfa.trust.credential import Credential
from sfa.storage.parameter import Parameter, Mixed
-from sfa.storage.record import SfaRecord
class Resolve(Method):
"""
Parameter(list, "List of credentials)"))
]
- returns = [SfaRecord]
+ # xxx used to be [SfaRecord]
+ returns = [Parameter(dict, "registry record")]
def call(self, xrns, creds):
type = None
#
from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+
from sfa.util.sfalogging import logger
from sfa.util.defaultdict import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.util.cache import Cache
# used to be used in get_ticket
#from sfa.trust.sfaticket import SfaTicket
+
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec
+
# the driver interface, mostly provides default behaviours
from sfa.managers.driver import Driver
from sfa.openstack.nova_shell import NovaShell
from sfa.util.cache import Cache
# one would think the driver should not need to mess with the SFA db, but..
-from sfa.storage.table import SfaTable
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
# used to be used in get_ticket
#from sfa.trust.sfaticket import SfaTicket
self.shell.AddPersonToSite(pointer, login_base)
# What roles should this user have?
- self.shell.AddRoleToPerson('user', pointer)
+ roles=[]
+ if 'roles' in sfa_record:
+ # if specified in xml, but only low-level roles
+ roles = [ role for role in sfa_record['roles'] if role in ['user','tech'] ]
+            # default to 'user' if no other clue could be found
+ if not roles:
+ roles=['user']
+ for role in roles:
+ self.shell.AddRoleToPerson(role, pointer)
# Add the user's key
if pub_key:
self.shell.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
##
- # Convert SFA fields to PLC fields for use when registering up updating
+ # Convert SFA fields to PLC fields for use when registering or updating
# registry record in the PLC database
#
pl_record["url"] = sfa_record["url"]
if "description" in sfa_record:
pl_record["description"] = sfa_record["description"]
- if "expires" in sfa_record:
- date = utcparse(sfa_record['expires'])
- expires = datetime_to_epoch(date)
- pl_record["expires"] = expires
+ if "expires" in sfa_record:
+ date = utcparse(sfa_record['expires'])
+ expires = datetime_to_epoch(date)
+ pl_record["expires"] = expires
elif type == "node":
if not "hostname" in pl_record:
# we'll replace pl ids (person_ids) with hrns from the sfa records
# we obtain
- # get the sfa records
- table = SfaTable()
+ # get the registry records
person_list, persons = [], {}
- person_list = table.find({'type': 'user', 'pointer': person_ids})
+ person_list = dbsession.query (RegRecord).filter(RegRecord.pointer.in_(person_ids))
# create a hrns keyed on the sfa record's pointer.
# Its possible for multiple records to have the same pointer so
# the dict's value will be a list of hrns.
persons = defaultdict(list)
for person in person_list:
- persons[person['pointer']].append(person)
+ persons[person.pointer].append(person)
# get the pl records
pl_person_list, pl_persons = [], {}
# continue
sfa_info = {}
type = record['type']
+ logger.info("fill_record_sfa_info - incoming record typed %s"%type)
if (type == "slice"):
# all slice users are researchers
record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
record['PI'] = []
record['researcher'] = []
for person_id in record.get('person_ids', []):
- hrns = [person['hrn'] for person in persons[person_id]]
+ hrns = [person.hrn for person in persons[person_id]]
record['researcher'].extend(hrns)
# pis at the slice's site
pl_pis = site_pis[record['site_id']]
pi_ids = [pi['person_id'] for pi in pl_pis]
for person_id in pi_ids:
- hrns = [person['hrn'] for person in persons[person_id]]
+ hrns = [person.hrn for person in persons[person_id]]
record['PI'].extend(hrns)
record['geni_creator'] = record['PI']
elif (type.startswith("authority")):
record['url'] = None
+            logger.info("fill_record_sfa_info - authority")
if record['pointer'] != -1:
record['PI'] = []
record['operator'] = []
if pointer not in persons or pointer not in pl_persons:
# this means there is not sfa or pl record for this user
continue
- hrns = [person['hrn'] for person in persons[pointer]]
+ hrns = [person.hrn for person in persons[pointer]]
roles = pl_persons[pointer]['roles']
if 'pi' in roles:
record['PI'].extend(hrns)
# xxx TODO: URI, LatLong, IP, DNS
elif (type == "user"):
+ logger.info('setting user.email')
sfa_info['email'] = record.get("email", "")
sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
sfa_info['geni_certificate'] = record['gid']
--- /dev/null
+from sfa.rspecs.versions.pgv2 import PGv2Ad, PGv2Request, PGv2Manifest
+
+class FedericaAd (PGv2Ad):
+ enabled = True
+ schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/ad.xsd'
+ namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
+
+class FedericaRequest (PGv2Request):
+ enabled = True
+ schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/request.xsd'
+ namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
+
+class FedericaManifest (PGv2Manifest):
+ enabled = True
+ schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/manifest.xsd'
+ namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
+
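These FedeRICA classes only override the schema and namespace class attributes of their PGv2 bases; a quick, hedged sanity check (assuming the module imports cleanly) is:

    from sfa.rspecs.versions.federica import FedericaAd
    print FedericaAd.enabled, FedericaAd.schema, FedericaAd.namespace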
def __init__(self, ip, port, key_file, cert_file):
SfaServer.__init__(self, ip, port, key_file, cert_file,'registry')
+        sfa_config=Config()
+        if sfa_config.SFA_REGISTRY_ENABLED:
+ from sfa.storage.alchemy import engine
+ from sfa.storage.dbschema import DBSchema
+ DBSchema().init_or_upgrade()
#
# Registries is a dictionary of registry connections keyed on the registry hrn
from sfa.trust.gid import GID, create_uuid
from sfa.trust.hierarchy import Hierarchy
-from sfa.storage.table import SfaTable
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
def main():
args = sys.argv
hrn = options.export
type = options.type
# check sfa table first
- filter = {'hrn': hrn}
- if type:
- filter['type'] = type
- table = SfaTable()
- records = table.find(filter)
- if not records:
+ request=dbsession.query(RegRecord).filter_by(hrn=hrn)
+ if type: request = request.filter_by(type=type)
+ record=request.first()
+ if not record:
# check the authorities hierarchy
hierarchy = Hierarchy()
try:
print "Record: %s not found" % hrn
sys.exit(1)
else:
- record = records[0]
- gid = GID(string=record['gid'])
+ gid = GID(string=record.gid)
# get the outfile
outfile = options.outfile
sys.exit(1)
# check if record exists in db
- table = SfaTable()
- records = table.find({'hrn': gid.get_hrn(), 'type': 'authority'})
- if not records:
+ record = dbsession.query(RegRecord).filter_by(type='authority',hrn=gid.get_hrn()).first()
+ if not record:
print "%s not found in record database" % gid.get_hrn()
sys.exit(1)
# update the database record
- record = records[0]
- record['gid'] = gid.save_to_string(save_parents=True)
- table.update(record)
+ record.gid = gid.save_to_string(save_parents=True)
+ dbsession.commit()
if options.verbose:
print "Imported %s gid into db" % record['hrn']
# is up to date and accurate.
#
# 1) Import the existing planetlab database, creating the
-# appropriate SFA records. This is done by running the "sfa-import-plc.py" tool.
+# appropriate SFA records. This is done by running the "sfa-import.py" tool.
#
# 2) Create a "trusted_roots" directory and place the certificate of the root
-# authority in that directory. Given the defaults in sfa-import-plc.py, this
+# authority in that directory. Given the defaults in sfa-import.py, this
Make sure there is a record in the registry for the specified gids.
Removes old records from the db.
"""
- # import SfaTable here so this module can be loaded by PlcComponentApi
- from sfa.storage.table import SfaTable
- from sfa.storage.record import SfaRecord
+        # import db stuff here so this module can be loaded by PlcComponentApi
+ from sfa.storage.alchemy import dbsession
+ from sfa.storage.model import RegRecord
if not gids:
return
- table = SfaTable()
# get records that actually exist in the db
gid_urns = [gid.get_urn() for gid in gids]
hrns_expected = [gid.get_hrn() for gid in gids]
- records_found = table.find({'hrn': hrns_expected, 'pointer': -1})
+ records_found = dbsession.query(RegRecord).\
+ filter_by(pointer=-1).filter(RegRecord.hrn.in_(hrns_expected)).all()
# remove old records
for record in records_found:
- if record['hrn'] not in hrns_expected and \
- record['hrn'] != self.api.config.SFA_INTERFACE_HRN:
- table.remove(record)
+ if record.hrn not in hrns_expected and \
+ record.hrn != self.api.config.SFA_INTERFACE_HRN:
+ dbsession.delete(record)
# TODO: store urn in the db so we do this in 1 query
for gid in gids:
hrn, type = gid.get_hrn(), gid.get_type()
- record = table.find({'hrn': hrn, 'type': type, 'pointer': -1})
+ record = dbsession.query(RegRecord).filter_by(hrn=hrn, type=type,pointer=-1).first()
if not record:
- record = {
- 'hrn': hrn, 'type': type, 'pointer': -1,
- 'authority': get_authority(hrn),
- 'gid': gid.save_to_string(save_parents=True),
- }
- record = SfaRecord(dict=record)
- table.insert(record)
+ record = RegRecord (dict= {'type':type,
+ 'hrn': hrn,
+ 'authority': get_authority(hrn),
+ 'gid': gid.save_to_string(save_parents=True),
+ })
+ dbsession.add(record)
+ dbsession.commit()
def main():
# Generate command line parser
import os, os.path
import datetime
-from sfa.util.faults import SfaFault, SfaAPIError
+from sfa.util.faults import SfaFault, SfaAPIError, RecordNotFound
from sfa.util.genicode import GENICODE
from sfa.util.config import Config
from sfa.util.cache import Cache
-from sfa.trust.auth import Auth
+from sfa.trust.auth import Auth
from sfa.trust.certificate import Keypair, Certificate
from sfa.trust.credential import Credential
from sfa.trust.rights import determine_rights
# get a new credential
if self.interface in ['registry']:
- cred = self.__getCredentialRaw()
+ cred = self._getCredentialRaw()
else:
- cred = self.__getCredential()
+ cred = self._getCredential()
cred.save_to_file(cred_filename, save_parents=True)
return cred.save_to_string(save_parents=True)
break
return delegated_cred
- def __getCredential(self):
+ def _getCredential(self):
"""
Get our credential from a remote registry
"""
cred = registry.GetCredential(self_cred, self.hrn, 'authority')
return Credential(string=cred)
- def __getCredentialRaw(self):
+ def _getCredentialRaw(self):
"""
Get our current credential directly from the local registry.
"""
if not auth_hrn or hrn == self.config.SFA_INTERFACE_HRN:
auth_hrn = hrn
auth_info = self.auth.get_auth_info(auth_hrn)
- # xxx thgen fixme - use SfaTable hardwired for now
- # thgen xxx fixme this is wrong all right, but temporary, will use generic
- from sfa.storage.table import SfaTable
- table = SfaTable()
- records = table.findObjects({'hrn': hrn, 'type': 'authority+sa'})
- if not records:
- raise RecordNotFound
- record = records[0]
- type = record['type']
+ from sfa.storage.alchemy import dbsession
+ from sfa.storage.model import RegRecord
+ record = dbsession.query(RegRecord).filter_by(type='authority+sa', hrn=hrn).first()
+ if not record:
+ raise RecordNotFound(hrn)
+ type = record.type
object_gid = record.get_gid_object()
new_cred = Credential(subject = object_gid.get_subject())
new_cred.set_gid_caller(object_gid)
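The rename from __getCredential/__getCredentialRaw to the single-underscore names above matters because Python name-mangles double-underscore attributes per class, which presumably got in the way once other code needed to call _getCredentialRaw directly (cf. the 'used in SfaApi._getCredentialRaw' note earlier). A tiny illustration:

    class Api (object):
        def __hidden (self):    return "stored as _Api__hidden"
        def _reachable (self):  return "callable as written"

    a = Api()
    print a._reachable()       # fine
    print a._Api__hidden()     # the only way to reach the mangled name from outside
    # a.__hidden()             # would raise AttributeError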
+++ /dev/null
-#
-# PostgreSQL database interface. Sort of like DBI(3) (Database
-# independent interface for Perl).
-#
-#
-
-import re
-import traceback
-import commands
-from pprint import pformat
-from types import StringTypes, NoneType
-
-import psycopg2
-import psycopg2.extensions
-psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
-# UNICODEARRAY not exported yet
-psycopg2.extensions.register_type(psycopg2._psycopg.UNICODEARRAY)
-
-# allow to run sfa2wsdl if this is missing (for mac)
-import sys
-try: import pgdb
-except: print >> sys.stderr, "WARNING, could not import pgdb"
-
-from sfa.util.faults import SfaDBError
-from sfa.util.sfalogging import logger
-from sfa.storage.filter import Filter
-
-if not psycopg2:
- is8bit = re.compile("[\x80-\xff]").search
-
- def unicast(typecast):
- """
- pgdb returns raw UTF-8 strings. This function casts strings that
- apppear to contain non-ASCII characters to unicode objects.
- """
-
- def wrapper(*args, **kwds):
- value = typecast(*args, **kwds)
-
- # pgdb always encodes unicode objects as UTF-8 regardless of
- # the DB encoding (and gives you no option for overriding
- # the encoding), so always decode 8-bit objects as UTF-8.
- if isinstance(value, str) and is8bit(value):
- value = unicode(value, "utf-8")
-
- return value
-
- return wrapper
-
- pgdb.pgdbTypeCache.typecast = unicast(pgdb.pgdbTypeCache.typecast)
-
-def handle_exception(f):
- def wrapper(*args, **kwds):
- try: return f(*args, **kwds)
- except Exception, fault:
- raise SfaDBError(str(fault))
- return wrapper
-
-class PostgreSQL:
- def __init__(self, config):
- self.config = config
- self.debug = False
-# self.debug = True
- self.connection = None
-
- @handle_exception
- def cursor(self):
- if self.connection is None:
- # (Re)initialize database connection
- if psycopg2:
- try:
- # Try UNIX socket first
- self.connection = psycopg2.connect(user = self.config.SFA_DB_USER,
- password = self.config.SFA_DB_PASSWORD,
- database = self.config.SFA_DB_NAME)
- except psycopg2.OperationalError:
- # Fall back on TCP
- self.connection = psycopg2.connect(user = self.config.SFA_DB_USER,
- password = self.config.SFA_DB_PASSWORD,
- database = self.config.SFA_DB_NAME,
- host = self.config.SFA_DB_HOST,
- port = self.config.SFA_DB_PORT)
- self.connection.set_client_encoding("UNICODE")
- else:
- self.connection = pgdb.connect(user = self.config.SFA_DB_USER,
- password = self.config.SFA_DB_PASSWORD,
- host = "%s:%d" % (self.config.SFA_DB_HOST, self.config.SFA_DB_PORT),
- database = self.config.SFA_DB_NAME)
-
- (self.rowcount, self.description, self.lastrowid) = \
- (None, None, None)
-
- return self.connection.cursor()
-
- def close(self):
- if self.connection is not None:
- self.connection.close()
- self.connection = None
-
- def quote(self, value):
- """
- Returns quoted version of the specified value.
- """
-
- # The pgdb._quote function is good enough for general SQL
- # quoting, except for array types.
- if isinstance(value, (list, tuple, set)):
- return "ARRAY[%s]" % ", ".join(map, self.quote, value)
- else:
- return Filter._quote(value)
-
- quote = classmethod(quote)
-
- def param(self, name, value):
- # None is converted to the unquoted string NULL
- if isinstance(value, NoneType):
- conversion = "s"
- # True and False are also converted to unquoted strings
- elif isinstance(value, bool):
- conversion = "s"
- elif isinstance(value, float):
- conversion = "f"
- elif not isinstance(value, StringTypes):
- conversion = "d"
- else:
- conversion = "s"
-
- return '%(' + name + ')' + conversion
-
- param = classmethod(param)
-
- def begin_work(self):
- # Implicit in pgdb.connect()
- pass
-
- def commit(self):
- self.connection.commit()
-
- def rollback(self):
- self.connection.rollback()
-
- def do(self, query, params = None):
- cursor = self.execute(query, params)
- cursor.close()
- return self.rowcount
-
- def next_id(self, table_name, primary_key):
- sequence = "%(table_name)s_%(primary_key)s_seq" % locals()
- sql = "SELECT nextval('%(sequence)s')" % locals()
- rows = self.selectall(sql, hashref = False)
- if rows:
- return rows[0][0]
- return None
-
- def last_insert_id(self, table_name, primary_key):
- if isinstance(self.lastrowid, int):
- sql = "SELECT %s FROM %s WHERE oid = %d" % \
- (primary_key, table_name, self.lastrowid)
- rows = self.selectall(sql, hashref = False)
- if rows:
- return rows[0][0]
-
- return None
-
- # modified for psycopg2-2.0.7
- # executemany is undefined for SELECT's
- # see http://www.python.org/dev/peps/pep-0249/
- # accepts either None, a single dict, a tuple of single dict - in which case it execute's
- # or a tuple of several dicts, in which case it executemany's
- def execute(self, query, params = None):
-
- cursor = self.cursor()
- try:
-
- # psycopg2 requires %()s format for all parameters,
- # regardless of type.
- # this needs to be done carefully though as with pattern-based filters
- # we might have percents embedded in the query
- # so e.g. GetPersons({'email':'*fake*'}) was resulting in .. LIKE '%sake%'
- if psycopg2:
- query = re.sub(r'(%\([^)]*\)|%)[df]', r'\1s', query)
- # rewrite wildcards set by Filter.py as '***' into '%'
- query = query.replace ('***','%')
-
- if not params:
- if self.debug:
- logger.debug('execute0 %r'%query)
- cursor.execute(query)
- elif isinstance(params,dict):
- if self.debug:
- logger.debug('execute-dict: params=[%r] query=[%r]'%(params,query%params))
- cursor.execute(query,params)
- elif isinstance(params,tuple) and len(params)==1:
- if self.debug:
- logger.debug('execute-tuple %r'%(query%params[0]))
- cursor.execute(query,params[0])
- else:
- param_seq=(params,)
- if self.debug:
- for params in param_seq:
- logger.debug('executemany %r'%(query%params))
- cursor.executemany(query, param_seq)
- (self.rowcount, self.description, self.lastrowid) = \
- (cursor.rowcount, cursor.description, cursor.lastrowid)
- except Exception, e:
- try:
- self.rollback()
- except:
- pass
- uuid = commands.getoutput("uuidgen")
- logger.error("Database error %s:" % uuid)
- logger.error("Exception=%r"%e)
- logger.error("Query=%r"%query)
- logger.error("Params=%r"%pformat(params))
- logger.log_exc("PostgreSQL.execute caught exception")
- raise SfaDBError("Please contact support: %s" % str(e))
-
- return cursor
-
- def selectall(self, query, params = None, hashref = True, key_field = None):
- """
- Return each row as a dictionary keyed on field name (like DBI
- selectrow_hashref()). If key_field is specified, return rows
- as a dictionary keyed on the specified field (like DBI
- selectall_hashref()).
-
- If params is specified, the specified parameters will be bound
- to the query.
- """
-
- cursor = self.execute(query, params)
- rows = cursor.fetchall()
- cursor.close()
- self.commit()
- if hashref or key_field is not None:
- # Return each row as a dictionary keyed on field name
- # (like DBI selectrow_hashref()).
- labels = [column[0] for column in self.description]
- rows = [dict(zip(labels, row)) for row in rows]
-
- if key_field is not None and key_field in labels:
- # Return rows as a dictionary keyed on the specified field
- # (like DBI selectall_hashref()).
- return dict([(row[key_field], row) for row in rows])
- else:
- return rows
-
- def fields(self, table, notnull = None, hasdef = None):
- """
- Return the names of the fields of the specified table.
- """
-
- if hasattr(self, 'fields_cache'):
- if self.fields_cache.has_key((table, notnull, hasdef)):
- return self.fields_cache[(table, notnull, hasdef)]
- else:
- self.fields_cache = {}
-
- sql = "SELECT attname FROM pg_attribute, pg_class" \
- " WHERE pg_class.oid = attrelid" \
- " AND attnum > 0 AND relname = %(table)s"
-
- if notnull is not None:
- sql += " AND attnotnull is %(notnull)s"
-
- if hasdef is not None:
- sql += " AND atthasdef is %(hasdef)s"
-
- rows = self.selectall(sql, locals(), hashref = False)
-
- self.fields_cache[(table, notnull, hasdef)] = [row[0] for row in rows]
-
- return self.fields_cache[(table, notnull, hasdef)]
--- /dev/null
+from types import StringTypes
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+
+from sqlalchemy import Column, Integer, String
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy import ForeignKey
+
+from sfa.util.sfalogging import logger
+
+# this module is designed to be loaded when the configured db server is reachable
+# OTOH model can be loaded from anywhere including the client-side
+
+class Alchemy:
+
+ def __init__ (self, config):
+ dbname="sfa"
+ # will be created lazily on-demand
+ self._session = None
+        # the former PostgreSQL.py used psycopg2 directly and was doing
+        #self.connection.set_client_encoding("UNICODE")
+        # it's unclear how to achieve this in sqlalchemy, or whether it's needed at all
+ # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
+ # we indeed have /var/lib/pgsql/data/postgresql.conf where
+ # this setting is unset, it might be an angle to tweak that if need be
+ # try a unix socket first - omitting the hostname does the trick
+ unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"%\
+ (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_PORT,dbname)
+ # the TCP fallback method
+ tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\
+ (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_HOST,config.SFA_DB_PORT,dbname)
+ for url in [ unix_url, tcp_url ] :
+ try:
+ self.engine = create_engine (url)
+ self.check()
+ self.url=url
+ return
+ except:
+ pass
+ self.engine=None
+ raise Exception,"Could not connect to database"
+
+
+ # expects boolean True: debug is ON or False: debug is OFF
+ def debug (self, echo):
+ self.engine.echo=echo
+
+ def check (self):
+ self.engine.execute ("select 1").scalar()
+
+ def session (self):
+ if self._session is None:
+ Session=sessionmaker ()
+ self._session=Session(bind=self.engine)
+ return self._session
+
+ def close_session (self):
+ if self._session is None: return
+ self._session.close()
+ self._session=None
+
+####################
+from sfa.util.config import Config
+
+alchemy=Alchemy (Config())
+engine=alchemy.engine
+dbsession=alchemy.session()
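+# a minimal usage sketch (assuming a reachable db as discussed above):
+# server-side code grabs the shared session and queries the model classes, e.g.
+#   from sfa.storage.alchemy import dbsession
+#   from sfa.storage.model import RegRecord
+#   users = dbsession.query(RegRecord).filter_by(type='user').all()
+# client-side code should only ever import sfa.storage.model, not this module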
+
--- /dev/null
+import sys
+import traceback
+
+from sqlalchemy import MetaData, Table
+from sqlalchemy.exc import NoSuchTableError
+
+import migrate.versioning.api as migrate
+
+from sfa.util.sfalogging import logger
+import sfa.storage.model as model
+
+########## this class takes care of database upgrades
+### upgrade from a pre-2.1 db
+# * 1.0 and up to 1.1-4: ('very old')
+# was piggybacking the planetlab5 database
+# this is kind of out of our scope here, we don't have the credentials
+# to connect to planetlab5, but this is documented in
+# https://svn.planet-lab.org/wiki/SFATutorialConfigureSFA#Upgradingnotes
+# and essentially this is seamless to users
+# * from 1.1-5 up to 2.0-x: ('old')
+# uses the 'sfa' db and essentially the 'records' table,
+# as well as record_types
+# together with an 'sfa_db_version' table (version, subversion)
+# * from 2.1:
+#   we have a 'records' table, plus 'users' and the like
+# and once migrate has kicked in there is a table named (see migrate.cfg)
+# migrate_db_version (repository_id, repository_path, version)
+### after 2.1
+# Starting with 2.1, we use sqlalchemy-migrate scripts in a standard way
+# Note that the model defined in sfa.storage.model needs to be maintained
+# as the 'current/latest' version, and newly installed deployments will
+# then 'jump' to the latest version number without going through the migrations
+###
+# An initial attempt to run this as a 001_*.py migrate script
+# did not quite work out (essentially we need to set the current version
+# number out of the migrations logic)
+# also this approach has less stuff in the initscript, which seems just right
+
+class DBSchema:
+
+ header="Upgrading to 2.1 or higher"
+
+ def __init__ (self):
+ from sfa.storage.alchemy import alchemy
+ self.url=alchemy.url
+ self.engine=alchemy.engine
+ self.repository="/usr/share/sfa/migrations"
+
+ def current_version (self):
+ try:
+ return migrate.db_version (self.url, self.repository)
+ except:
+ return None
+
+ def table_exists (self, tablename):
+ try:
+ metadata = MetaData (bind=self.engine)
+ table=Table (tablename, metadata, autoload=True)
+ return True
+ except NoSuchTableError:
+ return False
+
+ def drop_table (self, tablename):
+ if self.table_exists (tablename):
+ print >>sys.stderr, "%s: Dropping table %s"%(DBSchema.header,tablename)
+ self.engine.execute ("drop table %s cascade"%tablename)
+ else:
+ print >>sys.stderr, "%s: no need to drop table %s"%(DBSchema.header,tablename)
+
+ def handle_old_releases (self):
+ try:
+ # try to find out which old version this can be
+ if not self.table_exists ('records'):
+ # this likely means
+ # (.) we've just created the db, so it's either a fresh install, or
+ # (.) we come from a 'very old' depl.
+ # in either case, an import is required but there's nothing to clean up
+ print >> sys.stderr,"%s: make sure to run import"%(DBSchema.header,)
+ elif self.table_exists ('sfa_db_version'):
+ # we come from an 'old' version
+ self.drop_table ('records')
+ self.drop_table ('record_types')
+ self.drop_table ('sfa_db_version')
+ else:
+ # we should be good here
+ pass
+ except:
+ print >> sys.stderr, "%s: unknown exception"%(DBSchema.header,)
+ traceback.print_exc ()
+
+ # after this call the db schema and the version as known by migrate should
+ # reflect the current data model and the latest known version
+ def init_or_upgrade (self):
+ # check if under version control, and initialize it otherwise
+ if self.current_version() is None:
+ before="Unknown"
+ # can be either a very old version, or a fresh install
+ # for very old versions:
+ self.handle_old_releases()
+ # in any case, initialize db from current code and reflect in migrate
+ model.init_tables(self.engine)
+ code_version = migrate.version (self.repository)
+ migrate.version_control (self.url, self.repository, code_version)
+ after="%s"%self.current_version()
+ logger.info("DBSchema : jumped to version %s"%(after))
+ else:
+ # use migrate in the usual way
+ before="%s"%self.current_version()
+ migrate.upgrade (self.url, self.repository)
+ after="%s"%self.current_version()
+ if before != after:
+ logger.info("DBSchema : upgraded version from %s to %s"%(before,after))
+ else:
+ logger.debug("DBSchema : no change needed in db schema (%s==%s)"%(before,after))
+
+ # this trashes the db altogether, from the current model in sfa.storage.model
+ # I hope this won't collide with ongoing migrations and all
+ # actually, now that sfa uses its own db, this is essentially equivalent to
+ # dropping the db entirely, modulo a 'service sfa start'
+ def nuke (self):
+ model.drop_tables(self.engine)
+ # so in this case it's like we haven't initialized the db at all
+ migrate.drop_version_control (self.url, self.repository)
+
+
+if __name__ == '__main__':
+ DBSchema().init_or_upgrade()
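+# for reference, the repository above is expected to follow the standard
+# sqlalchemy-migrate layout (an assumption, the files are shipped separately):
+#   /usr/share/sfa/migrations/
+#       migrate.cfg                  -- see the config file later in this changeset
+#       versions/__init__.py
+#       versions/001_*.py            -- e.g. the slice x researcher script below
+# the init script (or the __main__ hook above) then calls init_or_upgrade() once at startup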
+++ /dev/null
-import types
-import datetime
-
-from sfa.util.faults import SfaInvalidArgument
-from sfa.storage.parameter import Parameter, Mixed, python_type
-
-class Filter(Parameter, dict):
- """
- A type of parameter that represents a filter on one or more
- columns of a database table.
- Special features provide support for negation, upper and lower bounds,
- as well as sorting and clipping.
-
-
- fields should be a dictionary of field names and types
- Only filters on non-sequence type fields are supported.
- example : fields = {'node_id': Parameter(int, "Node identifier"),
- 'hostname': Parameter(int, "Fully qualified hostname", max = 255),
- ...}
-
-
- filter should be a dictionary of field names and values
- representing the criteria for filtering.
- example : filter = { 'hostname' : '*.edu' , site_id : [34,54] }
- Whether the filter represents an intersection (AND) or a union (OR)
- of these criteria is determined by the join_with argument
- provided to the sql method below
-
- Special features:
-
- * a field starting with the ~ character means negation.
- example : filter = { '~peer_id' : None }
-
- * a field starting with < [ ] or > means lower than or greater than
- < > uses strict comparison
- [ ] is for using <= or >= instead
- example : filter = { ']event_id' : 2305 }
- example : filter = { '>time' : 1178531418 }
- in this example the integer value denotes a unix timestamp
-
- * if a value is a sequence type, then it should represent
- a list of possible values for that field
- example : filter = { 'node_id' : [12,34,56] }
-
- * a (string) value containing either a * or a % character is
- treated as a (sql) pattern; * are replaced with % that is the
- SQL wildcard character.
- example : filter = { 'hostname' : '*.jp' }
-
- * fields starting with - are special and relate to row selection, i.e. sorting and clipping
- * '-SORT' : a field name, or an ordered list of field names that are used for sorting
- these fields may start with + (default) or - for denoting increasing or decreasing order
- example : filter = { '-SORT' : [ '+node_id', '-hostname' ] }
- * '-OFFSET' : the number of first rows to be ommitted
- * '-LIMIT' : the amount of rows to be returned
- example : filter = { '-OFFSET' : 100, '-LIMIT':25}
-
- A realistic example would read
- GetNodes ( { 'node_type' : 'regular' , 'hostname' : '*.edu' , '-SORT' : 'hostname' , '-OFFSET' : 30 , '-LIMIT' : 25 } )
- and that would return regular (usual) nodes matching '*.edu' in alphabetical order from 31th to 55th
- """
-
- def __init__(self, fields = {}, filter = {}, doc = "Attribute filter"):
- # Store the filter in our dict instance
- valid_fields = {}
- for field in filter:
- if field in fields:
- valid_fields[field] = filter[field]
- dict.__init__(self, valid_fields)
-
- # Declare ourselves as a type of parameter that can take
- # either a value or a list of values for each of the specified
- # fields.
- self.fields = dict ( [ ( field, Mixed (expected, [expected]))
- for (field,expected) in fields.iteritems()
- if python_type(expected) not in (list, tuple, set) ] )
-
- # Null filter means no filter
- Parameter.__init__(self, self.fields, doc = doc, nullok = True)
-
- def quote(self, value):
- """
- Returns quoted version of the specified value.
- """
-
- # The pgdb._quote function is good enough for general SQL
- # quoting, except for array types.
- if isinstance(value, (list, tuple, set)):
- return "ARRAY[%s]" % ", ".join(map(self.quote, value))
-
- else:
- return Filter._quote(value)
-
- # pgdb._quote isn't supported in python 2.7/f16, so let's implement it here
- @staticmethod
- def _quote(x):
- if isinstance(x, datetime.datetime):
- x = str(x)
- elif isinstance(x, unicode):
- x = x.encode( 'utf-8' )
-
- if isinstance(x, types.StringType):
- x = "'%s'" % str(x).replace("\\", "\\\\").replace("'", "''")
- elif isinstance(x, (types.IntType, types.LongType, types.FloatType)):
- pass
- elif x is None:
- x = 'NULL'
- elif isinstance(x, (types.ListType, types.TupleType)):
- x = '(%s)' % ','.join(map(lambda x: str(_quote(x)), x))
- elif hasattr(x, '__pg_repr__'):
- x = x.__pg_repr__()
- else:
- raise TypeError, 'do not know how to handle type %s' % type(x)
-
- return x
-
- def sql(self, join_with = "AND"):
- """
- Returns a SQL conditional that represents this filter.
- """
-
- # So that we always return something
- if join_with == "AND":
- conditionals = ["True"]
- elif join_with == "OR":
- conditionals = ["False"]
- else:
- assert join_with in ("AND", "OR")
-
- # init
- sorts = []
- clips = []
-
- for field, value in self.iteritems():
- # handle negation, numeric comparisons
- # simple, 1-depth only mechanism
-
- modifiers={'~' : False,
- '<' : False, '>' : False,
- '[' : False, ']' : False,
- '-' : False,
- }
-
- for char in modifiers.keys():
- if field[0] == char:
- modifiers[char]=True
- field = field[1:]
- break
-
- # filter on fields
- if not modifiers['-']:
- if field not in self.fields:
- raise SfaInvalidArgument, "Invalid filter field '%s'" % field
-
- if isinstance(value, (list, tuple, set)):
- # handling filters like '~slice_id':[]
- # this should return true, as it's the opposite of 'slice_id':[] which is false
- # prior to this fix, 'slice_id':[] would have returned ``slice_id IN (NULL) '' which is unknown
- # so it worked by coincidence, but the negation '~slice_ids':[] would return false too
- if not value:
- field=""
- operator=""
- value = "FALSE"
- else:
- operator = "IN"
- value = map(str, map(self.quote, value))
- value = "(%s)" % ", ".join(value)
- else:
- if value is None:
- operator = "IS"
- value = "NULL"
- elif isinstance(value, types.StringTypes) and \
- (value.find("*") > -1 or value.find("%") > -1):
- operator = "LIKE"
- # insert *** in pattern instead of either * or %
- # we dont use % as requests are likely to %-expansion later on
- # actual replacement to % done in PostgreSQL.py
- value = value.replace ('*','***')
- value = value.replace ('%','***')
- value = str(self.quote(value))
- else:
- operator = "="
- if modifiers['<']:
- operator='<'
- if modifiers['>']:
- operator='>'
- if modifiers['[']:
- operator='<='
- if modifiers[']']:
- operator='>='
- else:
- value = str(self.quote(value))
-
- clause = "%s %s %s" % (field, operator, value)
-
- if modifiers['~']:
- clause = " ( NOT %s ) " % (clause)
-
- conditionals.append(clause)
- # sorting and clipping
- else:
- if field not in ('SORT','OFFSET','LIMIT'):
- raise SfaInvalidArgument, "Invalid filter, unknown sort and clip field %r"%field
- # sorting
- if field == 'SORT':
- if not isinstance(value,(list,tuple,set)):
- value=[value]
- for field in value:
- order = 'ASC'
- if field[0] == '+':
- field = field[1:]
- elif field[0] == '-':
- field = field[1:]
- order = 'DESC'
- if field not in self.fields:
- raise SfaInvalidArgument, "Invalid field %r in SORT filter"%field
- sorts.append("%s %s"%(field,order))
- # clipping
- elif field == 'OFFSET':
- clips.append("OFFSET %d"%value)
- # clipping continued
- elif field == 'LIMIT' :
- clips.append("LIMIT %d"%value)
-
- where_part = (" %s " % join_with).join(conditionals)
- clip_part = ""
- if sorts:
- clip_part += " ORDER BY " + ",".join(sorts)
- if clips:
- clip_part += " " + " ".join(clips)
- return (where_part,clip_part)
--- /dev/null
+[db_settings]
+# Used to identify which repository this database is versioned under.
+# You can use the name of your project.
+repository_id=sqlalchemy-migrate repository for SFA-2.1 and on
+
+# The name of the database table used to track the schema version.
+# This name shouldn't already be used by your project.
+# If this is changed once a database is under version control, you'll need to
+# change the table name in each database too.
+version_table=migrate_db_version
+
+# When committing a change script, Migrate will attempt to generate the
+# sql for all supported databases; normally, if one of them fails - probably
+# because you don't have that database installed - it is ignored and the
+# commit continues, perhaps ending successfully.
+# Databases in this list MUST compile successfully during a commit, or the
+# entire commit will fail. List the databases your application will actually
+# be using to ensure your updates to that database work properly.
+# This must be a list; example: ['postgres','sqlite']
+required_dbs=['postgres']
--- /dev/null
+# this move is about adding a slice x users many-to-many relationship for modelling
+# regular "membership" of users in a slice
+
+from sqlalchemy import Table, MetaData, Column, ForeignKey
+from sqlalchemy import Integer, String
+
+metadata=MetaData()
+
+# this is needed by migrate so it can locate 'records.record_id'
+records = \
+ Table ( 'records', metadata,
+ Column ('record_id', Integer, primary_key=True),
+ )
+
+# slice x user (researchers) association
+slice_researcher_table = \
+ Table ( 'slice_researcher', metadata,
+ Column ('slice_id', Integer, ForeignKey ('records.record_id'), primary_key=True),
+ Column ('researcher_id', Integer, ForeignKey ('records.record_id'), primary_key=True),
+ )
+
+def upgrade(migrate_engine):
+ metadata.bind = migrate_engine
+ slice_researcher_table.create()
+
+def downgrade(migrate_engine):
+ metadata.bind = migrate_engine
+ slice_researcher_table.drop()
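+# usage note (hedged): once the repository is under version control - see
+# DBSchema.init_or_upgrade - migrate applies this script through something like
+#   migrate.versioning.api.upgrade(url, '/usr/share/sfa/migrations')
+# and reverts it with the matching downgrade() call; migrate binds the engine
+# itself, which is why upgrade()/downgrade() only receive migrate_engine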
--- /dev/null
+from types import StringTypes
+from datetime import datetime
+
+from sqlalchemy import Column, Integer, String, DateTime
+from sqlalchemy import Table, Column, MetaData, join, ForeignKey
+from sqlalchemy.orm import relationship, backref
+from sqlalchemy.orm import column_property
+from sqlalchemy.orm import object_mapper
+from sqlalchemy.orm import validates
+from sqlalchemy.ext.declarative import declarative_base
+
+from sfa.util.sfalogging import logger
+from sfa.util.sfatime import utcparse, datetime_to_string
+from sfa.util.xml import XML
+
+from sfa.trust.gid import GID
+
+##############################
+Base=declarative_base()
+
+####################
+# dicts vs objects
+####################
+# historically the front end to the db dealt with dicts, so the code was only dealing with dicts
+# sqlalchemy however offers an object interface, meaning that you write obj.id instead of obj['id']
+# which is admittedly much nicer
+# however we still need to deal with dictionaries if only for the xmlrpc layer
+#
+# here are a few utilities for this
+#
+# (*) first off, when an old piece of code needs to be used as-is, if only temporarily, the simplest trick
+# is to use obj.__dict__
+# this behaves exactly as required, i.e. obj.__dict__['field']='new value' does change obj.field
+# however this depends on sqlalchemy's implementation so it should be avoided
+#
+# (*) second, when an object needs to be exposed to the xmlrpc layer, we need to convert it into a dict
+# remember though that writing the resulting dictionary won't change the object
+# essentially obj.__dict__ would be fine too, except that we want to discard alchemy private keys starting with '_'
+# 2 ways are provided for that:
+# . dict(obj)
+# . obj.todict()
+# the former dict(obj) relies on __iter__() and next() below, and does not rely on the field names
+# although it seems to work fine, I've found cases where it issues a weird python error that I could not get right
+# so the latter obj.todict() seems more reliable but more hacky, as it relies on the form of 'fields', so this can probably be improved
+#
+# (*) finally for converting a dictionary into an sqlalchemy object, we provide
+# obj.load_from_dict(dict)
+
+class AlchemyObj:
+ def __iter__(self):
+ self._i = iter(object_mapper(self).columns)
+ return self
+ def next(self):
+ n = self._i.next().name
+ return n, getattr(self, n)
+ def todict (self):
+ d=self.__dict__
+ keys=[k for k in d.keys() if not k.startswith('_')]
+ return dict ( [ (k,d[k]) for k in keys ] )
+ def load_from_dict (self, d):
+ for (k,v) in d.iteritems():
+ # experimental
+ if isinstance(v, StringTypes) and v.lower() in ['true']: v=True
+ if isinstance(v, StringTypes) and v.lower() in ['false']: v=False
+ setattr(self,k,v)
+
+ def validate_datetime (self, key, incoming):
+ if isinstance (incoming, datetime): return incoming
+ elif isinstance (incoming, (int,float)):return datetime.fromtimestamp (incoming)
+
+ # in addition we provide convenience for converting to and from xml records
+ # for this purpose only, we need the subclasses to define 'fields' as either
+ # a list or a dictionary
+ def xml_fields (self):
+ fields=self.fields
+ if isinstance(fields,dict): fields=fields.keys()
+ return fields
+
+    def save_as_xml (self):
+        # xxx not sure about the scope here
+        input_dict = dict( [ (key, getattr(self, key)) for key in self.xml_fields() if getattr(self,key,None) ] )
+        xml_record=XML("<record />")
+        xml_record.parse_dict (input_dict)
+        return xml_record.toxml()
+
+    def dump(self, format=None, dump_parents=False):
+        if not format:
+            format = 'text'
+        else:
+            format = format.lower()
+        if format == 'text':
+            self.dump_text(dump_parents)
+        elif format == 'xml':
+            # save_as_xml is the xml serializer defined above
+            print self.save_as_xml()
+        elif format == 'simple':
+            print self.dump_simple()
+        else:
+            raise Exception, "Invalid format %s" % format
+
+ def dump_text(self, dump_parents=False):
+ # print core fields in this order
+ core_fields = [ 'hrn', 'type', 'authority', 'date_created', 'last_updated', 'gid', ]
+ print "".join(['=' for i in range(40)])
+ print "RECORD"
+ print " hrn:", self.hrn
+ print " type:", self.type
+ print " authority:", self.authority
+ date_created = utcparse(datetime_to_string(self.date_created))
+ print " date created:", date_created
+ last_updated = utcparse(datetime_to_string(self.last_updated))
+ print " last updated:", last_updated
+ print " gid:"
+ print self.get_gid_object().dump_string(8, dump_parents)
+
+ # print remaining fields
+ for attrib_name in dir(self):
+ attrib = getattr(self, attrib_name)
+ # skip internals
+ if attrib_name.startswith('_'): continue
+ # skip core fields
+ if attrib_name in core_fields: continue
+ # skip callables
+ if callable (attrib): continue
+ print " %s: %s" % (attrib_name, attrib)
+
+ def dump_simple(self):
+ return "%s"%self
+
+# # only intended for debugging
+# def inspect (self, logger, message=""):
+# logger.info("%s -- Inspecting AlchemyObj -- attrs"%message)
+# for k in dir(self):
+# if not k.startswith('_'):
+# logger.info (" %s: %s"%(k,getattr(self,k)))
+# logger.info("%s -- Inspecting AlchemyObj -- __dict__"%message)
+# d=self.__dict__
+# for (k,v) in d.iteritems():
+# logger.info("[%s]=%s"%(k,v))
+
+
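+# a short sketch of the conversions discussed above (names and values are
+# illustrative; RegUser is defined further down in this module):
+#   user = RegUser (hrn='plc.site.jdoe', email='jdoe@foo.com')
+#   d1 = user.todict()                        # plain dict, fit for the xmlrpc layer
+#   d2 = dict(user)                           # same idea, through __iter__/next above
+#   user.load_from_dict ({'email': 'new@foo.com'})    # writes back onto the object
+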
+##############################
+# various kinds of records are implemented as an inheritance hierarchy
+# RegRecord is the base class for all actual variants
+# a first draft was using 'type' as the discriminator for the inheritance
+# but we had to define another more internal column (classtype) so we can
+# accommodate variants in types like authority+am and the like
+
+class RegRecord (Base,AlchemyObj):
+ __tablename__ = 'records'
+ record_id = Column (Integer, primary_key=True)
+ # this is the discriminator that tells which class to use
+ classtype = Column (String)
+ # in a first version type was the discriminator
+    # but that could not accommodate 'authority+sa' and the like
+ type = Column (String)
+ hrn = Column (String)
+ gid = Column (String)
+ authority = Column (String)
+ peer_authority = Column (String)
+ pointer = Column (Integer, default=-1)
+ date_created = Column (DateTime)
+ last_updated = Column (DateTime)
+    # use the 'classtype' column to decide which subclass the object is of
+ __mapper_args__ = { 'polymorphic_on' : classtype }
+
+ fields = [ 'type', 'hrn', 'gid', 'authority', 'peer_authority' ]
+ def __init__ (self, type=None, hrn=None, gid=None, authority=None, peer_authority=None,
+ pointer=None, dict=None):
+ if type: self.type=type
+ if hrn: self.hrn=hrn
+ if gid:
+ if isinstance(gid, StringTypes): self.gid=gid
+ else: self.gid=gid.save_to_string(save_parents=True)
+ if authority: self.authority=authority
+ if peer_authority: self.peer_authority=peer_authority
+ if pointer: self.pointer=pointer
+ if dict: self.load_from_dict (dict)
+
+ def __repr__(self):
+ result="<Record id=%s, type=%s, hrn=%s, authority=%s, pointer=%s" % \
+ (self.record_id, self.type, self.hrn, self.authority, self.pointer)
+ # skip the uniform '--- BEGIN CERTIFICATE --' stuff
+ if self.gid: result+=" gid=%s..."%self.gid[28:36]
+ else: result+=" nogid"
+ result += ">"
+ return result
+
+ @validates ('gid')
+ def validate_gid (self, key, gid):
+ if gid is None: return
+ elif isinstance(gid, StringTypes): return gid
+ else: return gid.save_to_string(save_parents=True)
+
+ @validates ('date_created')
+ def validate_date_created (self, key, incoming): return self.validate_datetime (key, incoming)
+
+ @validates ('last_updated')
+ def validate_last_updated (self, key, incoming): return self.validate_datetime (key, incoming)
+
+ # xxx - there might be smarter ways to handle get/set'ing gid using validation hooks
+ def get_gid_object (self):
+ if not self.gid: return None
+ else: return GID(string=self.gid)
+
+ def just_created (self):
+ now=datetime.now()
+ self.date_created=now
+ self.last_updated=now
+
+ def just_updated (self):
+ now=datetime.now()
+ self.last_updated=now
+
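+# polymorphic loading sketch: 'classtype' being the discriminator, querying the
+# base class hands back instances of the matching subclass, e.g. (illustrative)
+#   dbsession.query(RegRecord).filter_by(hrn='plc.site.myslice').first()
+# returns a RegSlice object when that row was stored with classtype='slice'
+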
+##############################
+# all subclasses define a convenience constructor with a default value for type,
+# and when applicable a way to define local fields in a kwd=value argument
+####################
+class RegAuthority (RegRecord):
+ __tablename__ = 'authorities'
+ __mapper_args__ = { 'polymorphic_identity' : 'authority' }
+ record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)
+
+ def __init__ (self, **kwds):
+ # fill in type if not previously set
+ if 'type' not in kwds: kwds['type']='authority'
+ # base class constructor
+ RegRecord.__init__(self, **kwds)
+
+ # no proper data yet, just hack the typename
+ def __repr__ (self):
+ return RegRecord.__repr__(self).replace("Record","Authority")
+
+####################
+# slice x user (researchers) association
+slice_researcher_table = \
+ Table ( 'slice_researcher', Base.metadata,
+ Column ('slice_id', Integer, ForeignKey ('records.record_id'), primary_key=True),
+ Column ('researcher_id', Integer, ForeignKey ('records.record_id'), primary_key=True),
+ )
+
+####################
+class RegSlice (RegRecord):
+ __tablename__ = 'slices'
+ __mapper_args__ = { 'polymorphic_identity' : 'slice' }
+ record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)
+ #### extensions come here
+ reg_researchers = relationship \
+ ('RegUser',
+ secondary=slice_researcher_table,
+ primaryjoin=RegRecord.record_id==slice_researcher_table.c.slice_id,
+ secondaryjoin=RegRecord.record_id==slice_researcher_table.c.researcher_id,
+ backref="reg_slices_as_researcher")
+
+ def __init__ (self, **kwds):
+ if 'type' not in kwds: kwds['type']='slice'
+ RegRecord.__init__(self, **kwds)
+
+ def __repr__ (self):
+ return RegRecord.__repr__(self).replace("Record","Slice")
+
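+# what the association table above buys us, sketched:
+#   a_slice.reg_researchers            -> list of RegUser objects
+#   a_user.reg_slices_as_researcher    -> list of RegSlice objects (via the backref)
+# both sides are kept consistent by sqlalchemy through slice_researcher
+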
+####################
+class RegNode (RegRecord):
+ __tablename__ = 'nodes'
+ __mapper_args__ = { 'polymorphic_identity' : 'node' }
+ record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)
+
+ def __init__ (self, **kwds):
+ if 'type' not in kwds: kwds['type']='node'
+ RegRecord.__init__(self, **kwds)
+
+ def __repr__ (self):
+ return RegRecord.__repr__(self).replace("Record","Node")
+
+####################
+class RegUser (RegRecord):
+ __tablename__ = 'users'
+ # these objects will have type='user' in the records table
+ __mapper_args__ = { 'polymorphic_identity' : 'user' }
+ record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)
+ #### extensions come here
+ email = Column ('email', String)
+ # can't use name 'keys' here because when loading from xml we're getting
+ # a 'keys' tag, and assigning a list of strings in a reference column like this crashes
+ reg_keys = relationship \
+ ('RegKey', backref='reg_user',
+ cascade="all, delete, delete-orphan")
+
+ # so we can use RegUser (email=.., hrn=..) and the like
+ def __init__ (self, **kwds):
+ # handle local settings
+ if 'email' in kwds: self.email=kwds.pop('email')
+ if 'type' not in kwds: kwds['type']='user'
+ RegRecord.__init__(self, **kwds)
+
+ # append stuff at the end of the record __repr__
+    def __repr__ (self):
+        result = RegRecord.__repr__(self).replace("Record","User")
+        # str.replace returns a new string, so re-assign to splice the email in before '>'
+        result = result.replace (">"," email=%s>"%self.email)
+        return result
+
+ @validates('email')
+ def validate_email(self, key, address):
+ assert '@' in address
+ return address
+
+####################
+# xxx tocheck : not sure about eager loading of this one
+# meaning, when querying whole records we would expect a single query
+# to fetch all the keys, rather than one extra query per record
+class RegKey (Base):
+ __tablename__ = 'keys'
+ key_id = Column (Integer, primary_key=True)
+ record_id = Column (Integer, ForeignKey ("records.record_id"))
+ key = Column (String)
+ pointer = Column (Integer, default = -1)
+
+ def __init__ (self, key, pointer=None):
+ self.key=key
+ if pointer: self.pointer=pointer
+
+ def __repr__ (self):
+ result="<key id=%s key=%s..."%(self.key_id,self.key[8:16],)
+ try: result += " user=%s"%self.reg_user.record_id
+ except: result += " no-user"
+ result += ">"
+ return result
+
+##############################
+# although the db needs of course to be reachable for the following functions
+# the schema management functions are here and not in alchemy
+# because the actual details of the classes need to be known
+# migrations: this code has no notion of the previous versions
+# of the data model nor of migrations
+# sfa.storage.migrations.db_init uses this when starting from
+# a fresh db only
+def init_tables(engine):
+ logger.info("Initializing db schema from current/latest model")
+ Base.metadata.create_all(engine)
+
+def drop_tables(engine):
+ logger.info("Dropping tables from current/latest model")
+ Base.metadata.drop_all(engine)
+
+##############################
+# create a record of the right type from either a dict or an xml string
+def make_record (dict={}, xml=""):
+ if dict: return make_record_dict (dict)
+ elif xml: return make_record_xml (xml)
+ else: raise Exception("make_record has no input")
+
+# convert an incoming record - typically from xmlrpc - into an object
+def make_record_dict (record_dict):
+ assert ('type' in record_dict)
+ type=record_dict['type'].split('+')[0]
+ if type=='authority':
+ result=RegAuthority (dict=record_dict)
+ elif type=='user':
+ result=RegUser (dict=record_dict)
+ elif type=='slice':
+ result=RegSlice (dict=record_dict)
+ elif type=='node':
+ result=RegNode (dict=record_dict)
+ else:
+ logger.debug("Untyped RegRecord instance")
+ result=RegRecord (dict=record_dict)
+ logger.info ("converting dict into Reg* with type=%s"%type)
+ logger.info ("returning=%s"%result)
+ # xxx todo
+ # register non-db attributes in an extensions field
+ return result
+
+def make_record_xml (xml):
+ xml_record = XML(xml)
+ xml_dict = xml_record.todict()
+ logger.info("load from xml, keys=%s"%xml_dict.keys())
+ return make_record_dict (xml_dict)
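+
+# sketch: an incoming record - typically a dict from the xmlrpc layer, or an xml
+# string - gets turned into the right class, e.g. (illustrative values)
+#   record = make_record (dict={'type':'user', 'hrn':'plc.site.jdoe'})   # -> RegUser
+#   record = make_record (xml=some_xml_string)                           # same, via XML()
+# unknown types fall back to a plain RegRecord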
+
+++ /dev/null
-##
-# Implements support for SFA records
-#
-# TODO: Use existing PLC database methods? or keep this separate?
-##
-
-from types import StringTypes
-from sfa.trust.gid import GID
-from sfa.storage.parameter import Parameter
-from sfa.util.xrn import get_authority
-from sfa.storage.row import Row
-from sfa.util.xml import XML
-from sfa.util.sfalogging import logger
-from sfa.util.sfatime import utcparse, datetime_to_string
-
-class SfaRecord(Row):
- """
- The SfaRecord class implements an SFA Record. A SfaRecord is a tuple
- (Hrn, GID, Type, Info).
-
- Hrn specifies the Human Readable Name of the object
- GID is the GID of the object
- Type is user | authority | slice | component
-
- Info is comprised of the following sub-fields
- pointer = a pointer to the record in the PL database
-
- The pointer is interpreted depending on the type of the record. For example,
- if the type=="user", then pointer is assumed to be a person_id that indexes
- into the persons table.
-
- A given HRN may have more than one record, provided that the records are
- of different types.
- """
-
-# table_name = 'sfa'
-# primary_key = 'record_id'
-
- ### the wsdl generator assumes this is named 'fields'
- internal_fields = {
- 'record_id': Parameter(int, "An id that uniquely identifies this record", ro=True),
- 'pointer': Parameter(int, "An id that uniquely identifies this record in an external database")
- }
-
- fields = {
- 'authority': Parameter(str, "The authority for this record"),
- 'peer_authority': Parameter(str, "The peer authority for this record"),
- 'hrn': Parameter(str, "Human readable name of object"),
- 'gid': Parameter(str, "GID of the object"),
- 'type': Parameter(str, "Record type"),
- 'last_updated': Parameter(int, "Date and time of last update", ro=True),
- 'date_created': Parameter(int, "Date and time this record was created", ro=True),
- }
- all_fields = dict(fields.items() + internal_fields.items())
- ##
- # Create an SFA Record
- #
- # @param name if !=None, assign the name of the record
- # @param gid if !=None, assign the gid of the record
- # @param type one of user | authority | slice | component
- # @param pointer is a pointer to a PLC record
- # @param dict if !=None, then fill in this record from the dictionary
-
- def __init__(self, hrn=None, gid=None, type=None, pointer=None, authority=None,
- peer_authority=None, dict=None, string=None):
- self.dirty = True
- self.hrn = None
- self.gid = None
- self.type = None
- self.pointer = None
- self.set_peer_auth(peer_authority)
- self.set_authority(authority)
- if hrn:
- self.set_name(hrn)
- if gid:
- self.set_gid(gid)
- if type:
- self.set_type(type)
- if pointer:
- self.set_pointer(pointer)
- if dict:
- self.load_from_dict(dict)
- if string:
- self.load_from_string(string)
-
-
- def validate_last_updated(self, last_updated):
- return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
-
- def update(self, new_dict):
- if isinstance(new_dict, list):
- new_dict = new_dict[0]
-
- # Convert any boolean strings to real bools
- for key in new_dict:
- if isinstance(new_dict[key], StringTypes):
- if new_dict[key].lower() in ["true"]:
- new_dict[key] = True
- elif new_dict[key].lower() in ["false"]:
- new_dict[key] = False
- dict.update(self, new_dict)
-
- ##
- # Set the name of the record
- #
- # @param hrn is a string containing the HRN
-
- def set_name(self, hrn):
- """
- Set the name of the record
- """
- self.hrn = hrn
- self['hrn'] = hrn
- self.dirty = True
-
- def set_authority(self, authority):
- """
- Set the authority
- """
- if not authority:
- authority = ""
- self.authority = authority
- self['authority'] = authority
- self.dirty = True
-
-
- ##
- # Set the GID of the record
- #
- # @param gid is a GID object or the string representation of a GID object
-
- def set_gid(self, gid):
- """
- Set the GID of the record
- """
-
- if isinstance(gid, StringTypes):
- self.gid = gid
- self['gid'] = gid
- else:
- self.gid = gid.save_to_string(save_parents=True)
- self['gid'] = gid.save_to_string(save_parents=True)
- self.dirty = True
-
- ##
- # Set the type of the record
- #
- # @param type is a string: user | authority | slice | component
-
- def set_type(self, type):
- """
- Set the type of the record
- """
- self.type = type
- self['type'] = type
- self.dirty = True
-
- ##
- # Set the pointer of the record
- #
- # @param pointer is an integer containing the ID of a PLC record
-
- def set_pointer(self, pointer):
- """
- Set the pointer of the record
- """
- self.pointer = pointer
- self['pointer'] = pointer
- self.dirty = True
-
-
- def set_peer_auth(self, peer_authority):
- self.peer_authority = peer_authority
- self['peer_authority'] = peer_authority
- self.dirty = True
-
- ##
- # Return the name (HRN) of the record
-
- def get_name(self):
- """
- Return the name (HRN) of the record
- """
- return self.hrn
-
- ##
- # Return the type of the record
-
- def get_type(self):
- """
- Return the type of the record
- """
- return self.type
-
- ##
- # Return the pointer of the record. The pointer is an integer that may be
- # used to look up the record in the PLC database. The evaluation of pointer
- # depends on the type of the record
-
- def get_pointer(self):
- """
- Return the pointer of the record. The pointer is an integer that may be
- used to look up the record in the PLC database. The evaluation of pointer
- depends on the type of the record
- """
- return self.pointer
-
- ##
- # Return the GID of the record, in the form of a GID object
- # TODO: not the best name for the function, because we have things called
- # gidObjects in the Cred
-
- def get_gid_object(self):
- """
- Return the GID of the record, in the form of a GID object
- """
- return GID(string=self.gid)
-
- ##
- # Returns the value of a field
-
- def get_field(self, fieldname, default=None):
- # sometimes records act like classes, and sometimes they act like dicts
- try:
- return getattr(self, fieldname)
- except AttributeError:
- try:
- return self[fieldname]
- except KeyError:
- if default != None:
- return default
- else:
- raise
-
- ##
- # Returns a list of field names in this record.
-
- def get_field_names(self):
- """
- Returns a list of field names in this record.
- """
- return self.fields.keys()
-
- ##
- # Given a field name ("hrn", "gid", ...) return the value of that field.
- #
- # @param fieldname is the name of field to be returned
-
- def get_field_value_string(self, fieldname):
- """
- Given a field name ("hrn", "gid", ...) return the value of that field.
- """
- if fieldname == "authority":
- val = get_authority(self['hrn'])
- else:
- try:
- val = getattr(self, fieldname)
- except:
- val = self[fieldname]
- if isinstance(val, str):
- return "'" + str(val) + "'"
- else:
- return str(val)
-
- ##
- # Given a list of field names, return a list of values for those public.
- #
- # @param fieldnames is a list of field names
-
- def get_field_value_strings(self, fieldnames):
- """
- Given a list of field names, return a list of values for those public.
- """
- return [ self.get_field_value_string (fieldname) for fieldname in fieldnames ]
-
- ##
- # Return the record in the form of a dictionary
-
- def as_dict(self):
- """
- Return the record in the form of a dictionary
- """
- return dict(self)
-
- ##
- # Load the record from a dictionary
- #
- # @param dict dictionary to load record public from
-
- def load_from_dict(self, dict):
- """
- Load the record from a dictionary
- """
-
- self.set_name(dict['hrn'])
- gidstr = dict.get("gid", None)
- if gidstr:
- self.set_gid(dict['gid'])
-
- if "pointer" in dict:
- self.set_pointer(dict['pointer'])
-
- self.set_type(dict['type'])
- self.update(dict)
-
- ##
- # Save the record to a string. The string contains an XML representation of
- # the record.
-
- def save_to_string(self):
- """
- Save the record to a string. The string contains an XML representation of
- the record.
- """
- recorddict = self.as_dict()
- filteredDict = dict([(key, val) for (key, val) in recorddict.iteritems() if key in self.fields.keys()])
- xml_record = XML('<record/>')
- xml_record.parse_dict(filteredDict)
- str = xml_record.toxml()
- return str
-
- ##
- # Load the record from a string. The string is assumed to contain an XML
- # representation of the record.
-
- def load_from_string(self, str):
- """
- Load the record from a string. The string is assumed to contain an XML
- representation of the record.
- """
- #dict = xmlrpclib.loads(str)[0][0]
-
- xml_record = XML(str)
- self.load_from_dict(xml_record.todict())
-
- ##
- # Dump the record to stdout
- #
- # @param dump_parents if true, then the parents of the GID will be dumped
-
- def dump_text(self, dump_parents=False):
- """
- Walk tree and dump records.
- """
- # print core fields in this order
- print "".join(['=' for i in range(40)])
- print "RECORD"
- print " hrn:", self.get('hrn')
- print " type:", self.get('type')
- print " authority:", self.get('authority')
- date_created = datetime_to_string(utcparse(self.get('date_created')))
- print " date created:", date_created
- last_updated = datetime_to_string(utcparse(self.get('last_updated')))
- print " last updated:", last_updated
- print " gid:"
- print "\t\t", self.get_gid_object().dump_string(8, dump_parents)
-
- # print remaining fields
- all_fields = set(UserRecord.fields.keys() +
- AuthorityRecord.fields.keys() +
- SliceRecord.fields.keys() +
- NodeRecord.fields.keys())
- for field in self:
- # dont print core fields
- if field in all_fields and field not in SfaRecord.fields:
- print " %s: %s" % (field, self[field])
-
- def dump(self, format=None, dump_parents=False):
- if not format:
- format = 'text'
- else:
- format = format.lower()
-
- if format == 'text':
- self.dump_text(dump_parents)
- elif format == 'xml':
- print self.save_to_string()
- elif format == 'summary':
- print self.summary_string()
- else:
- raise Exception, "Invalid format %s" % format
-
- def summary_string(self):
- return "Record(record_id=%s, hrn=%s, type=%s, authority=%s, pointer=%s)" % \
- (self.get('record_id'), self.get('hrn'), self.get('type'), self.get('authority'), \
- self.get('pointer'))
-
- def getdict(self):
- return dict(self)
-
- def sync(self):
- """
- Sync this record with the database.
- """
- from sfa.storage.table import SfaTable
- table = SfaTable()
- filter = {}
- if self.get('record_id'):
- filter['record_id'] = self.get('record_id')
- if self.get('hrn') and self.get('type'):
- filter['hrn'] = self.get('hrn')
- filter['type'] = self.get('type')
- if self.get('pointer'):
- filter['pointer'] = self.get('pointer')
- existing_records = table.find(filter)
- if not existing_records:
- table.insert(self)
- else:
- existing_record = existing_records[0]
- self['record_id'] = existing_record['record_id']
- table.update(self)
-
- def delete(self):
- """
- Remove record from the database.
- """
- from sfa.storage.table import SfaTable
- table = SfaTable()
- filter = {}
- if self.get('record_id'):
- filter['record_id'] = self.get('record_id')
- if self.get('hrn') and self.get('type'):
- filter['hrn'] = self.get('hrn')
- filter['type'] = self.get('type')
- if self.get('pointer'):
- filter['pointer'] = self.get('pointer')
- if filter:
- existing_records = table.find(filter)
- for record in existing_records:
- table.remove(record)
-
-class UserRecord(SfaRecord):
-
- fields = {
- 'email': Parameter(str, 'email'),
- 'first_name': Parameter(str, 'First name'),
- 'last_name': Parameter(str, 'Last name'),
- 'phone': Parameter(str, 'Phone Number'),
- 'keys': Parameter(str, 'Public key'),
- 'slices': Parameter([str], 'List of slices this user belongs to'),
- }
- fields.update(SfaRecord.fields)
-
-class SliceRecord(SfaRecord):
- fields = {
- 'name': Parameter(str, 'Slice name'),
- 'url': Parameter(str, 'Slice url'),
- 'expires': Parameter(int, 'Date and time this slice exipres'),
- 'researcher': Parameter([str], 'List of users for this slice'),
- 'PI': Parameter([str], 'List of PIs responsible for this slice'),
- 'description': Parameter([str], 'Description of this slice'),
- }
- fields.update(SfaRecord.fields)
-
-
-class NodeRecord(SfaRecord):
- fields = {
- 'hostname': Parameter(str, 'This nodes dns name'),
- 'node_type': Parameter(str, 'Type of node this is'),
- 'latitude': Parameter(str, 'latitude'),
- 'longitude': Parameter(str, 'longitude'),
- }
- fields.update(SfaRecord.fields)
-
-
-class AuthorityRecord(SfaRecord):
- fields = {
- 'name': Parameter(str, 'Name'),
- 'login_base': Parameter(str, 'login base'),
- 'enabled': Parameter(bool, 'Is this site enabled'),
- 'url': Parameter(str, 'URL'),
- 'nodes': Parameter([str], 'List of nodes at this site'),
- 'operator': Parameter([str], 'List of operators'),
- 'researcher': Parameter([str], 'List of researchers'),
- 'PI': Parameter([str], 'List of Principal Investigators'),
- }
- fields.update(SfaRecord.fields)
-
-
+++ /dev/null
-
-class Row(dict):
-
- # Set this to the name of the table that stores the row.
- # e.g. table_name = "nodes"
- table_name = None
-
- # Set this to the name of the primary key of the table. It is
- # assumed that the this key is a sequence if it is not set when
- # sync() is called.
- # e.g. primary_key="record_id"
- primary_key = None
-
- # Set this to the names of tables that reference this table's
- # primary key.
- join_tables = []
-
- def validate(self):
- """
- Validates values. Will validate a value with a custom function
- if a function named 'validate_[key]' exists.
- """
- # Warn about mandatory fields
- # XX TODO: Support checking for mandatory fields later
- #mandatory_fields = self.db.fields(self.table_name, notnull = True, hasdef = False)
- #for field in mandatory_fields:
- # if not self.has_key(field) or self[field] is None:
- # raise SfaInvalidArgument, field + " must be specified and cannot be unset in class %s"%self.__class__.__name__
-
- # Validate values before committing
- for (key, value) in self.iteritems():
- if value is not None and hasattr(self, 'validate_' + key):
- validate = getattr(self, 'validate_' + key)
- self[key] = validate(value)
-
-
- def validate_timestamp(self, timestamp, check_future = False):
- """
- Validates the specified GMT timestamp string (must be in
- %Y-%m-%d %H:%M:%S format) or number (seconds since UNIX epoch,
- i.e., 1970-01-01 00:00:00 GMT). If check_future is True,
- raises an exception if timestamp is not in the future. Returns
- a GMT timestamp string.
- """
-
- time_format = "%Y-%m-%d %H:%M:%S"
- if isinstance(timestamp, StringTypes):
- # calendar.timegm() is the inverse of time.gmtime()
- timestamp = calendar.timegm(time.strptime(timestamp, time_format))
-
- # Human readable timestamp string
- human = time.strftime(time_format, time.gmtime(timestamp))
-
- if check_future and timestamp < time.time():
- raise SfaInvalidArgument, "'%s' not in the future" % human
-
- return human
-
+++ /dev/null
---
--- SFA database schema
---
-
-SET client_encoding = 'UNICODE';
-
---------------------------------------------------------------------------------
--- Version
---------------------------------------------------------------------------------
-
--- Database version
-CREATE TABLE sfa_db_version (
- version integer NOT NULL,
- subversion integer NOT NULL DEFAULT 0
-) WITH OIDS;
-
--- the migration scripts do not use the major 'version' number
--- so 5.0 sets subversion at 100
--- in case your database misses the site and persons tags feature,
--- you might wish to first upgrade to 4.3-rc16 before moving to some 5.0
--- or run the up script here
--- http://svn.planet-lab.org/svn/PLCAPI/branches/4.3/migrations/
-
-INSERT INTO sfa_db_version (version, subversion) VALUES (1, 1);
-
---------------------------------------------------------------------------------
--- Aggregates and store procedures
---------------------------------------------------------------------------------
-
--- Like MySQL GROUP_CONCAT(), this function aggregates values into a
--- PostgreSQL array.
-CREATE AGGREGATE array_accum (
- sfunc = array_append,
- basetype = anyelement,
- stype = anyarray,
- initcond = '{}'
-);
-
--- Valid record types
-CREATE TABLE record_types (
- record_type text PRIMARY KEY
-) WITH OIDS;
-INSERT INTO record_types (record_type) VALUES ('authority');
-INSERT INTO record_types (record_type) VALUES ('authority+sa');
-INSERT INTO record_types (record_type) VALUES ('authority+am');
-INSERT INTO record_types (record_type) VALUES ('authority+sm');
-INSERT INTO record_types (record_type) VALUES ('user');
-INSERT INTO record_types (record_type) VALUES ('slice');
-INSERT INTO record_types (record_type) VALUES ('node');
-
-
--- main table
-CREATE TABLE records (
- record_id serial PRIMARY KEY ,
- hrn text NOT NULL,
- authority text NOT NULL,
- peer_authority text,
- gid text,
- type text REFERENCES record_types,
- pointer integer,
- date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
- last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP
-);
-CREATE INDEX sfa_hrn_ids on records (hrn);
-CREATE INDEX sfa_type_ids on records (type);
-CREATE INDEX sfa_authority_ids on records (authority);
-CREATE INDEX sfa_peer_authority_ids on records (peer_authority);
-CREATE INDEX sfa_pointer_ids on records (pointer);
+++ /dev/null
-#
-# implements support for SFA records stored in db tables
-#
-# TODO: Use existing PLC database methods? or keep this separate?
-
-from types import StringTypes
-
-from sfa.util.config import Config
-
-from sfa.storage.parameter import Parameter
-from sfa.storage.filter import Filter
-from sfa.storage.PostgreSQL import PostgreSQL
-from sfa.storage.record import SfaRecord, AuthorityRecord, NodeRecord, SliceRecord, UserRecord
-
-class SfaTable(list):
-
- SFA_TABLE_PREFIX = "records"
-
- def __init__(self, record_filter = None):
-
- # pgsql doesn't like table names with "." in them, to replace it with "$"
- self.tablename = SfaTable.SFA_TABLE_PREFIX
- self.config = Config()
- self.db = PostgreSQL(self.config)
-
- if record_filter:
- records = self.find(record_filter)
- for record in records:
- self.append(record)
-
- def db_fields(self, obj=None):
-
- db_fields = self.db.fields(self.SFA_TABLE_PREFIX)
- return dict( [ (key,value) for (key, value) in obj.iteritems() \
- if key in db_fields and
- self.is_writable(key, value, SfaRecord.fields)] )
-
- @staticmethod
- def is_writable (key,value,dict):
- # if not mentioned, assume it's writable (e.g. deleted ...)
- if key not in dict: return True
- # if mentioned but not linked to a Parameter object, idem
- if not isinstance(dict[key], Parameter): return True
- # if not marked ro, it's writable
- if not dict[key].ro: return True
-
- return False
-
-
- def clear (self):
- self.db.do("DELETE from %s"%self.tablename)
- self.db.commit()
-
- # what sfa-nuke does
- def nuke (self):
- self.clear()
-
- def remove(self, record):
- params = {'record_id': record['record_id']}
- template = "DELETE FROM %s " % self.tablename
- sql = template + "WHERE record_id = %(record_id)s"
- self.db.do(sql, params)
-
- # if this is a site, remove all records where 'authority' == the
- # site's hrn
- if record['type'] == 'authority':
- params = {'authority': record['hrn']}
- sql = template + "WHERE authority = %(authority)s"
- self.db.do(sql, params)
- self.db.commit()
-
- def insert(self, record):
- db_fields = self.db_fields(record)
- keys = db_fields.keys()
- values = [self.db.param(key, value) for (key, value) in db_fields.iteritems()]
- query_str = "INSERT INTO " + self.tablename + \
- "(" + ",".join(keys) + ") " + \
- "VALUES(" + ",".join(values) + ")"
- self.db.do(query_str, db_fields)
- self.db.commit()
- result = self.find({'hrn': record['hrn'], 'type': record['type'], 'peer_authority': record['peer_authority']})
- if not result:
- record_id = None
- elif isinstance(result, list):
- record_id = result[0]['record_id']
- else:
- record_id = result['record_id']
-
- return record_id
-
- def update(self, record):
- db_fields = self.db_fields(record)
- keys = db_fields.keys()
- values = [self.db.param(key, value) for (key, value) in db_fields.iteritems()]
- columns = ["%s = %s" % (key, value) for (key, value) in zip(keys, values)]
- query_str = "UPDATE %s SET %s WHERE record_id = %s" % \
- (self.tablename, ", ".join(columns), record['record_id'])
- self.db.do(query_str, db_fields)
- self.db.commit()
-
- def quote_string(self, value):
- return str(self.db.quote(value))
-
- def quote(self, value):
- return self.db.quote(value)
-
- def find(self, record_filter = None, columns=None):
- if not columns:
- columns = "*"
- else:
- columns = ",".join(columns)
- sql = "SELECT %s FROM %s WHERE True " % (columns, self.tablename)
-
- if isinstance(record_filter, (list, tuple, set)):
- ints = filter(lambda x: isinstance(x, (int, long)), record_filter)
- strs = filter(lambda x: isinstance(x, StringTypes), record_filter)
- record_filter = Filter(SfaRecord.all_fields, {'record_id': ints, 'hrn': strs})
- sql += "AND (%s) %s " % record_filter.sql("OR")
- elif isinstance(record_filter, dict):
- record_filter = Filter(SfaRecord.all_fields, record_filter)
- sql += " AND (%s) %s" % record_filter.sql("AND")
- elif isinstance(record_filter, StringTypes):
- record_filter = Filter(SfaRecord.all_fields, {'hrn':[record_filter]})
- sql += " AND (%s) %s" % record_filter.sql("AND")
- elif isinstance(record_filter, int):
- record_filter = Filter(SfaRecord.all_fields, {'record_id':[record_filter]})
- sql += " AND (%s) %s" % record_filter.sql("AND")
-
- results = self.db.selectall(sql)
- if isinstance(results, dict):
- results = [results]
- return results
-
- def findObjects(self, record_filter = None, columns=None):
-
- results = self.find(record_filter, columns)
- result_rec_list = []
- for result in results:
- if result['type'] in ['authority']:
- result_rec_list.append(AuthorityRecord(dict=result))
- elif result['type'] in ['node']:
- result_rec_list.append(NodeRecord(dict=result))
- elif result['type'] in ['slice']:
- result_rec_list.append(SliceRecord(dict=result))
- elif result['type'] in ['user']:
- result_rec_list.append(UserRecord(dict=result))
- else:
- result_rec_list.append(SfaRecord(dict=result))
- return result_rec_list
-
-
self.decode()
return self.gidObject.get_printable_subject()
+ # sounds like this should be __repr__ instead ??
def get_summary_tostring(self):
if not self.gidObject:
self.decode()
# subdirectory are several files:
# *.GID - GID file
# *.PKEY - private key file
-# *.DBINFO - database info
##
import os
gid_object = None
gid_filename = None
privkey_filename = None
- dbinfo_filename = None
##
# Initialize and authority object.
#
# @param xrn the human readable name of the authority (urn will be converted to hrn)
# @param gid_filename the filename containing the GID
# @param privkey_filename the filename containing the private key
- # @param dbinfo_filename the filename containing the database info
- def __init__(self, xrn, gid_filename, privkey_filename, dbinfo_filename):
+ def __init__(self, xrn, gid_filename, privkey_filename):
hrn, type = urn_to_hrn(xrn)
self.hrn = hrn
self.set_gid_filename(gid_filename)
self.privkey_filename = privkey_filename
- self.dbinfo_filename = dbinfo_filename
##
# Set the filename of the GID
def get_pkey_object(self):
return Keypair(filename = self.privkey_filename)
- ##
- # Get the dbinfo in the form of a dictionary
-
- def get_dbinfo(self):
- f = file(self.dbinfo_filename)
- dict = eval(f.read())
- f.close()
- return dict
-
##
# Replace the GID with a new one. The file specified by gid_filename is
# overwritten with the new GID object
#
# The tree is stored on disk in a hierarchical manner than reflects the
# structure of the tree. Each authority is a subdirectory, and each subdirectory
-# contains the GID, pkey, and dbinfo files for that authority (as well as
+# contains the GID and pkey files for that authority (as well as
# subdirectories for each sub-authority)
class Hierarchy:
basedir = os.path.join(self.config.SFA_DATA_DIR, "authorities")
self.basedir = basedir
##
- # Given a hrn, return the filenames of the GID, private key, and dbinfo
+    # Given a hrn, return the filenames of the GID and private key
# files.
#
# @param xrn the human readable name of the authority (urn will be convertd to hrn)
gid_filename = os.path.join(directory, leaf+".gid")
privkey_filename = os.path.join(directory, leaf+".pkey")
- dbinfo_filename = os.path.join(directory, leaf+".dbinfo")
- return (directory, gid_filename, privkey_filename, dbinfo_filename)
+ return (directory, gid_filename, privkey_filename)
##
# Check to see if an authority exists. An authority exists if it's disk
def auth_exists(self, xrn):
hrn, type = urn_to_hrn(xrn)
- (directory, gid_filename, privkey_filename, dbinfo_filename) = \
+ (directory, gid_filename, privkey_filename) = \
self.get_auth_filenames(hrn)
- return os.path.exists(gid_filename) and \
- os.path.exists(privkey_filename) and \
- os.path.exists(dbinfo_filename)
+ return os.path.exists(gid_filename) and os.path.exists(privkey_filename)
##
# Create an authority. A private key for the authority and the associated
parent_urn = hrn_to_urn(parent_hrn, 'authority')
if (parent_hrn) and (not self.auth_exists(parent_urn)) and (create_parents):
self.create_auth(parent_urn, create_parents)
- (directory, gid_filename, privkey_filename, dbinfo_filename) = \
+        (directory, gid_filename, privkey_filename) = \
self.get_auth_filenames(hrn)
# create the directory to hold the files
gid = self.create_gid(xrn, create_uuid(), pkey)
gid.save_to_file(gid_filename, save_parents=True)
- # XXX TODO: think up a better way for the dbinfo to work
-
- dbinfo = Config().get_plc_dbinfo()
- dbinfo_file = file(dbinfo_filename, "w")
- dbinfo_file.write(str(dbinfo))
- dbinfo_file.close()
-
def create_top_level_auth(self, hrn=None):
"""
Create top level records (includes root and sub authorities (local/remote)
logger.warning("Hierarchy: missing authority - xrn=%s, hrn=%s"%(xrn,hrn))
raise MissingAuthority(hrn)
- (directory, gid_filename, privkey_filename, dbinfo_filename) = \
+        (directory, gid_filename, privkey_filename) = \
self.get_auth_filenames(hrn)
- auth_info = AuthInfo(hrn, gid_filename, privkey_filename, dbinfo_filename)
+ auth_info = AuthInfo(hrn, gid_filename, privkey_filename)
# check the GID and see if it needs to be refreshed
gid = auth_info.get_gid_object()
rl.add("bind")
rl.add("control")
rl.add("info")
+# wouldn't that be authority+cm instead ?
elif type == "component":
rl.add("operator")
return rl
else:
return "plc"
- def get_plc_dbinfo(self):
- return {
- 'dbname' : self.SFA_DB_NAME,
- 'address' : self.SFA_DB_HOST,
- 'port' : self.SFA_DB_PORT,
- 'user' : self.SFA_DB_USER,
- 'password' : self.SFA_DB_PASSWORD
- }
-
# TODO: find a better place to put this method
def get_max_aggrMgr_info(self):
am_apiclient_path = '/usr/local/MAXGENI_AM_APIClient'
except:
- from sfa.util.logging import logger
+ from sfa.util.sfalogging import logger
def run_sfatables (_,__,___, rspec, ____=None):
logger.warning("Cannot import sfatables.runtime, please install package sfa-sfatables")
return rspec
# if not type:
# debug_logger.debug("type-less Xrn's are not safe")
+ def __repr__ (self):
+ result="<XRN u=%s h=%s"%(self.urn,self.hrn)
+ if hasattr(self,'leaf'): result += " leaf=%s"%self.leaf
+ if hasattr(self,'authority'): result += " auth=%s"%self.authority
+ result += ">"
+ return result
+
def get_urn(self): return self.urn
def get_hrn(self): return self.hrn
def get_type(self): return self.type
self._normalize()
return self.leaf
- def get_authority_hrn(self):
+ def get_authority_hrn(self):
self._normalize()
return '.'.join( self.authority )
from testKeypair import *
# xxx broken-test
#from testHierarchy import *
-from testRecord import *
+from testStorage import *
if __name__ == "__main__":
unittest.main()
import unittest
from sfa.trust.gid import *
from sfa.util.config import *
-from sfa.storage.record import *
+from sfa.storage.model import RegRecord
-class TestRecord(unittest.TestCase):
+class TestStorage(unittest.TestCase):
def setUp(self):
pass
def testCreate(self):
- r = SfaRecord()
+ r = RegRecord(type='authority',hrn='foo.bar')
if __name__ == "__main__":
unittest.main()