From d210f49f112e71c866e79e6b62ae71c9ec0a3af1 Mon Sep 17 00:00:00 2001 From: Thierry Parmentelat Date: Thu, 26 Jan 2012 19:39:54 +0100 Subject: [PATCH] should be able to upgrade from any version --- init.d/sfa | 45 +++--------- sfa/importer/sfa-import-plc.py | 4 +- sfa/importer/sfa-nuke-plc.py | 9 +-- sfa/server/registry.py | 9 +-- sfa/storage/alchemy.py | 9 +-- sfa/storage/dbschema.py | 110 +++++++++++++++++++++++++++++ sfa/storage/migrations/migrate.cfg | 4 +- sfa/storage/model.py | 20 +++--- 8 files changed, 146 insertions(+), 64 deletions(-) create mode 100644 sfa/storage/dbschema.py diff --git a/init.d/sfa b/init.d/sfa index 7cb510c3..31d7cd7a 100755 --- a/init.d/sfa +++ b/init.d/sfa @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash # # sfa Wraps PLCAPI into the SFA compliant API # @@ -65,43 +65,13 @@ DATE=$(date +"%Y-%m-%d-%H-%M-%S") # Dumps the database - optional argument to specify filename suffix function dump_sfa_db() { if [ -n "$1" ] ; then suffix="-$1" ; else suffix="" ; fi - dumpfile=/var/lib/pgsql/backups/$(date +"${SFA_DB_NAME}.${DATE}${suffix}.sql") + mkdir -p /usr/share/sfa/backups + dumpfile=/usr/share/sfa/backups/$(date +"${SFA_DB_NAME}.${DATE}${suffix}.sql") pg_dump -U $SFA_DB_USER $SFA_DB_NAME > $dumpfile + echo "Saved sfa database in $dumpfile" check } -# Updates the database by applying migration scripts -# using sqlalchemy-migrate for this -# migrate repo is /usr/share/sfa/migrations/ -# and version scripts are thus in /usr/share/sfa/migrations/versions/ -function db_migrate() { - - # only if enabled - [ "$SFA_DB_ENABLED" == 1 ] || return - - alchemy_url="postgresql+psycopg2://${SFA_DB_USER}:${SFA_DB_PASSWORD}@:${SFA_DB_PORT}/${SFA_DB_NAME}" - migrate_repo="/usr/share/sfa/migrations" - # check if under version control, and initialize it otherwise - if ! 
sqlalchemy-migrate db_version --url=$alchemy_url $migrate_repo >& /dev/null ; then - # at this point it would make sense to - # check that the 'users' table is in the db - # if not, backup/dump, and run a nuke/import - # and then only init the db version to 0 - echo -n $"version init in db ${SFA_DB_NAME}" - sqlalchemy-migrate version_control --url=$alchemy_url $migrate_repo - fi - version_before=$(sqlalchemy-migrate db_version --url=$alchemy_url $migrate_repo) - check - sqlalchemy-migrate upgrade --url=$alchemy_url $migrate_repo 2> /dev/null - check - version_after=$(sqlalchemy-migrate db_version --url=$alchemy_url $migrate_repo) - if [ "$version_before" != "$version_after" -o "$ERRORS" != 0 ] ; then - MESSAGE=$"DB version: $version_before -> $version_after" - echo -n "$MESSAGE" - [ "$ERRORS" == 0 ] && success "$MESSAGE" || failure "$MESSAGE" ; echo - fi -} - # Regenerate configuration files - almost verbatim from plc.init function reload () { force=$1 @@ -288,7 +258,7 @@ function start() { reload db_start - db_migrate + # migrations are now handled in the code by sfa.storage.dbschema # install peer certs action $"SFA installing peer certs" daemon /usr/bin/sfa-start.py -t -d $OPTIONS @@ -330,8 +300,11 @@ case "$1" in status sfa-start.py RETVAL=$? ;; + dbdump) + dump_sfa_db + ;; *) - echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}" + echo $"Usage: $0 {start|stop|reload|restart|condrestart|status|dbdump}" exit 1 ;; esac diff --git a/sfa/importer/sfa-import-plc.py b/sfa/importer/sfa-import-plc.py index cba43235..36617bb9 100755 --- a/sfa/importer/sfa-import-plc.py +++ b/sfa/importer/sfa-import-plc.py @@ -66,8 +66,8 @@ def _get_site_hrn(interface_hrn, site): hrn = ".".join([interface_hrn, "internet2", site['login_base']]) return hrn -# maybe could use a call to model.init_tables somewhere -# however now import s expected to be done after service creation.. 
+# one would think this code could use a call to DBSchema +# however now import is expected to be done after service creation def main(): process_options() diff --git a/sfa/importer/sfa-nuke-plc.py b/sfa/importer/sfa-nuke-plc.py index 83c9e4bc..41a4598d 100755 --- a/sfa/importer/sfa-nuke-plc.py +++ b/sfa/importer/sfa-nuke-plc.py @@ -13,8 +13,8 @@ from optparse import OptionParser from sfa.util.sfalogging import logger -from sfa.storage.alchemy import dbsession -from sfa.storage.model import init_tables,drop_tables +from sfa.storage.alchemy import engine +from sfa.storage.dbschema import DBSchema def main(): usage="%prog: trash the registry DB" @@ -29,14 +29,15 @@ def main(): if args: parser.print_help() sys.exit(1) + dbschema=DBSchema() logger.info("Purging SFA records from database") - drop_tables(dbsession) + dbschema.nuke() # for convenience we re-create the schema here, so there's no need for an explicit # service sfa restart # however in some (upgrade) scenarios this might be wrong if options.reinit: logger.info("re-creating empty schema") - init_tables(dbsession) + dbschema.init_or_upgrade() if options.clean_certs: # remove the server certificate and all gids found in /var/lib/sfa/authorities diff --git a/sfa/server/registry.py b/sfa/server/registry.py index 0919a1bb..13a75fc7 100644 --- a/sfa/server/registry.py +++ b/sfa/server/registry.py @@ -20,10 +20,11 @@ class Registry(SfaServer): def __init__(self, ip, port, key_file, cert_file): SfaServer.__init__(self, ip, port, key_file, cert_file,'registry') - # initialize db schema - from sfa.storage.alchemy import dbsession - from sfa.storage.model import init_tables - init_tables(dbsession) + sfa_config=Config() + if sfa_config.SFA_REGISTRY_ENABLED: + from sfa.storage.alchemy import engine + from sfa.storage.dbschema import DBSchema + DBSchema().init_or_upgrade() # # Registries is a dictionary of registry connections keyed on the registry hrn diff --git a/sfa/storage/alchemy.py b/sfa/storage/alchemy.py index 
917d878d..7e001166 100644 --- a/sfa/storage/alchemy.py +++ b/sfa/storage/alchemy.py @@ -25,15 +25,16 @@ class Alchemy: # we indeed have /var/lib/pgsql/data/postgresql.conf where # this setting is unset, it might be an angle to tweak that if need be # try a unix socket first - omitting the hostname does the trick - unix_desc = "postgresql+psycopg2://%s:%s@:%s/%s"%\ + unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"%\ (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_PORT,dbname) # the TCP fallback method - tcp_desc = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\ + tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\ (config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_HOST,config.SFA_DB_PORT,dbname) - for engine_desc in [ unix_desc, tcp_desc ] : + for url in [ unix_url, tcp_url ] : try: - self.engine = create_engine (engine_desc) + self.engine = create_engine (url) self.check() + self.url=url return except: pass diff --git a/sfa/storage/dbschema.py b/sfa/storage/dbschema.py new file mode 100644 index 00000000..ed096a9a --- /dev/null +++ b/sfa/storage/dbschema.py @@ -0,0 +1,110 @@ +import sys +import traceback + +from sqlalchemy import MetaData, Table +from sqlalchemy.exc import NoSuchTableError + +from migrate.versioning.api import version, db_version, version_control, upgrade + +from sfa.util.sfalogging import logger +from sfa.storage.model import init_tables, drop_tables + +### this script will upgrade from a pre-2.1 db +# * 1.0 and up to 1.1-4: ('very old') +# was piggybacking the planetlab5 database +# this is kind of out of our scope here, we don't have the credentials +# to connect to planetlab5, but this is documented in +# https://svn.planet-lab.org/wiki/SFATutorialConfigureSFA#Upgradingnotes +# and essentially this is seamless to users +# * from 1.1-5 up to 2.0-x: ('old') +# uses the 'sfa' db and essentially the 'records' table, +# as well as record_types +# together with an 'sfa_db_version' table (version, subversion) +# * from 2.1: +# we have an 'records' table, 
plus 'users' and the like +# and once migrate has kicked in there is a table named +# migrate_db_version (repository_id, repository_path, version) +#### +# An initial attempt to run this as a 001_*.py migrate script +# did not quite work out (essentially we need to set the current version +# number out of the migrations logic) +# also this approach has less stuff in the initscript, which seems just right + +class DBSchema: + + header="Upgrading to 2.1 or higher" + + def __init__ (self): + from sfa.storage.alchemy import alchemy + self.url=alchemy.url + self.engine=alchemy.engine + self.repository="/usr/share/sfa/migrations" + self.meta=MetaData (bind=self.engine) + + def current_version (self): + try: + return db_version (self.url, self.repository) + except: + return None + + def table_exists (self, tablename): + try: + table=Table (tablename, self.meta, autoload=True) + return True + except NoSuchTableError: + return False + + def drop_table (self, tablename): + if self.table_exists (tablename): + print >>sys.stderr, "%s: Dropping table %s"%(DBSchema.header,tablename) + self.engine.execute ("drop table %s cascade"%tablename) + else: + print >>sys.stderr, "%s: no need to drop table %s"%(DBSchema.header,tablename) + + def handle_old_releases (self): + try: + # try to find out which old version this can be + if not self.table_exists ('records'): + # this likely means we've just created the db, so it's either a fresh install + # or we come from a 'very old' depl. 
+ # in either case, an import is required but there's nothing to clean up + print >> sys.stderr,"%s: make sure to run import"%(DBSchema.header,) + elif self.table_exists ('sfa_db_version'): + # we come from an 'old' version + self.drop_table ('records') + self.drop_table ('record_types') + self.drop_table ('sfa_db_version') + else: + # we should be good here + pass + except: + print >> sys.stderr, "%s: unknown exception"%(DBSchema.header,) + traceback.print_exc () + + # after this call the db schema and the version as known by migrate should + # reflect the current data model and the latest known version + def init_or_upgrade (self): + # check if under version control, and initialize it otherwise + if self.current_version() is None: + before="Unknown" + # can be either a very old version, or a fresh install + # for very old versions: + self.handle_old_releases() + # in any case, initialize db from current code and reflect in migrate + init_tables(self.engine) + code_version = version (self.repository) + version_control (self.url, self.repository, code_version) + else: + # use migrate in the usual way + before="%s"%self.current_version() + upgrade (self.url, self.repository) + after="%s"%self.current_version() + if before != after: + logger.info("DBSchema : upgraded from %s to %s"%(before,after)) + + # this call will trash the db altogether + def nuke (self): + drop_tables(self.engine) + +if __name__ == '__main__': + DBSchema().init_or_upgrade() diff --git a/sfa/storage/migrations/migrate.cfg b/sfa/storage/migrations/migrate.cfg index eb55261e..c570dd90 100644 --- a/sfa/storage/migrations/migrate.cfg +++ b/sfa/storage/migrations/migrate.cfg @@ -1,13 +1,13 @@ [db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. 
-repository_id=future migrations in the SFA registry database +repository_id=sqlalchemy-migrate repository for SFA-2.1 and on # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. -version_table=sfa_db_version +version_table=migrate_db_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably diff --git a/sfa/storage/model.py b/sfa/storage/model.py index 715ea671..7bb12aa1 100644 --- a/sfa/storage/model.py +++ b/sfa/storage/model.py @@ -212,20 +212,16 @@ class RegNode (RegRecord): # although the db needs of course to be reachable, # the schema management functions are here and not in alchemy # because the actual details of the classes need to be known -def init_tables(dbsession): - logger.info("Initializing db schema and builtin types") - # the doc states we could retrieve the engine this way - # engine=dbsession.get_bind() - # however I'm getting this - # TypeError: get_bind() takes at least 2 arguments (1 given) - # so let's import alchemy - but not from toplevel - from sfa.storage.alchemy import engine +# migrations: this code has no notion of the previous versions +# of the data model nor of migrations +# sfa.storage.migrations.db_init uses this when starting from +# a fresh db only +def init_tables(engine): + logger.info("Initializing db schema from current/latest model") Base.metadata.create_all(engine) -def drop_tables(dbsession): - logger.info("Dropping tables") - # same as for init_tables - from sfa.storage.alchemy import engine +def drop_tables(engine): + logger.info("Dropping tables from current/latest model") Base.metadata.drop_all(engine) ############################## -- 2.43.0