-#!/bin/bash
+#!/bin/bash
#
# sfa Wraps PLCAPI into the SFA compliant API
#
# Dumps the database - optional argument to specify filename suffix
function dump_sfa_db() {
if [ -n "$1" ] ; then suffix="-$1" ; else suffix="" ; fi
- dumpfile=/var/lib/pgsql/backups/$(date +"${SFA_DB_NAME}.${DATE}${suffix}.sql")
+ mkdir -p /usr/share/sfa/backups
+ dumpfile=/usr/share/sfa/backups/$(date +"${SFA_DB_NAME}.${DATE}${suffix}.sql")
pg_dump -U $SFA_DB_USER $SFA_DB_NAME > $dumpfile
+ echo "Saved sfa database in $dumpfile"
check
}
-# Updates the database by applying migration scripts
-# using sqlalchemy-migrate for this
-# migrate repo is /usr/share/sfa/migrations/
-# and version scripts are thus in /usr/share/sfa/migrations/versions/
-function db_migrate() {
-
- # only if enabled
- [ "$SFA_DB_ENABLED" == 1 ] || return
-
- alchemy_url="postgresql+psycopg2://${SFA_DB_USER}:${SFA_DB_PASSWORD}@:${SFA_DB_PORT}/${SFA_DB_NAME}"
- migrate_repo="/usr/share/sfa/migrations"
- # check if under version control, and initialize it otherwise
- if ! sqlalchemy-migrate db_version --url=$alchemy_url $migrate_repo >& /dev/null ; then
- # at this point it would make sense to
- # check that the 'users' table is in the db
- # if not, backup/dump, and run a nuke/import
- # and then only init the db version to 0
- echo -n $"version init in db ${SFA_DB_NAME}"
- sqlalchemy-migrate version_control --url=$alchemy_url $migrate_repo
- fi
- version_before=$(sqlalchemy-migrate db_version --url=$alchemy_url $migrate_repo)
- check
- sqlalchemy-migrate upgrade --url=$alchemy_url $migrate_repo 2> /dev/null
- check
- version_after=$(sqlalchemy-migrate db_version --url=$alchemy_url $migrate_repo)
- if [ "$version_before" != "$version_after" -o "$ERRORS" != 0 ] ; then
- MESSAGE=$"DB version: $version_before -> $version_after"
- echo -n "$MESSAGE"
- [ "$ERRORS" == 0 ] && success "$MESSAGE" || failure "$MESSAGE" ; echo
- fi
-}
-
# Regenerate configuration files - almost verbatim from plc.init
function reload () {
force=$1
reload
db_start
- db_migrate
+ # migrations are now handled in the code by sfa.storage.dbschema
# install peer certs
action $"SFA installing peer certs" daemon /usr/bin/sfa-start.py -t -d $OPTIONS
status sfa-start.py
RETVAL=$?
;;
+ dbdump)
+ dump_sfa_db
+ ;;
*)
- echo $"Usage: $0 {start|stop|reload|restart|condrestart|status}"
+ echo $"Usage: $0 {start|stop|reload|restart|condrestart|status|dbdump}"
exit 1
;;
esac
hrn = ".".join([interface_hrn, "internet2", site['login_base']])
return hrn
-# maybe could use a call to model.init_tables somewhere
-# however now import s expected to be done after service creation..
+# one would think this code could use a call to DBSchema
+# however now import is expected to be done after service creation
def main():
process_options()
from sfa.util.sfalogging import logger
-from sfa.storage.alchemy import dbsession
-from sfa.storage.model import init_tables,drop_tables
+from sfa.storage.alchemy import engine
+from sfa.storage.dbschema import DBSchema
def main():
usage="%prog: trash the registry DB"
if args:
parser.print_help()
sys.exit(1)
+ dbschema=DBSchema()
logger.info("Purging SFA records from database")
- drop_tables(dbsession)
+ dbschema.nuke()
# for convenience we re-create the schema here, so there's no need for an explicit
# service sfa restart
# however in some (upgrade) scenarios this might be wrong
if options.reinit:
logger.info("re-creating empty schema")
- init_tables(dbsession)
+ dbschema.init_or_upgrade()
if options.clean_certs:
# remove the server certificate and all gids found in /var/lib/sfa/authorities
def __init__(self, ip, port, key_file, cert_file):
SfaServer.__init__(self, ip, port, key_file, cert_file,'registry')
- # initialize db schema
- from sfa.storage.alchemy import dbsession
- from sfa.storage.model import init_tables
- init_tables(dbsession)
+ sfa_config=Config()
+ if sfa_config.SFA_REGISTRY_ENABLED:
+ from sfa.storage.alchemy import engine
+ from sfa.storage.dbschema import DBSchema
+ DBSchema().init_or_upgrade()
#
# Registries is a dictionary of registry connections keyed on the registry hrn
# we indeed have /var/lib/pgsql/data/postgresql.conf where
# this setting is unset, it might be an angle to tweak that if need be
# try a unix socket first - omitting the hostname does the trick
- unix_desc = "postgresql+psycopg2://%s:%s@:%s/%s"%\
+ unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"%\
(config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_PORT,dbname)
# the TCP fallback method
- tcp_desc = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\
+ tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"%\
(config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_HOST,config.SFA_DB_PORT,dbname)
- for engine_desc in [ unix_desc, tcp_desc ] :
+ for url in [ unix_url, tcp_url ] :
try:
- self.engine = create_engine (engine_desc)
+ self.engine = create_engine (url)
self.check()
+ self.url=url
return
except:
pass
--- /dev/null
+import sys
+import traceback
+
+from sqlalchemy import MetaData, Table
+from sqlalchemy.exc import NoSuchTableError
+
+from migrate.versioning.api import version, db_version, version_control, upgrade
+
+from sfa.util.sfalogging import logger
+from sfa.storage.model import init_tables, drop_tables
+
+### this script will upgrade from a pre-2.1 db
+# * 1.0 and up to 1.1-4: ('very old')
+# was piggybacking the planetlab5 database
+# this is kind of out of our scope here, we don't have the credentials
+# to connect to planetlab5, but this is documented in
+# https://svn.planet-lab.org/wiki/SFATutorialConfigureSFA#Upgradingnotes
+# and essentially this is seamless to users
+# * from 1.1-5 up to 2.0-x: ('old')
+# uses the 'sfa' db and essentially the 'records' table,
+# as well as record_types
+# together with an 'sfa_db_version' table (version, subversion)
+# * from 2.1:
+# we have an 'records' table, plus 'users' and the like
+# and once migrate has kicked in there is a table named
+# migrate_db_version (repository_id, repository_path, version)
+####
+# An initial attempt to run this as a 001_*.py migrate script
+# did not quite work out (essentially we need to set the current version
+# number out of the migrations logic)
+# also this approach has less stuff in the initscript, which seems just right
+
+class DBSchema:
+
+ header="Upgrading to 2.1 or higher"
+
+ def __init__ (self):
+ from sfa.storage.alchemy import alchemy
+ self.url=alchemy.url
+ self.engine=alchemy.engine
+ self.repository="/usr/share/sfa/migrations"
+ self.meta=MetaData (bind=self.engine)
+
+ def current_version (self):
+ try:
+ return db_version (self.url, self.repository)
+ except:
+ return None
+
+ def table_exists (self, tablename):
+ try:
+ table=Table (tablename, self.meta, autoload=True)
+ return True
+ except NoSuchTableError:
+ return False
+
+ def drop_table (self, tablename):
+ if self.table_exists (tablename):
+ print >>sys.stderr, "%s: Dropping table %s"%(DBSchema.header,tablename)
+ self.engine.execute ("drop table %s cascade"%tablename)
+ else:
+ print >>sys.stderr, "%s: no need to drop table %s"%(DBSchema.header,tablename)
+
+ def handle_old_releases (self):
+ try:
+ # try to find out which old version this can be
+ if not self.table_exists ('records'):
+ # this likely means we've just created the db, so it's either a fresh install
+ # or we come from a 'very old' depl.
+ # in either case, an import is required but there's nothing to clean up
+ print >> sys.stderr,"%s: make sure to run import"%(DBSchema.header,)
+ elif self.table_exists ('sfa_db_version'):
+ # we come from an 'old' version
+ self.drop_table ('records')
+ self.drop_table ('record_types')
+ self.drop_table ('sfa_db_version')
+ else:
+ # we should be good here
+ pass
+ except:
+ print >> sys.stderr, "%s: unknown exception"%(DBSchema.header,)
+ traceback.print_exc ()
+
+ # after this call the db schema and the version as known by migrate should
+ # reflect the current data model and the latest known version
+ def init_or_upgrade (self):
+ # check if under version control, and initialize it otherwise
+ if self.current_version() is None:
+ before="Unknown"
+ # can be either a very old version, or a fresh install
+ # for very old versions:
+ self.handle_old_releases()
+ # in any case, initialize db from current code and reflect in migrate
+ init_tables(self.engine)
+ code_version = version (self.repository)
+ version_control (self.url, self.repository, code_version)
+ else:
+ # use migrate in the usual way
+ before="%s"%self.current_version()
+ upgrade (self.url, self.repository)
+ after="%s"%self.current_version()
+ if before != after:
+ logger.info("DBSchema : upgraded from %s to %s"%(before,after))
+
+ # this call will trash the db altogether
+ def nuke (self):
+ drop_tables(self.engine)
+
+if __name__ == '__main__':
+ DBSchema().init_or_upgrade()
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
-repository_id=future migrations in the SFA registry database
+repository_id=sqlalchemy-migrate repository for SFA-2.1 and on
# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
-version_table=sfa_db_version
+version_table=migrate_db_version
# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# although the db needs of course to be reachable,
# the schema management functions are here and not in alchemy
# because the actual details of the classes need to be known
-def init_tables(dbsession):
- logger.info("Initializing db schema and builtin types")
- # the doc states we could retrieve the engine this way
- # engine=dbsession.get_bind()
- # however I'm getting this
- # TypeError: get_bind() takes at least 2 arguments (1 given)
- # so let's import alchemy - but not from toplevel
- from sfa.storage.alchemy import engine
+# migrations: this code has no notion of the previous versions
+# of the data model nor of migrations
+# sfa.storage.migrations.db_init uses this when starting from
+# a fresh db only
+def init_tables(engine):
+ logger.info("Initializing db schema from current/latest model")
Base.metadata.create_all(engine)
-def drop_tables(dbsession):
- logger.info("Dropping tables")
- # same as for init_tables
- from sfa.storage.alchemy import engine
+def drop_tables(engine):
+ logger.info("Dropping tables from current/latest model")
Base.metadata.drop_all(engine)
##############################