--- /dev/null
+from types import StringTypes
+import time
+import re
+import datetime
+
+from PLC.Faults import *
+from PLC.Parameter import Parameter, Mixed
+from PLC.Filter import Filter
+from PLC.Debug import profile
+from PLC.Timestamp import Timestamp
+from PLC.Storage.AlchemyObject import AlchemyObj
+
class SliceNode(AlchemyObj):
    """
    Representation of a row in the slice_node table, associating a node
    with a slice. To use, optionally instantiate with a dict of values,
    update as you would a dict, then commit to the database with sync().
    """

    # Fixed: this previously said 'slice_person', which would have made
    # SliceNode read and write the slice_person table instead of slice_node.
    tablename = 'slice_node'

    fields = {
        'slice_id': Parameter(int, "Slice identifier", primary_key=True),
        'node_id': Parameter(int, "Node identifier", indexed=True),
    }

    tags = {}

    def sync(self, commit=True, validate=True):
        """
        Validate this slice/node association and insert it as a new row.
        """
        AlchemyObj.sync(self, commit, validate)
        AlchemyObj.insert(self, dict(self))

    def delete(self, commit=True):
        """
        Delete this slice/node association from the database.
        """
        AlchemyObj.delete(self, dict(self))
+
+
class SliceNodes(list):
    """
    Representation of row(s) from the slice_node table in the database.

    filter, when given, must be a dict of column/value constraints;
    when absent (or empty) all rows are selected.
    """

    def __init__(self, api, filter = None, columns = None):

        if not filter:
            slice_nodes = SliceNode().select()
        elif isinstance(filter, dict):
            # Fixed: this branch previously assigned to a misspelled name
            # ('slices_nodes'), so every filtered query raised NameError
            # in the loop below.
            slice_nodes = SliceNode().select(filter=filter)
        else:
            raise PLCInvalidArgument("Wrong slice_node filter %r" % filter)

        for slice_node in slice_nodes:
            self.append(slice_node)
--- /dev/null
+from types import StringTypes
+import time
+import re
+import datetime
+
+from PLC.Faults import *
+from PLC.Parameter import Parameter, Mixed
+from PLC.Filter import Filter
+from PLC.Debug import profile
+from PLC.Timestamp import Timestamp
+from PLC.Storage.AlchemyObject import AlchemyObj
+
class SlicePerson(AlchemyObj):
    """
    One row of the slice_person table, mapping a person onto a slice.

    Optionally instantiate with a dict of values, update as you would a
    dict, then commit to the database with sync().
    """

    tablename = 'slice_person'

    fields = {
        'slice_id': Parameter(int, "Slice identifier", primary_key=True),
        'person_id': Parameter(int, "Person identifier", indexed=True),
    }
    tags = {}

    def sync(self, commit=True, validate=True):
        """Validate this record and insert it into the database."""
        AlchemyObj.sync(self, commit, validate)
        AlchemyObj.insert(self, dict(self))

    def delete(self, commit=True):
        """Remove this slice/person association from the database."""
        AlchemyObj.delete(self, dict(self))
+
+
class SlicePersons(list):
    """
    A list of rows selected from the slice_person table.
    """

    def __init__(self, api, filter = None, columns = None):

        # no filter (or an empty one) selects everything; otherwise the
        # filter must be a dict of column/value constraints
        if not filter:
            rows = SlicePerson().select()
        elif isinstance(filter, dict):
            rows = SlicePerson().select(filter=filter)
        else:
            raise PLCInvalidArgument("Wrong slice_person filter %r" % filter)

        self.extend(rows)
class AlchemyObj(Record):
    def __init__(self, api=None, fields = {}, object=None):
+        # now also forwards 'object' to Record (the removed call below
+        # passed only the fields dict)
+        Record.__init__(self, dict=fields, object=object)
        self.api=api
-        Record.__init__(self, dict=fields)
    def __iter__(self):
        self._i = iter(object_mapper(self).columns)
        table = Table(self.tablename, metadata)
        for field in self.fields:
            param = self.fields[field]
+            # skip read only params
+            if param.ro:
+                continue
            type = String
            if param.type == int:
                type = Integer
            elif param.type == datetime:
                type = DateTime
-            column = Column(field, type, primary_key=param.primary_key)
+            column = Column(field, type,
+                            nullable = param.nullok,
+                            # NOTE(review): SQLAlchemy's Column has no 'indexed'
+                            # keyword -- the parameter is named 'index'; confirm
+                            # against the SQLAlchemy version in use
+                            indexed = param.indexed,
+                            primary_key=param.primary_key)
            table.append_column(column)
        if not table.exists():
            table.create()
        result = dbsession.execute(table.insert().values(values))
        dbsession.commit()
        return result
-
-    def updatedb(self, values):
+    # signature change: callers must now pass an explicit filter dict
+    # selecting the rows to update
+    def updatedb(self, filter, values):
+        # throwaway class mapped onto the table purely so that
+        # Query.filter_by()/update() can be used for a filtered bulk UPDATE
+        class Cls(object): pass
        table = self.get_table()
-        result = dbsession.execute(table.update(), values)
+        clsmapper = mapper(Cls, table)
+        # ** expands the filter dict into keyword equality constraints
+        result = dbsession.query(clsmapper).filter_by(**filter).update(values)
        dbsession.commit()
        return result
        class Cls(object): pass
        table = self.get_table()
        clsmapper = mapper(Cls, table)
-    result = dbsession.query(clsmapper).filter_by(*filter).delete()
+    # '**' (keyword expansion) is required here; '*' passed the dict's
+    # keys positionally and could not have worked
+    result = dbsession.query(clsmapper).filter_by(**filter).delete()
        dbsession.commit()
        return result
--- /dev/null
+import sys
+import traceback
+
+from sqlalchemy import MetaData, Table
+from sqlalchemy.exc import NoSuchTableError
+
+import migrate.versioning.api as migrate
+
+from PLC.Logger import logger
+import PLC.Storage.Model as model
+
+########## this class takes care of database upgrades
+#
+###
+# An initial attempt to run this as a 001_*.py migrate script
+# did not quite work out (essentially we need to set the current version
+# number out of the migrations logic)
+# also this approach has less stuff in the initscript, which seems just right
+
class DBSchema:
    """
    Takes care of database schema creation and upgrades, driven by
    sqlalchemy-migrate.

    An initial attempt to run this as a 001_*.py migrate script did not
    quite work out (essentially we need to set the current version number
    out of the migrations logic); also this approach keeps less stuff in
    the initscript, which seems just right.
    """

    # prefix used in all messages printed to stderr
    header="Upgrading to 2.1 or higher"

    def __init__ (self):
        # local import to avoid a circular dependency at module load time
        from sfa.storage.alchemy import alchemy
        self.url=alchemy.url
        self.engine=alchemy.engine
        self.repository="/usr/share/plc/migrations"

    def current_version (self):
        """
        Return the migrate version of the db, or None when the db is not
        under version control (or cannot be queried).
        """
        try:
            return migrate.db_version (self.url, self.repository)
        # was a bare 'except:', which would also swallow
        # KeyboardInterrupt/SystemExit
        except Exception:
            return None

    def table_exists (self, tablename):
        """
        True iff 'tablename' can be reflected from the current db.
        """
        try:
            metadata = MetaData (bind=self.engine)
            table=Table (tablename, metadata, autoload=True)
            return True
        except NoSuchTableError:
            return False

    def drop_table (self, tablename):
        """
        Drop 'tablename' (cascading) if it exists; no-op otherwise.
        """
        if self.table_exists (tablename):
            print >>sys.stderr, "%s: Dropping table %s"%(DBSchema.header,tablename)
            self.engine.execute ("drop table %s cascade"%tablename)
        else:
            print >>sys.stderr, "%s: no need to drop table %s"%(DBSchema.header,tablename)

    def handle_old_releases (self):
        """
        Placeholder for one-shot fixups needed when coming from a release
        that predates version control; currently a no-op.
        """
        try:
            pass
        # was a bare 'except:'; kept broad on purpose, but no longer
        # catches KeyboardInterrupt/SystemExit
        except Exception:
            print >> sys.stderr, "%s: unknown exception"%(DBSchema.header,)
            traceback.print_exc ()

    # after this call the db schema and the version as known by migrate should
    # reflect the current data model and the latest known version
    def init_or_upgrade (self):
        # check if under version control, and initialize it otherwise
        if self.current_version() is None:
            before="Unknown"
            # can be either a very old version, or a fresh install
            # for very old versions:
            self.handle_old_releases()
            # in any case, initialize db from current code and reflect in migrate
            model.init_tables(self.engine)
            code_version = migrate.version (self.repository)
            migrate.version_control (self.url, self.repository, code_version)
            after="%s"%self.current_version()
            logger.info("DBSchema : jumped to version %s"%(after))
        else:
            # use migrate in the usual way
            before="%s"%self.current_version()
            migrate.upgrade (self.url, self.repository)
            after="%s"%self.current_version()
            if before != after:
                logger.info("DBSchema : upgraded version from %s to %s"%(before,after))
            else:
                logger.debug("DBSchema : no change needed in db schema (%s==%s)"%(before,after))

    # this trashes the db altogether, from the current model in sfa.storage.model
    # I hope this won't collide with ongoing migrations and all
    # actually, now that sfa uses its own db, this is essentially equivalent to
    # dropping the db entirely, modulo a 'service sfa start'
    def nuke (self):
        model.drop_tables(self.engine)
        # so in this case it's like we haven't initialized the db at all
        try:
            migrate.drop_version_control (self.url, self.repository)
        # Fixed: this previously caught migrate.exceptions.DatabaseNotControlledError,
        # but 'migrate' here is bound to migrate.versioning.api, which has no
        # 'exceptions' attribute -- evaluating the clause raised AttributeError
        # and masked the real failure. Catch broadly and log instead.
        except Exception:
            logger.log_exc("Failed to drop version control")
+
+
+# allow running this module directly to create or upgrade the schema
+if __name__ == '__main__':
+    DBSchema().init_or_upgrade()