from types import StringTypes
from datetime import datetime

from sqlalchemy import Column, Integer, String, DateTime
# NOTE(review): Column is imported twice (already imported just above)
from sqlalchemy import Table, Column, MetaData, join, ForeignKey
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import column_property
from sqlalchemy.orm import object_mapper
from sqlalchemy.orm import validates
from sqlalchemy.ext.declarative import declarative_base

from sfa.util.sfalogging import logger
from sfa.util.sfatime import utcparse, datetime_to_string
from sfa.util.xml import XML
from sfa.trust.gid import GID

##############################
# the single declarative base shared by all mapped classes in this module
Base=declarative_base()

####################
# dicts vs objects
####################
# historically the front end to the db dealt with dicts, so the code was only dealing with dicts
# sqlalchemy however offers an object interface, meaning that you write obj.id instead of obj['id']
# which is admittedly much nicer
# however we still need to deal with dictionaries if only for the xmlrpc layer
#
# here are a few utilities for this
#
# (*) first off, when an old piece of code needs to be used as-is, if only temporarily, the simplest trick
# is to use obj.__dict__
# this behaves exactly like required, i.e. obj.__dict__['field']='new value' does change obj.field
# however this depends on sqlalchemy's implementation so it should be avoided
#
# (*) second, when an object needs to be exposed to the xmlrpc layer, we need to convert it into a dict
# remember though that writing the resulting dictionary won't change the object
# essentially obj.__dict__ would be fine too, except that we want to discard alchemy private keys starting with '_'
# 2 ways are provided for that:
# . dict(obj)
# .
obj.todict() # the former dict(obj) relies on __iter__() and next() below, and does not rely on the fields names # although it seems to work fine, I've found cases where it issues a weird python error that I could not get right # so the latter obj.todict() seems more reliable but more hacky as is relies on the form of fields, so this can probably be improved # # (*) finally for converting a dictionary into an sqlalchemy object, we provide # obj.load_from_dict(dict) class AlchemyObj: def __iter__(self): self._i = iter(object_mapper(self).columns) return self def next(self): n = self._i.next().name return n, getattr(self, n) def todict (self): d=self.__dict__ keys=[k for k in d.keys() if not k.startswith('_')] return dict ( [ (k,d[k]) for k in keys ] ) def load_from_dict (self, d): for (k,v) in d.iteritems(): # experimental if isinstance(v, StringTypes) and v.lower() in ['true']: v=True if isinstance(v, StringTypes) and v.lower() in ['false']: v=False setattr(self,k,v) def validate_datetime (self, key, incoming): if isinstance (incoming, datetime): return incoming elif isinstance (incoming, (int,float)):return datetime.fromtimestamp (incoming) # in addition we provide convenience for converting to and from xml records # for this purpose only, we need the subclasses to define 'fields' as either # a list or a dictionary def xml_fields (self): fields=self.fields if isinstance(fields,dict): fields=fields.keys() return fields def save_as_xml (self): # xxx not sure about the scope here input_dict = dict( [ (key, getattr(self.key), ) for key in self.xml_fields() if getattr(self,key,None) ] ) xml_record=XML("") xml_record.parse_dict (input_dict) return xml_record.toxml() def dump(self, format=None, dump_parents=False): if not format: format = 'text' else: format = format.lower() if format == 'text': self.dump_text(dump_parents) elif format == 'xml': print self.save_to_string() elif format == 'simple': print self.dump_simple() else: raise Exception, "Invalid format %s" % 
format # xxx fixme # turns out the date_created field is received by the client as a 'created' int # (and 'last_updated' does not make it at all) # let's be flexible def date_repr (self,fields): if not isinstance(fields,list): fields=[fields] for field in fields: value=getattr(self,field,None) if isinstance (value,datetime): return datetime_to_string (value) elif isinstance (value,(int,float)): return datetime_to_string(utcparse(value)) # fallback return "** undef_datetime **" def dump_text(self, dump_parents=False): # print core fields in this order core_fields = [ 'hrn', 'type', 'authority', 'date_created', 'created', 'last_updated', 'gid', ] print "".join(['=' for i in range(40)]) print "RECORD" print " hrn:", self.hrn print " type:", self.type print " authority:", self.authority print " date created:", self.date_repr( ['date_created','created'] ) print " last updated:", self.date_repr('last_updated') print " gid:" print self.get_gid_object().dump_string(8, dump_parents) # print remaining fields for attrib_name in dir(self): attrib = getattr(self, attrib_name) # skip internals if attrib_name.startswith('_'): continue # skip core fields if attrib_name in core_fields: continue # skip callables if callable (attrib): continue print " %s: %s" % (attrib_name, attrib) def dump_simple(self): return "%s"%self # # only intended for debugging # def inspect (self, logger, message=""): # logger.info("%s -- Inspecting AlchemyObj -- attrs"%message) # for k in dir(self): # if not k.startswith('_'): # logger.info (" %s: %s"%(k,getattr(self,k))) # logger.info("%s -- Inspecting AlchemyObj -- __dict__"%message) # d=self.__dict__ # for (k,v) in d.iteritems(): # logger.info("[%s]=%s"%(k,v)) ############################## # various kinds of records are implemented as an inheritance hierarchy # RegRecord is the base class for all actual variants # a first draft was using 'type' as the discriminator for the inheritance # but we had to define another more internal column (classtype) so 
we # accomodate variants in types like authority+am and the like class RegRecord (Base,AlchemyObj): __tablename__ = 'records' record_id = Column (Integer, primary_key=True) # this is the discriminator that tells which class to use classtype = Column (String) # in a first version type was the discriminator # but that could not accomodate for 'authority+sa' and the like type = Column (String) hrn = Column (String) gid = Column (String) authority = Column (String) peer_authority = Column (String) pointer = Column (Integer, default=-1) date_created = Column (DateTime) last_updated = Column (DateTime) # use the 'type' column to decide which subclass the object is of __mapper_args__ = { 'polymorphic_on' : classtype } fields = [ 'type', 'hrn', 'gid', 'authority', 'peer_authority' ] def __init__ (self, type=None, hrn=None, gid=None, authority=None, peer_authority=None, pointer=None, dict=None): if type: self.type=type if hrn: self.hrn=hrn if gid: if isinstance(gid, StringTypes): self.gid=gid else: self.gid=gid.save_to_string(save_parents=True) if authority: self.authority=authority if peer_authority: self.peer_authority=peer_authority if pointer: self.pointer=pointer if dict: self.load_from_dict (dict) def __repr__(self): result=""," email=%s"%self.email) result += ">" return result @validates('email') def validate_email(self, key, address): assert '@' in address return address #################### # xxx tocheck : not sure about eager loading of this one # meaning, when querying the whole records, we expect there should # be a single query to fetch all the keys # or, is it enough that we issue a single query to retrieve all the keys class RegKey (Base): __tablename__ = 'keys' key_id = Column (Integer, primary_key=True) record_id = Column (Integer, ForeignKey ("records.record_id")) key = Column (String) pointer = Column (Integer, default = -1) def __init__ (self, key, pointer=None): self.key=key if pointer: self.pointer=pointer def __repr__ (self): result="