X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fstorage%2Fmodel.py;h=a2e3bd49b3dfd89613cc019a36c26722e95b76f8;hb=4a9e6751f9f396f463932133b9d62fc925a99ef6;hp=810af6b0760e947eaa73dd46a6f3747591fe8591;hpb=e11df870ed7f3739af715a6b744b96c7b15fd974;p=sfa.git diff --git a/sfa/storage/model.py b/sfa/storage/model.py index 810af6b0..a2e3bd49 100644 --- a/sfa/storage/model.py +++ b/sfa/storage/model.py @@ -1,6 +1,6 @@ -from types import StringTypes from datetime import datetime +from sqlalchemy import or_, and_ from sqlalchemy import Column, Integer, String, DateTime from sqlalchemy import Table, Column, MetaData, join, ForeignKey from sqlalchemy.orm import relationship, backref @@ -9,13 +9,16 @@ from sqlalchemy.orm import object_mapper from sqlalchemy.orm import validates from sqlalchemy.ext.declarative import declarative_base +from sfa.storage.record import Record from sfa.util.sfalogging import logger -from sfa.util.xml import XML +from sfa.util.sfatime import utcparse, datetime_to_string +from sfa.util.xml import XML +from sfa.util.py23 import StringType from sfa.trust.gid import GID ############################## -Base=declarative_base() +Base = declarative_base() #################### # dicts vs objects @@ -24,13 +27,13 @@ Base=declarative_base() # sqlalchemy however offers an object interface, meaning that you write obj.id instead of obj['id'] # which is admittedly much nicer # however we still need to deal with dictionaries if only for the xmlrpc layer -# -# here are a few utilities for this -# +# +# here are a few utilities for this +# # (*) first off, when an old pieve of code needs to be used as-is, if only temporarily, the simplest trick # is to use obj.__dict__ # this behaves exactly like required, i.e. obj.__dict__['field']='new value' does change obj.field -# however this depends on sqlalchemy's implementation so it should be avoided +# however this depends on sqlalchemy's implementation so it should be avoided # # (*) second, when an object needs to be exposed to the xmlrpc layer, we need to convert it into a dict # remember though that writing the resulting dictionary won't change the object @@ -45,49 +48,18 @@ Base=declarative_base() # (*) finally for converting a dictionary into an sqlalchemy object, we provide # obj.load_from_dict(dict) -class AlchemyObj: - def __iter__(self): + +class AlchemyObj(Record): + + def __iter__(self): self._i = iter(object_mapper(self).columns) - return self - def next(self): + return self + + def __next__(self): n = self._i.next().name return n, getattr(self, n) - def todict (self): - d=self.__dict__ - keys=[k for k in d.keys() if not k.startswith('_')] - return dict ( [ (k,d[k]) for k in keys ] ) - def load_from_dict (self, d): - for (k,v) in d.iteritems(): - # experimental - if isinstance(v, StringTypes) and v.lower() in ['true']: v=True - if isinstance(v, StringTypes) and v.lower() in ['false']: v=False - setattr(self,k,v) - - # in addition we provide convenience for converting to and from xml records - # for this purpose only, we need the subclasses to define 'fields' as either - # a list or a dictionary - def xml_fields (self): - fields=self.fields - if isinstance(fields,dict): fields=fields.keys() - return fields - - def save_as_xml (self): - # xxx not sure about the scope here - input_dict = dict( [ (key, getattr(self.key), ) for key in self.xml_fields() if getattr(self,key,None) ] ) - xml_record=XML("") - xml_record.parse_dict (input_dict) - return xml_record.toxml() - - def dump(self, dump_parents=False): - for key in self.fields: - 
if key == 'gid' and self.gid: - gid = GID(string=self.gid) - print " %s:" % key - gid.dump(8, dump_parents) - elif getattr(self,key,None): - print " %s: %s" % (key, getattr(self,key)) - -# # only intended for debugging + +# # only intended for debugging # def inspect (self, logger, message=""): # logger.info("%s -- Inspecting AlchemyObj -- attrs"%message) # for k in dir(self): @@ -103,114 +75,404 @@ class AlchemyObj: # various kinds of records are implemented as an inheritance hierarchy # RegRecord is the base class for all actual variants # a first draft was using 'type' as the discriminator for the inheritance -# but we had to define another more internal column (classtype) so we +# but we had to define another more internal column (classtype) so we # accomodate variants in types like authority+am and the like -class RegRecord (Base,AlchemyObj): - __tablename__ = 'records' - record_id = Column (Integer, primary_key=True) +class RegRecord(Base, AlchemyObj): + __tablename__ = 'records' + record_id = Column(Integer, primary_key=True) # this is the discriminator that tells which class to use - classtype = Column (String) - type = Column (String) - hrn = Column (String) - gid = Column (String) - authority = Column (String) - peer_authority = Column (String) - pointer = Column (Integer, default=-1) - date_created = Column (DateTime) - last_updated = Column (DateTime) + classtype = Column(String) + # in a first version type was the discriminator + # but that could not accomodate for 'authority+sa' and the like + type = Column(String) + hrn = Column(String) + gid = Column(String) + authority = Column(String) + peer_authority = Column(String) + pointer = Column(Integer, default=-1) + date_created = Column(DateTime) + last_updated = Column(DateTime) # use the 'type' column to decide which subclass the object is of - __mapper_args__ = { 'polymorphic_on' : classtype } - - fields = [ 'type', 'hrn', 'gid', 'authority', 'peer_authority' ] - def __init__ (self, type=None, hrn=None, gid=None, authority=None, peer_authority=None, - pointer=None, dict=None): - if type: self.type=type - if hrn: self.hrn=hrn - if gid: - if isinstance(gid, StringTypes): self.gid=gid - else: self.gid=gid.save_to_string(save_parents=True) - if authority: self.authority=authority - if peer_authority: self.peer_authority=peer_authority - if pointer: self.pointer=pointer - if dict: self.load_from_dict (dict) + __mapper_args__ = {'polymorphic_on': classtype} + + fields = ['type', 'hrn', 'gid', 'authority', 'peer_authority'] + + def __init__(self, type=None, hrn=None, gid=None, authority=None, peer_authority=None, + pointer=None, dict=None): + if type: + self.type = type + if hrn: + self.hrn = hrn + if gid: + if isinstance(gid, StringType): + self.gid = gid + else: + self.gid = gid.save_to_string(save_parents=True) + if authority: + self.authority = authority + if peer_authority: + self.peer_authority = peer_authority + if pointer: + self.pointer = pointer + if dict: + self.load_from_dict(dict) def __repr__(self): - result="[Record id=%s, type=%s, hrn=%s, authority=%s, pointer=%s" % \ - (self.record_id, self.type, self.hrn, self.authority, self.pointer) + result = "", " name={}>".format(self.name)) + return result + + def update_pis(self, pi_hrns, dbsession): + # strip that in case we have words + pi_hrns = [x.strip() for x in pi_hrns] + request = dbsession.query(RegUser).filter(RegUser.hrn.in_(pi_hrns)) + logger.info("RegAuthority.update_pis: %d incoming pis, %d matches found" + % (len(pi_hrns), request.count())) + pis = 
dbsession.query(RegUser).filter(RegUser.hrn.in_(pi_hrns)).all() + self.reg_pis = pis + +#################### + + +class RegSlice(RegRecord): + __tablename__ = 'slices' + __mapper_args__ = {'polymorphic_identity': 'slice'} + record_id = Column(Integer, ForeignKey( + "records.record_id"), primary_key=True) + # extensions come here + reg_researchers = relationship \ + ('RegUser', + secondary=slice_researcher_table, + primaryjoin=RegRecord.record_id == slice_researcher_table.c.slice_id, + secondaryjoin=RegRecord.record_id == slice_researcher_table.c.researcher_id, + backref='reg_slices_as_researcher', + ) + + def __init__(self, **kwds): + if 'type' not in kwds: + kwds['type'] = 'slice' + RegRecord.__init__(self, **kwds) + + def __repr__(self): + return RegRecord.__repr__(self).replace("Record", "Slice") + + def update_researchers(self, researcher_hrns, dbsession): + # strip that in case we have words + researcher_hrns = [x.strip() for x in researcher_hrns] + request = dbsession.query(RegUser).filter( + RegUser.hrn.in_(researcher_hrns)) + logger.info("RegSlice.update_researchers: %d incoming researchers, %d matches found" + % (len(researcher_hrns), request.count())) + researchers = dbsession.query(RegUser).filter( + RegUser.hrn.in_(researcher_hrns)).all() + self.reg_researchers = researchers + + # when dealing with credentials, we need to retrieve the PIs attached to a slice + # WARNING: with the move to passing dbsessions around, we face a glitch here because this + # helper function is called from the trust/ area that + def get_pis(self): + from sqlalchemy.orm import sessionmaker + Session = sessionmaker() + dbsession = Session.object_session(self) + from sfa.util.xrn import get_authority + authority_hrn = get_authority(self.hrn) + auth_record = dbsession.query( + RegAuthority).filter_by(hrn=authority_hrn).first() + return auth_record.reg_pis + + @validates('expires') + def validate_expires(self, key, incoming): + return self.validate_datetime(key, incoming) + +#################### + + +class RegNode(RegRecord): + __tablename__ = 'nodes' + __mapper_args__ = {'polymorphic_identity': 'node'} + record_id = Column(Integer, ForeignKey( + "records.record_id"), primary_key=True) + + def __init__(self, **kwds): + if 'type' not in kwds: + kwds['type'] = 'node' + RegRecord.__init__(self, **kwds) + + def __repr__(self): + return RegRecord.__repr__(self).replace("Record", "Node") + +#################### + + +class RegUser(RegRecord): + __tablename__ = 'users' # these objects will have type='user' in the records table - __mapper_args__ = { 'polymorphic_identity' : 'user' } - record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True) - email = Column ('email', String) - + __mapper_args__ = {'polymorphic_identity': 'user'} + record_id = Column(Integer, ForeignKey( + "records.record_id"), primary_key=True) + # extensions come here + email = Column('email', String) + # can't use name 'keys' here because when loading from xml we're getting + # a 'keys' tag, and assigning a list of strings in a reference column like + # this crashes + reg_keys = relationship \ + ('RegKey', backref='reg_user', + cascade="all, delete, delete-orphan", + ) + + # so we can use RegUser (email=.., hrn=..) 
and the like + def __init__(self, **kwds): + # handle local settings + if 'email' in kwds: + self.email = kwds.pop('email') + if 'type' not in kwds: + kwds['type'] = 'user' + RegRecord.__init__(self, **kwds) + # append stuff at the end of the record __repr__ - def __repr__ (self): - result = RegRecord.__repr__(self).replace("Record","User") - result.replace ("]"," email=%s"%self.email) - result += "]" + def __repr__(self): + result = RegRecord.__repr__(self).replace("Record", "User") + result.replace(">", " email={}>".format(self.email)) return result - - @validates('email') + + @validates('email') def validate_email(self, key, address): assert '@' in address return address -class RegAuthority (RegRecord): - __tablename__ = 'authorities' - __mapper_args__ = { 'polymorphic_identity' : 'authority' } - record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True) - - # no proper data yet, just hack the typename - def __repr__ (self): - return RegRecord.__repr__(self).replace("Record","Authority") - -class RegSlice (RegRecord): - __tablename__ = 'slices' - __mapper_args__ = { 'polymorphic_identity' : 'slice' } - record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True) - - def __repr__ (self): - return RegRecord.__repr__(self).replace("Record","Slice") - -class RegNode (RegRecord): - __tablename__ = 'nodes' - __mapper_args__ = { 'polymorphic_identity' : 'node' } - record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True) - - def __repr__ (self): - return RegRecord.__repr__(self).replace("Record","Node") +#################### +# xxx tocheck : not sure about eager loading of this one +# meaning, when querying the whole records, we expect there should +# be a single query to fetch all the keys +# or, is it enough that we issue a single query to retrieve all the keys + + +class RegKey(Base): + __tablename__ = 'keys' + key_id = Column(Integer, primary_key=True) + record_id = Column(Integer, ForeignKey("records.record_id")) + key = Column(String) + pointer = Column(Integer, default=-1) + + def __init__(self, key, pointer=None): + self.key = key + if pointer: + self.pointer = pointer + + def __repr__(self): + result = ":} +# so after that, an 'authority' record will e.g. 
have a 'reg-pis' field +# with the hrns of its pi-users +augment_map = {'authority': {'reg-pis': 'reg_pis', }, + 'slice': {'reg-researchers': 'reg_researchers', }, + 'user': {'reg-pi-authorities': 'reg_authorities_as_pi', + 'reg-slices': 'reg_slices_as_researcher', }, + } + + +# xxx mystery +# the way we use sqlalchemy might be a little wrong +# in any case what has been observed is that (Reg)Records as returned by an sqlalchemy +# query not always have their __dict__ properly adjusted +# typically a RegAuthority object would have its object.name set properly, but +# object.__dict__ has no 'name' key +# which is an issue because we rely on __dict__ for many things, in particular this +# is what gets exposed to the drivers (this is historical and dates back before sqlalchemy) +# so it is recommended to always run this function that will make sure +# that such built-in fields are properly set in __dict__ too +# +def augment_with_sfa_builtins(local_record): + # don't ruin the import of that file in a client world + from sfa.util.xrn import Xrn + # add a 'urn' field + setattr(local_record, 'reg-urn', + Xrn(xrn=local_record.hrn, type=local_record.type).urn) + # users have keys and this is needed to synthesize 'users' sent over to + # CreateSliver + fields_to_check = [] + if local_record.type == 'user': + user_keys = [key.key for key in local_record.reg_keys] + setattr(local_record, 'reg-keys', user_keys) + fields_to_check = ['email'] + elif local_record.type == 'authority': + fields_to_check = ['name'] + for field in fields_to_check: + if not field in local_record.__dict__: + logger.debug("augment_with_sfa_builtins: hotfixing missing '{}' in {}" + .format(field, local_record.hrn)) + local_record.__dict__[field] = getattr(local_record, field) + # search in map according to record type + type_map = augment_map.get(local_record.type, {}) + # use type-dep. map to do the job + for (field_name, attribute) in list(type_map.items()): + # get related objects + related_records = getattr(local_record, attribute, []) + hrns = [r.hrn for r in related_records] + setattr(local_record, field_name, hrns)
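
Usage sketch (editor's addition, not part of the diff above): the comment blocks in this change describe how registry records move between the SQLAlchemy object layer and the xmlrpc layer, and how augment_with_sfa_builtins decorates a fetched record with 'reg-*' fields. The snippet below illustrates that flow under stated assumptions: the database DSN and the slice hrn are made up for the example, and the todict() helper is assumed to still be available through the Record base class, as the removed AlchemyObj.todict used to provide.

# minimal sketch -- DSN, hrn and the todict() helper are assumptions, not taken from the diff
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from sfa.storage.model import RegSlice, augment_with_sfa_builtins

engine = create_engine("postgresql+psycopg2://sfa:sfa@localhost/sfa")   # assumed DSN
Session = sessionmaker(bind=engine)
dbsession = Session()

# fetch one slice record through the polymorphic 'records' hierarchy
record = dbsession.query(RegSlice).filter_by(hrn="onelab.upmc.myslice").first()
if record is not None:
    # decorate the object with the 'reg-*' built-ins ('reg-urn', 'reg-researchers', ...)
    augment_with_sfa_builtins(record)
    # flatten into a plain dict before handing it over to the xmlrpc layer
    record_dict = record.todict()
    print(record_dict["hrn"], record_dict.get("reg-researchers"))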