From ca4a71145de39b3725517aec5801291a6c869c53 Mon Sep 17 00:00:00 2001
From: Thierry Parmentelat
Date: Wed, 15 Nov 2006 10:59:55 +0000
Subject: [PATCH]

- provides the ability to cache foreign slices; only LOCAL nodes are known
  to foreign slices, with their local id as expected
- the RefreshPeer logic currently is:
  *) invoke GetNodes and cache into ForeignNodes
  *) invoke GetForeignNodes, not cached for now, used in the next step
  *) invoke GetSlices, then cache into ForeignSlices; only local nodes are
     considered, with their alien node_ids transcoded to local node_ids
- gets rid of the foreign_id columns in peer_node and peer_slice, which
  turned out to be useless

=== a few pending issues
- GetSlivers is not yet updated accordingly; it still works on
  non-federated plcs
- site_id and creator_person_id in the slices table are not declared
  'NOT NULL' anymore; as far as GetSlices is concerned these entries are
  still non-null, as per the WHERE clauses added on view_slices
- refreshing the db to update the slice x node association is done in a
  clear-all/add-all fashion that clearly sucks.
- the caching logic is currently in Peers.py which is clearly wrong there's a need for refactoring these 2 types of cached objects to provide more genericity --- PLC/ForeignSlices.py | 46 +++++++---- PLC/Methods/RefreshPeer.py | 4 +- PLC/Peers.py | 165 +++++++++++++++++++++++++++++++++---- planetlab4.sql | 18 ++-- 4 files changed, 195 insertions(+), 38 deletions(-) diff --git a/PLC/ForeignSlices.py b/PLC/ForeignSlices.py index 20d88a8..c0372ab 100644 --- a/PLC/ForeignSlices.py +++ b/PLC/ForeignSlices.py @@ -28,6 +28,7 @@ class ForeignSlice (Row) : 'max_nodes': Parameter(int, "Maximum number of nodes that can be assigned to this slice"), 'created': Parameter(int, "Date and time when slice was created, in seconds since UNIX epoch", ro = True), 'expires': Parameter(int, "Date and time when slice expires, in seconds since UNIX epoch"), + 'node_ids' : Parameter([int], "List of nodes in this slice"), } def __init__(self,api,fields={},uptodate=True): @@ -46,6 +47,35 @@ class ForeignSlice (Row) : if commit: self.api.db.commit() + def purge_slice_node (self,commit=True): + sql = "DELETE FROM slice_node WHERE slice_id=%d"%self['slice_id'] + self.api.db.do(sql) + if commit: + self.api.db.commit() + + def add_slice_nodes (self, node_ids, commit=True): + slice_id = self['slice_id'] + ### xxx needs to be optimized + ### tried to figure a way to use a single sql statement + ### like: insert into table (x,y) values (1,2),(3,4); + ### but apparently this is not supported under postgresql + for node_id in node_ids: + sql="INSERT INTO slice_node VALUES (%d,%d)"%(slice_id,node_id) + self.api.db.do(sql) + if commit: + self.api.db.commit() + + def update_slice_nodes (self, node_ids): + # xxx to be optimized + # we could compute the (set) difference between + # current and updated set of node_ids + # and invoke the DB only based on that + # + # for now : clean all entries for this slice + self.purge_slice_node() + # and re-install new list + self.add_slice_nodes (node_ids) + def delete 
(self, commit=True): """ Delete existing foreign slice. @@ -62,7 +92,7 @@ class ForeignSlices (Table): sql = "" sql += "SELECT %s FROM view_foreign_slices " % ", ".join(self.columns) sql += "WHERE deleted IS False " - + if foreign_slice_filter is not None: if isinstance(foreign_slice_filter, (list, tuple, set)): # Separate the list into integers and strings @@ -76,17 +106,3 @@ class ForeignSlices (Table): self.selectall(sql) - # managing an index by slicename - def name_index(self): - if 'name' not in self.columns: - raise PLCFault,"ForeignSlices::name_index, name not selected" - self.index={} - for foreign_slice in self: - self.index[foreign_slice['name']]=foreign_slice - - def name_add_by(self,foreign_slice): - self.index[foreign_slice['name']]=foreign_slice - - def name_locate(self,name): - return self.index[name] - diff --git a/PLC/Methods/RefreshPeer.py b/PLC/Methods/RefreshPeer.py index 4a5abd0..d8dbbc2 100644 --- a/PLC/Methods/RefreshPeer.py +++ b/PLC/Methods/RefreshPeer.py @@ -64,7 +64,9 @@ class RefreshPeer(Method): peer_get_nodes = apiserver.GetNodes(auth) nb_new_nodes = peer.refresh_nodes(peer_get_nodes) + # rough and temporary + peer_foreign_nodes = apiserver.GetForeignNodes(auth) peer_get_slices = apiserver.GetSlices(auth) - nb_new_slices = peer.refresh_slices(peer_get_slices) + nb_new_slices = peer.refresh_slices(peer_get_slices,peer_foreign_nodes) return (self.api.config.PLC_NAME,nb_new_nodes,nb_new_slices) diff --git a/PLC/Peers.py b/PLC/Peers.py index 94d3204..c4ff3df 100644 --- a/PLC/Peers.py +++ b/PLC/Peers.py @@ -10,7 +10,9 @@ from PLC.Parameter import Parameter from PLC.Filter import Filter from PLC.Table import Row, Table +from PLC.Nodes import Nodes,Node from PLC.ForeignNodes import ForeignNodes,ForeignNode +from PLC.ForeignSlices import ForeignSlices,ForeignSlice class Peer (Row): """ @@ -25,7 +27,8 @@ class Peer (Row): 'peername' : Parameter (str, "Peer name"), 'peer_url' : Parameter (str, "Peer API url"), 'person_id' : Parameter (int, 
"Person_id of the account storing credentials - temporary"), - 'node_ids' : Parameter ([int], "This peer's nodes ids") + 'node_ids' : Parameter ([int], "This peer's nodes ids"), + 'slice_ids' : Parameter ([int], "This peer's slices ids"), } def validate_peer_url (self, url): @@ -64,14 +67,13 @@ class Peer (Row): node_ids = self.api.db.selectall(sql) return node_ids[0]['node_ids'] - def manage_node (self, foreign_node, foreign_id, commit=True): + def manage_node (self, foreign_node, add_if_true, commit=True): """ associate/dissociate a foreign node to/from a peer foreign_node is a local object that describes a remote node - foreign_id is the unique id as provided by the remote peer convention is: - if foreign_id is None : performs dissociation - otherwise: performs association + if add_if_true is None : performs dissociation + otherwise: performs association """ assert 'peer_id' in self @@ -80,13 +82,14 @@ class Peer (Row): peer_id = self['peer_id'] node_id = foreign_node ['node_id'] - if foreign_id: + if add_if_true: ### ADDING - sql = "INSERT INTO peer_node VALUES (%d,%d,%d)" % (peer_id,node_id,foreign_id) + sql = "INSERT INTO peer_node VALUES (%d,%d)" % (peer_id,node_id) self.api.db.do(sql) if self['node_ids'] is None: self['node_ids']=[node_id,] - self['node_ids'].append(node_id) + else: + self['node_ids'].append(node_id) ### DELETING else: sql = "DELETE FROM peer_node WHERE peer_id=%d AND node_id=%d" % (peer_id,node_id) @@ -96,6 +99,39 @@ class Peer (Row): if commit: self.api.db.commit() + def manage_slice (self, foreign_slice, add_if_true, commit=True): + """ + associate/dissociate a foreign node to/from a peer + foreign_slice is a local object that describes a remote slice + alien_id is the unique id as provided by the remote peer + convention is: + if add_if_true is None : performs dissociation + otherwise: performs association + """ + + assert 'peer_id' in self + assert 'slice_id' in foreign_slice + + peer_id = self['peer_id'] + slice_id = foreign_slice 
['slice_id'] + + if add_if_true: + ### ADDING + sql = "INSERT INTO peer_slice VALUES (%d,%d)" % (peer_id,slice_id) + self.api.db.do(sql) + if self['slice_ids'] is None: + self['slice_ids']=[slice_id,] + else: + self['slice_ids'].append(slice_id) + ### DELETING + else: + sql = "DELETE FROM peer_slice WHERE peer_id=%d AND slice_id=%d" % (peer_id,slice_id) + self.api.db.do(sql) + self['slice_ids'].remove(slice_id) + + if commit: + self.api.db.commit() + def refresh_nodes (self, peer_get_nodes): """ refreshes the foreign_nodes and peer_node tables @@ -109,7 +145,7 @@ class Peer (Row): # we get the whole table just in case # a host would have switched from one plc to the other local_foreign_nodes = ForeignNodes (self.api) - # new to index it by hostname for searching later + # index it by hostname for searching later local_foreign_nodes_index = local_foreign_nodes.dict('hostname') ### mark entries for this peer outofdate @@ -125,7 +161,6 @@ class Peer (Row): ### scan the new entries, and mark them uptodate for node in peer_get_nodes: hostname = node['hostname'] - foreign_id = node ['node_id'] try: foreign_node = local_foreign_nodes_index[hostname] if foreign_node['peer_id'] != peer_id: @@ -134,9 +169,9 @@ class Peer (Row): old_peers=Peers(self.api,[peer_id]) assert old_peer[0] # remove from previous peer - old_peers[0].manage_node(foreign_node,None,False) + old_peers[0].manage_node(foreign_node,False,False) # add to new peer - self.manage_node(foreign_node,foreign_id,True) + self.manage_node(foreign_node,True,True) foreign_node['peer_id'] = peer_id ### update it anyway: copy other relevant fields for field in remote_fields: @@ -151,7 +186,7 @@ class Peer (Row): ### need to sync so we get a node_id new_foreign_node.sync() new_foreign_node.uptodate = True - self.manage_node(new_foreign_node,foreign_id,True) + self.manage_node(new_foreign_node,True,True) local_foreign_nodes_index[hostname]=new_foreign_node ### delete entries that are not uptodate @@ -161,10 +196,110 @@ 
class Peer (Row): return len(peer_get_nodes)-old_count - def refresh_slices (self, peer_get_slices): - return 0 + ### transcode node_id + def locate_alien_node_id_in_foreign_nodes (self, peer_foreign_nodes_dict, alien_id): + """ + returns a local node_id as transcoded from an alien node_id + only lookups our local nodes because we dont need to know about other sites + returns a valid local node_id, or throws an exception + """ + peer_foreign_node = peer_foreign_nodes_dict[alien_id] + hostname = peer_foreign_node['hostname'] + return Nodes(self.api,[hostname])[0]['node_id'] + + def refresh_slices (self, peer_get_slices, peer_foreign_nodes): + """ + refreshes the foreign_slices and peer_slice tables + expected input is the current list of slices as returned by GetSlices + returns the number of new slices on this peer (can be negative) + """ + + peer_id = self['peer_id'] + # we get the whole table just in case + # a host would have switched from one plc to the other + local_foreign_slices = ForeignSlices (self.api) + # index it by name for searching later + local_foreign_slices_index = local_foreign_slices.dict('name') + + ### mark entries for this peer outofdate + old_count=0; + for foreign_slice in local_foreign_slices: + if foreign_slice['peer_id'] == peer_id: + foreign_slice.uptodate=False + old_count += 1 + + ### these fields get copied through + remote_fields = ['instantiation', 'url', 'description', + 'max_nodes', 'created', 'expires'] + + ### scan the new entries, and mark them uptodate + new_count=0 + for slice in peer_get_slices: + ### ignore system-wide slices + if slice['creator_person_id'] == 1: + continue + + name = slice['name'] + + # create or update + try: + foreign_slice = local_foreign_slices_index[name] + if foreign_slice['peer_id'] != peer_id: + ### the slice has changed its plc, needs to update peer_slice + old_peer_id = foreign_slice['peer_id'] + old_peers=Peers(self.api,[peer_id]) + assert old_peer[0] + # remove from previous peer + 
old_peers[0].manage_slice(foreign_slice,False,False) + # add to new peer + self.manage_slice(foreign_slice,True,True) + foreign_slice['peer_id'] = peer_id + except: + foreign_slice = ForeignSlice(self.api, {'name':name}) +# ### xxx temporary +# foreign_slice['site_id']=1 + ### need to sync so we get a slice_id + foreign_slice.sync() + self.manage_slice(foreign_slice,True,True) + # insert in index + local_foreign_slices_index[name]=foreign_slice + + # go on with update + for field in remote_fields: + foreign_slice[field]=slice[field] + # this row is now valid + foreign_slice.uptodate=True + new_count += 1 + foreign_slice.sync() + + ### handle node_ids + # in slice we get a set of node_ids + # but these ids are RELATIVE TO THE PEER + # so we need to figure the local node_id for these nodes + # we do this through peer_foreign_nodes + # dictify once + peer_foreign_nodes_dict = {} + for foreign_node in peer_foreign_nodes: + peer_foreign_nodes_dict[foreign_node['node_id']]=foreign_node + updated_node_ids = [] + for alien_node_id in slice['node_ids']: + try: + local_node_id=self.locate_alien_node_id_in_foreign_nodes(peer_foreign_nodes_dict,alien_node_id) + updated_node_ids.append(local_node_id) + except: + # this node_id is not in our scope + pass + foreign_slice.update_slice_nodes (updated_node_ids) + + ### delete entries that are not uptodate + for foreign_slice in local_foreign_slices: + if not foreign_slice.uptodate: + foreign_slice.delete() + + return new_count-old_count + class Peers (Table): """ Maps to the peers table in the database diff --git a/planetlab4.sql b/planetlab4.sql index 31711e1..8b38319 100644 --- a/planetlab4.sql +++ b/planetlab4.sql @@ -9,7 +9,7 @@ -- -- Copyright (C) 2006 The Trustees of Princeton University -- --- $Id: planetlab4.sql,v 1.33 2006/11/13 18:41:59 mlhuang Exp $ +-- $Id: planetlab4.sql,v 1.34 2006/11/14 09:44:40 thierry Exp $ -- -------------------------------------------------------------------------------- @@ -299,7 +299,6 @@ GROUP 
BY site_id; CREATE TABLE peer_node ( peer_id integer REFERENCES peers NOT NULL, -- Peer identifier node_id integer REFERENCES nodes NOT NULL, -- (Local) node identifier - foreign_id integer NOT NULL, -- (Peer) node identifier PRIMARY KEY (peer_id, node_id), UNIQUE (node_id) -- Nodes can only be at one peer ) WITH OIDS; @@ -530,7 +529,8 @@ INSERT INTO slice_instantiations (instantiation) VALUES ('delegated'); -- Manual -- Slices CREATE TABLE slices ( slice_id serial PRIMARY KEY, -- Slice identifier - site_id integer REFERENCES sites NOT NULL, -- Site identifier +-- xxx temporarily remove the NOT NULL constraint + site_id integer REFERENCES sites, -- Site identifier name text NOT NULL, -- Slice name instantiation text REFERENCES slice_instantiations NOT NULL DEFAULT 'plc-instantiated', -- Slice state, e.g. plc-instantiated url text, -- Project URL @@ -538,7 +538,8 @@ CREATE TABLE slices ( max_nodes integer NOT NULL DEFAULT 100, -- Maximum number of nodes that can be assigned to this slice - creator_person_id integer REFERENCES persons NOT NULL, -- Creator +-- xxx temporarily remove the NOT NULL constraint + creator_person_id integer REFERENCES persons, -- Creator created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP, -- Creation date expires timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP + '2 weeks', -- Expiration date @@ -585,7 +586,6 @@ GROUP BY site_id; CREATE TABLE peer_slice ( peer_id integer REFERENCES peers NOT NULL, -- peer primary key slice_id integer REFERENCES slices NOT NULL, -- node primary key - foreign_id integer NOT NULL, PRIMARY KEY (peer_id, slice_id) ) WITH OIDS; CREATE INDEX peer_slice_peer_id_idx ON peer_slice (peer_id); @@ -969,7 +969,9 @@ LEFT JOIN peer_slice USING (slice_id) LEFT JOIN slice_nodes USING (slice_id) LEFT JOIN slice_persons USING (slice_id) LEFT JOIN slice_attributes USING (slice_id) -WHERE peer_slice.peer_id IS NULL; +WHERE peer_slice.peer_id IS NULL +AND slices.site_id IS NOT NULL +AND 
slices.creator_person_id IS NOT NULL; CREATE VIEW view_foreign_slices AS SELECT @@ -982,9 +984,11 @@ slices.description, slices.max_nodes, slices.deleted, CAST(date_part('epoch', slices.created) AS bigint) AS created, -CAST(date_part('epoch', slices.expires) AS bigint) AS expires +CAST(date_part('epoch', slices.expires) AS bigint) AS expires, +COALESCE(slice_nodes.node_ids, '{}') AS node_ids FROM slices LEFT JOIN peer_slice USING (slice_id) +LEFT JOIN slice_nodes USING (slice_id) WHERE peer_slice.peer_id IS NOT NULL; -- -- 2.43.0