'max_nodes': Parameter(int, "Maximum number of nodes that can be assigned to this slice"),
'created': Parameter(int, "Date and time when slice was created, in seconds since UNIX epoch", ro = True),
'expires': Parameter(int, "Date and time when slice expires, in seconds since UNIX epoch"),
+ 'node_ids' : Parameter([int], "List of nodes in this slice"),
}
def __init__(self,api,fields={},uptodate=True):
if commit:
self.api.db.commit()
+    def purge_slice_node (self,commit=True):
+        """
+        Remove every slice_node row attached to this slice.
+        commit: when True (default) the transaction is committed immediately;
+        pass False to batch this with further statements.
+        """
+        # slice_id is our own integer primary key, so %d formatting is safe here
+        sql = "DELETE FROM slice_node WHERE slice_id=%d"%self['slice_id']
+        self.api.db.do(sql)
+        if commit:
+            self.api.db.commit()
+
+    def add_slice_nodes (self, node_ids, commit=True):
+        """
+        Attach the given (local) node_ids to this slice, one
+        slice_node row per node.
+        commit: when True (default) the transaction is committed at the end.
+        """
+        slice_id = self['slice_id']
+        ### xxx needs to be optimized
+        ### tried to figure a way to use a single sql statement
+        ### like: insert into table (x,y) values (1,2),(3,4);
+        ### but apparently this is not supported under postgresql
+        for node_id in node_ids:
+            sql="INSERT INTO slice_node VALUES (%d,%d)"%(slice_id,node_id)
+            self.api.db.do(sql)
+        if commit:
+            self.api.db.commit()
+
+ def update_slice_nodes (self, node_ids):
+ # xxx to be optimized
+ # we could compute the (set) difference between
+ # current and updated set of node_ids
+ # and invoke the DB only based on that
+ #
+ # for now : clean all entries for this slice
+ self.purge_slice_node()
+ # and re-install new list
+ self.add_slice_nodes (node_ids)
+
def delete (self, commit=True):
"""
Delete existing foreign slice.
sql = ""
sql += "SELECT %s FROM view_foreign_slices " % ", ".join(self.columns)
sql += "WHERE deleted IS False "
-
+
if foreign_slice_filter is not None:
if isinstance(foreign_slice_filter, (list, tuple, set)):
# Separate the list into integers and strings
self.selectall(sql)
- # managing an index by slicename
- def name_index(self):
- if 'name' not in self.columns:
- raise PLCFault,"ForeignSlices::name_index, name not selected"
- self.index={}
- for foreign_slice in self:
- self.index[foreign_slice['name']]=foreign_slice
-
- def name_add_by(self,foreign_slice):
- self.index[foreign_slice['name']]=foreign_slice
-
- def name_locate(self,name):
- return self.index[name]
-
from PLC.Filter import Filter
from PLC.Table import Row, Table
+from PLC.Nodes import Nodes,Node
from PLC.ForeignNodes import ForeignNodes,ForeignNode
+from PLC.ForeignSlices import ForeignSlices,ForeignSlice
class Peer (Row):
"""
'peername' : Parameter (str, "Peer name"),
'peer_url' : Parameter (str, "Peer API url"),
'person_id' : Parameter (int, "Person_id of the account storing credentials - temporary"),
- 'node_ids' : Parameter ([int], "This peer's nodes ids")
+ 'node_ids' : Parameter ([int], "This peer's nodes ids"),
+ 'slice_ids' : Parameter ([int], "This peer's slices ids"),
}
def validate_peer_url (self, url):
node_ids = self.api.db.selectall(sql)
return node_ids[0]['node_ids']
- def manage_node (self, foreign_node, foreign_id, commit=True):
+ def manage_node (self, foreign_node, add_if_true, commit=True):
"""
associate/dissociate a foreign node to/from a peer
foreign_node is a local object that describes a remote node
- foreign_id is the unique id as provided by the remote peer
convention is:
- if foreign_id is None : performs dissociation
- otherwise: performs association
+ if add_if_true is None : performs dissociation
+ otherwise: performs association
"""
assert 'peer_id' in self
peer_id = self['peer_id']
node_id = foreign_node ['node_id']
- if foreign_id:
+ if add_if_true:
### ADDING
- sql = "INSERT INTO peer_node VALUES (%d,%d,%d)" % (peer_id,node_id,foreign_id)
+ sql = "INSERT INTO peer_node VALUES (%d,%d)" % (peer_id,node_id)
self.api.db.do(sql)
if self['node_ids'] is None:
self['node_ids']=[node_id,]
- self['node_ids'].append(node_id)
+ else:
+ self['node_ids'].append(node_id)
### DELETING
else:
sql = "DELETE FROM peer_node WHERE peer_id=%d AND node_id=%d" % (peer_id,node_id)
if commit:
self.api.db.commit()
+ def manage_slice (self, foreign_slice, add_if_true, commit=True):
+ """
+ associate/dissociate a foreign node to/from a peer
+ foreign_slice is a local object that describes a remote slice
+ alien_id is the unique id as provided by the remote peer
+ convention is:
+ if add_if_true is None : performs dissociation
+ otherwise: performs association
+ """
+
+ assert 'peer_id' in self
+ assert 'slice_id' in foreign_slice
+
+ peer_id = self['peer_id']
+ slice_id = foreign_slice ['slice_id']
+
+ if add_if_true:
+ ### ADDING
+ sql = "INSERT INTO peer_slice VALUES (%d,%d)" % (peer_id,slice_id)
+ self.api.db.do(sql)
+ if self['slice_ids'] is None:
+ self['slice_ids']=[slice_id,]
+ else:
+ self['slice_ids'].append(slice_id)
+ ### DELETING
+ else:
+ sql = "DELETE FROM peer_slice WHERE peer_id=%d AND slice_id=%d" % (peer_id,slice_id)
+ self.api.db.do(sql)
+ self['slice_ids'].remove(slice_id)
+
+ if commit:
+ self.api.db.commit()
+
def refresh_nodes (self, peer_get_nodes):
"""
refreshes the foreign_nodes and peer_node tables
# we get the whole table just in case
# a host would have switched from one plc to the other
local_foreign_nodes = ForeignNodes (self.api)
- # new to index it by hostname for searching later
+ # index it by hostname for searching later
local_foreign_nodes_index = local_foreign_nodes.dict('hostname')
### mark entries for this peer outofdate
### scan the new entries, and mark them uptodate
for node in peer_get_nodes:
hostname = node['hostname']
- foreign_id = node ['node_id']
try:
foreign_node = local_foreign_nodes_index[hostname]
if foreign_node['peer_id'] != peer_id:
old_peers=Peers(self.api,[peer_id])
assert old_peer[0]
# remove from previous peer
- old_peers[0].manage_node(foreign_node,None,False)
+ old_peers[0].manage_node(foreign_node,False,False)
# add to new peer
- self.manage_node(foreign_node,foreign_id,True)
+ self.manage_node(foreign_node,True,True)
foreign_node['peer_id'] = peer_id
### update it anyway: copy other relevant fields
for field in remote_fields:
### need to sync so we get a node_id
new_foreign_node.sync()
new_foreign_node.uptodate = True
- self.manage_node(new_foreign_node,foreign_id,True)
+ self.manage_node(new_foreign_node,True,True)
local_foreign_nodes_index[hostname]=new_foreign_node
### delete entries that are not uptodate
return len(peer_get_nodes)-old_count
- def refresh_slices (self, peer_get_slices):
- return 0
+    ### transcode node_id
+    def locate_alien_node_id_in_foreign_nodes (self, peer_foreign_nodes_dict, alien_id):
+        """
+        returns a local node_id as transcoded from an alien node_id
+        only lookups our local nodes because we dont need to know about other sites
+        returns a valid local node_id, or throws an exception
+        peer_foreign_nodes_dict: the peer's nodes indexed by their node_id
+        alien_id: a node_id expressed in the peer's id space
+        """
+        # raises KeyError when alien_id is unknown to the peer
+        peer_foreign_node = peer_foreign_nodes_dict[alien_id]
+        hostname = peer_foreign_node['hostname']
+        # raises IndexError when that hostname is not one of our local nodes
+        return Nodes(self.api,[hostname])[0]['node_id']
+
+ def refresh_slices (self, peer_get_slices, peer_foreign_nodes):
+ """
+ refreshes the foreign_slices and peer_slice tables
+ expected input is the current list of slices as returned by GetSlices
+ returns the number of new slices on this peer (can be negative)
+ """
+
+ peer_id = self['peer_id']
+ # we get the whole table just in case
+ # a host would have switched from one plc to the other
+ local_foreign_slices = ForeignSlices (self.api)
+ # index it by name for searching later
+ local_foreign_slices_index = local_foreign_slices.dict('name')
+
+ ### mark entries for this peer outofdate
+ old_count=0;
+ for foreign_slice in local_foreign_slices:
+ if foreign_slice['peer_id'] == peer_id:
+ foreign_slice.uptodate=False
+ old_count += 1
+
+ ### these fields get copied through
+ remote_fields = ['instantiation', 'url', 'description',
+ 'max_nodes', 'created', 'expires']
+
+ ### scan the new entries, and mark them uptodate
+ new_count=0
+ for slice in peer_get_slices:
+ ### ignore system-wide slices
+ if slice['creator_person_id'] == 1:
+ continue
+
+ name = slice['name']
+
+ # create or update
+ try:
+ foreign_slice = local_foreign_slices_index[name]
+ if foreign_slice['peer_id'] != peer_id:
+ ### the slice has changed its plc, needs to update peer_slice
+ old_peer_id = foreign_slice['peer_id']
+ old_peers=Peers(self.api,[peer_id])
+ assert old_peer[0]
+ # remove from previous peer
+ old_peers[0].manage_slice(foreign_slice,False,False)
+ # add to new peer
+ self.manage_slice(foreign_slice,True,True)
+ foreign_slice['peer_id'] = peer_id
+ except:
+ foreign_slice = ForeignSlice(self.api, {'name':name})
+# ### xxx temporary
+# foreign_slice['site_id']=1
+ ### need to sync so we get a slice_id
+ foreign_slice.sync()
+ self.manage_slice(foreign_slice,True,True)
+ # insert in index
+ local_foreign_slices_index[name]=foreign_slice
+
+ # go on with update
+ for field in remote_fields:
+ foreign_slice[field]=slice[field]
+ # this row is now valid
+ foreign_slice.uptodate=True
+ new_count += 1
+ foreign_slice.sync()
+
+ ### handle node_ids
+ # in slice we get a set of node_ids
+ # but these ids are RELATIVE TO THE PEER
+ # so we need to figure the local node_id for these nodes
+ # we do this through peer_foreign_nodes
+ # dictify once
+ peer_foreign_nodes_dict = {}
+ for foreign_node in peer_foreign_nodes:
+ peer_foreign_nodes_dict[foreign_node['node_id']]=foreign_node
+ updated_node_ids = []
+ for alien_node_id in slice['node_ids']:
+ try:
+ local_node_id=self.locate_alien_node_id_in_foreign_nodes(peer_foreign_nodes_dict,alien_node_id)
+ updated_node_ids.append(local_node_id)
+ except:
+ # this node_id is not in our scope
+ pass
+ foreign_slice.update_slice_nodes (updated_node_ids)
+
+ ### delete entries that are not uptodate
+ for foreign_slice in local_foreign_slices:
+ if not foreign_slice.uptodate:
+ foreign_slice.delete()
+
+ return new_count-old_count
+
class Peers (Table):
"""
Maps to the peers table in the database
--
-- Copyright (C) 2006 The Trustees of Princeton University
--
--- $Id: planetlab4.sql,v 1.33 2006/11/13 18:41:59 mlhuang Exp $
+-- $Id: planetlab4.sql,v 1.34 2006/11/14 09:44:40 thierry Exp $
--
--------------------------------------------------------------------------------
CREATE TABLE peer_node (
peer_id integer REFERENCES peers NOT NULL, -- Peer identifier
node_id integer REFERENCES nodes NOT NULL, -- (Local) node identifier
- foreign_id integer NOT NULL, -- (Peer) node identifier
PRIMARY KEY (peer_id, node_id),
UNIQUE (node_id) -- Nodes can only be at one peer
) WITH OIDS;
-- Slices
CREATE TABLE slices (
slice_id serial PRIMARY KEY, -- Slice identifier
- site_id integer REFERENCES sites NOT NULL, -- Site identifier
+-- xxx temporarily remove the NOT NULL constraint
+ site_id integer REFERENCES sites, -- Site identifier
name text NOT NULL, -- Slice name
instantiation text REFERENCES slice_instantiations NOT NULL DEFAULT 'plc-instantiated', -- Slice state, e.g. plc-instantiated
url text, -- Project URL
max_nodes integer NOT NULL DEFAULT 100, -- Maximum number of nodes that can be assigned to this slice
- creator_person_id integer REFERENCES persons NOT NULL, -- Creator
+-- xxx temporarily remove the NOT NULL constraint
+ creator_person_id integer REFERENCES persons, -- Creator
created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP, -- Creation date
expires timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP + '2 weeks', -- Expiration date
CREATE TABLE peer_slice (
    peer_id integer REFERENCES peers NOT NULL, -- peer primary key
    slice_id integer REFERENCES slices NOT NULL, -- slice primary key
-    foreign_id integer NOT NULL,
    PRIMARY KEY (peer_id, slice_id)
) WITH OIDS;
CREATE INDEX peer_slice_peer_id_idx ON peer_slice (peer_id);
LEFT JOIN slice_nodes USING (slice_id)
LEFT JOIN slice_persons USING (slice_id)
LEFT JOIN slice_attributes USING (slice_id)
-WHERE peer_slice.peer_id IS NULL;
+WHERE peer_slice.peer_id IS NULL
+AND slices.site_id IS NOT NULL
+AND slices.creator_person_id IS NOT NULL;
CREATE VIEW view_foreign_slices AS
SELECT
slices.max_nodes,
slices.deleted,
CAST(date_part('epoch', slices.created) AS bigint) AS created,
-CAST(date_part('epoch', slices.expires) AS bigint) AS expires
+CAST(date_part('epoch', slices.expires) AS bigint) AS expires,
+COALESCE(slice_nodes.node_ids, '{}') AS node_ids
FROM slices
LEFT JOIN peer_slice USING (slice_id)
+LEFT JOIN slice_nodes USING (slice_id)
WHERE peer_slice.peer_id IS NOT NULL;
--