From 56b2bed39de364cd20ff76fb022f7f65a5f21410 Mon Sep 17 00:00:00 2001 From: Thierry Parmentelat Date: Fri, 10 Nov 2006 15:05:52 +0000 Subject: [PATCH] peer x foreign_node relationship in a separate peer_node table -- uses new db interface --- PLC/ForeignNodes.py | 19 ++-- PLC/Methods/RefreshPeer.py | 71 ++++---------- PLC/Peers.py | 92 ++++++++++++++++++ TestPeers.py | 187 +++++++++++++++++++------------------ planetlab4.sql | 43 ++++----- 5 files changed, 242 insertions(+), 170 deletions(-) diff --git a/PLC/ForeignNodes.py b/PLC/ForeignNodes.py index 2518c18..61e892b 100644 --- a/PLC/ForeignNodes.py +++ b/PLC/ForeignNodes.py @@ -24,19 +24,25 @@ class ForeignNode (Row) : 'boot_state' : Parameter (str, "Boot state, see Node"), 'model' : Parameter (str,"Model, see Node"), 'version' : Parameter (str,"Version, see Node"), - 'date_created': Parameter(int, "Creation time, see Node"), - 'last_updated': Parameter(int, "Update time, see Node"), +# 'date_created': Parameter(int, "Creation time, see Node"), +# 'last_updated': Parameter(int, "Update time, see Node"), } def __init__(self,api,fields={},uptodate=True): Row.__init__(self,api,fields) self.uptodate=uptodate + def purge_peer_node (self,commit=True): + sql = "DELETE FROM peer_node WHERE node_id=%d"%self['node_id'] + self.api.db.do(sql) + if commit: + self.api.db.commit() + def delete (self, commit=True): """ Delete existing foreign node. """ - print 'in ForeignNode::delete',self + self.purge_peer_node() self['deleted']=True self.sync(commit) @@ -45,9 +51,10 @@ class ForeignNodes (Table): def __init__ (self, api, foreign_node_filter = None, columns = None): Table.__init__(self, api, ForeignNode, columns) - sql = "SELECT %s FROM view_foreign_nodes WHERE deleted IS False" % \ - ", ".join(self.columns) - + sql = "" + sql += "SELECT %s FROM view_foreign_nodes " % ", ".join(self.columns) + sql += "WHERE deleted IS False " + if foreign_node_filter is not None: if isinstance(foreign_node_filter, (list, tuple, set)): # Separate the list into integers and strings diff --git a/PLC/Methods/RefreshPeer.py b/PLC/Methods/RefreshPeer.py index dd4e339..3657e3b 100644 --- a/PLC/Methods/RefreshPeer.py +++ b/PLC/Methods/RefreshPeer.py @@ -19,7 +19,7 @@ class RefreshPeer(Method): Query a peer PLC for its list of nodes, and refreshes the local database accordingly - Returns None + Returns the number of new nodes from that peer - may be negative """ roles = ['admin'] @@ -27,18 +27,22 @@ class RefreshPeer(Method): accepts = [ Auth(), Parameter (int, "Peer id") ] - returns = None + returns = Parameter(int, "Delta in number of foreign nodes attached to that peer") def call (self, auth, peer_id): ### retrieve peer info - peers = Peers (self.api) - peer = peers[peer_id] + peers = Peers (self.api,[peer_id]) + if not peers: + raise PLCInvalidArgument,'no such peer_id:%d'%peer_id + peer=peers[0] ### retrieve account info person_id = peer['person_id'] persons = Persons (self.api,[person_id]) - person = persons[person_id] + if not persons: + raise PLCInvalidArgument,'no such person_id:%d'%person_id + person = persons[0] ### build up foreign auth auth={ 'Username': person['email'], @@ -47,57 +51,22 @@ class RefreshPeer(Method): 'Role' : 'admin' } ## connect to the peer's API - apiserver = xmlrpclib.Server (peer['peer_url']+"/PLCAPI/") - print 'auth',auth - current_peer_nodes = apiserver.GetNodes(auth,[]) - + url=peer['peer_url']+"/PLCAPI/" + print 'url=',url + apiserver = xmlrpclib.Server (url) + print 'auth=',auth + current_peer_nodes = apiserver.GetNodes(auth) + print 
'current_peer_nodes',current_peer_nodes + ## manual feed for tests -# n1 = {'hostname': 'n1.plc', 'boot_state': 'inst'} -# n2 = {'hostname': 'n2.plc', 'boot_state': 'inst'} -# n3 = {'hostname': 'n3.plc', 'boot_state': 'inst'} n11={'session': None, 'slice_ids': [], 'nodegroup_ids': [], 'last_updated': 1162884349, 'version': None, 'nodenetwork_ids': [], 'boot_state': 'inst', 'hostname': 'n11.plc1.org', 'site_id': 1, 'ports': None, 'pcu_ids': [], 'boot_nonce': None, 'node_id': 1, 'root_person_ids': [], 'key': None, 'date_created': 1162884349, 'model': None, 'conf_file_ids': [], 'ssh_rsa_key': None} n12={'session': None, 'slice_ids': [], 'nodegroup_ids': [], 'last_updated': 1162884349, 'version': None, 'nodenetwork_ids': [], 'boot_state': 'inst', 'hostname': 'n12.plc1.org', 'site_id': 1, 'ports': None, 'pcu_ids': [], 'boot_nonce': None, 'node_id': 1, 'root_person_ids': [], 'key': None, 'date_created': 1162884349, 'model': None, 'conf_file_ids': [], 'ssh_rsa_key': None} n21={'session': None, 'slice_ids': [], 'nodegroup_ids': [], 'last_updated': 1162884349, 'version': None, 'nodenetwork_ids': [], 'boot_state': 'boot', 'hostname': 'n21.plc2.org', 'site_id': 1, 'ports': None, 'pcu_ids': [], 'boot_nonce': None, 'node_id': 1, 'root_person_ids': [], 'key': None, 'date_created': 1162884349, 'model': None, 'conf_file_ids': [], 'ssh_rsa_key': None} n22={'session': None, 'slice_ids': [], 'nodegroup_ids': [], 'last_updated': 1162884349, 'version': None, 'nodenetwork_ids': [], 'boot_state': 'boot', 'hostname': 'n22.plc2.org', 'site_id': 1, 'ports': None, 'pcu_ids': [], 'boot_nonce': None, 'node_id': 1, 'root_person_ids': [], 'key': None, 'date_created': 1162884349, 'model': None, 'conf_file_ids': [], 'ssh_rsa_key': None} -# current_peer_nodes = [n21,n22] +# current_peer_nodes = [] +# print 'current_peer_nodes',current_peer_nodes - ### now to the db - # we get the whole table just in case - # a host would have switched from one plc to the other - foreign_nodes = ForeignNodes (self.api) - - ### mark entries for this peer outofdate - for foreign_node in foreign_nodes: - if foreign_node['peer_id'] == peer_id: - foreign_node.uptodate=False + nb_new_nodes = peer.refresh_nodes(current_peer_nodes) - ### these fields get copied through - remote_fields = ['boot_state','model','version','date_created','date_updated'] - - ### scan the new entries, and mark them uptodate - for node in current_peer_nodes: - hostname = node['hostname'] - foreign_node = foreign_nodes.get(hostname) - if foreign_node: - ### update it anyway - foreign_node['cached'] = True - foreign_node['peer_id'] = peer_id - # copy other relevant fields - for field in remote_fields: - foreign_node[field]=node[field] - # this row is valid - foreign_node.uptodate = True - else: - foreign_nodes[hostname] = ForeignNode(self.api, - {'hostname':hostname, - 'cached':True, - 'peer_id':peer_id,}) - for field in remote_fields: - foreign_nodes[hostname][field]=node[field] - - foreign_nodes[hostname].sync() - - ### delete entries that are not uptodate - [ x.delete() for x in foreign_nodes if not x.uptodate ] - + return nb_new_nodes diff --git a/PLC/Peers.py b/PLC/Peers.py index 8be0521..5499994 100644 --- a/PLC/Peers.py +++ b/PLC/Peers.py @@ -11,6 +11,8 @@ from PLC.Parameter import Parameter from PLC.Filter import Filter from PLC.Table import Row, Table +from PLC.ForeignNodes import ForeignNodes,ForeignNode + class Peer (Row): """ Stores the list of peering PLCs in the peers table. 
@@ -36,6 +38,96 @@ class Peer (Row):
             raise invalid_url
 
         return url
+
+    def manage_node (self, foreign_node, add_if_true_del_if_false=True, commit=True):
+        """
+        Add (or remove) a foreign node to (from) this peer,
+        depending on add_if_true_del_if_false
+        """
+
+        assert 'peer_id' in self
+        assert 'node_id' in foreign_node
+
+        peer_id = self['peer_id']
+        node_id = foreign_node['node_id']
+
+        if add_if_true_del_if_false:
+            ### ADDING
+            sql = "INSERT INTO peer_node VALUES (%d,%d)" % (peer_id,node_id)
+            self.api.db.do(sql)
+            if self['node_ids'] is None:
+                self['node_ids']=[]
+            self['node_ids'].append(node_id)
+        ### DELETING
+        else:
+            sql = "DELETE FROM peer_node WHERE peer_id=%d AND node_id=%d" % (peer_id,node_id)
+            self.api.db.do(sql)
+            self['node_ids'].remove(node_id)
+
+        if commit:
+            self.api.db.commit()
+
+    def refresh_nodes (self, current_peer_nodes):
+        """
+        Refreshes the foreign_nodes and peer_node tables;
+        expected input is the current list of nodes as returned by GetNodes.
+
+        Returns the number of new nodes on this peer (can be negative).
+        """
+
+        peer_id = self['peer_id']
+
+        # we fetch the whole table, in case a host
+        # has switched from one plc to the other
+        local_foreign_nodes = ForeignNodes (self.api)
+
+        ### mark entries for this peer outofdate
+        old_count=0
+        for foreign_node in local_foreign_nodes:
+            if foreign_node['peer_id'] == peer_id:
+                foreign_node.uptodate=False
+                old_count += 1
+
+        ### these fields get copied through
+        ### xxx need to figure out how to convert unix timestamps back to the db timestamp format
+#        remote_fields = ['boot_state','model','version','date_created','last_updated']
+        remote_fields = ['boot_state','model','version']
+
+        ### scan the new entries, and mark them uptodate
+        for node in current_peer_nodes:
+            hostname = node['hostname']
+            try:
+                foreign_node = ForeignNodes(self.api,{'hostname':hostname})[0]
+                if foreign_node['peer_id'] != peer_id:
+                    ### the node has switched plcs, keep peer_node in sync
+                    old_peer_id = foreign_node['peer_id']
+                    old_peers=Peers(self.api,[old_peer_id])
+                    assert old_peers[0]
+                    old_peers[0].manage_node(foreign_node,False)
+                    self.manage_node(foreign_node,True)
+                    foreign_node['peer_id'] = peer_id
+                ### update it anyway: copy other relevant fields
+                for field in remote_fields:
+                    foreign_node[field]=node[field]
+                # this row is now valid
+                foreign_node.uptodate=True
+                foreign_node.sync()
+            except IndexError:
+                # unknown hostname: create a brand new foreign node
+                new_foreign_node = ForeignNode(self.api, {'hostname':hostname})
+                for field in remote_fields:
+                    new_foreign_node[field]=node[field]
+                ### need to sync so we get a node_id
+                new_foreign_node.sync()
+                new_foreign_node.uptodate = True
+                self.manage_node(new_foreign_node,True)
+
+        ### delete entries that are not uptodate
+        for foreign_node in local_foreign_nodes:
+            if not foreign_node.uptodate:
+                foreign_node.delete()
+
+        return len(current_peer_nodes)-old_count
+
     def delete (self, commit=True):
         """
         Delete peer
diff --git a/TestPeers.py b/TestPeers.py
index 49b3aa9..5581fb6 100755
--- a/TestPeers.py
+++ b/TestPeers.py
@@ -13,59 +13,36 @@
 ### instead we create a PI account on the site_id=1
 ###
 ##############################
-### 
-### HOWTO
-###
-### ----------------------------------------
-### # run sql commands - PLC1
-### 
-### $ chroot /plc1/root psql planetlab4 pgsqluser
-###
-### # run sql commands - PLC2
-### 
-### $ chroot /plc2/root psql -h localhost -p 5433 planetlab4 pgsqluser
-### but then a password is required
-### 9a61ae18-779e-41b6-8a6c-67c439dc73e5
-### 
-### ----------------------------------------
-### # connecting to the API - PLC1
-### 
-### $ chroot /plc1/root
-### $ ./Shell.py --config /plc1/root/data/etc/planetlab/plc_config
-###
-### # connecting to the API - PLC2
-### 
-### $ chroot /plc2/root
-###
-### $ ./Shell.py --config /plc2/root/data/etc/planetlab/plc_config -h https://localhost:444/PLCAPI/
-### 
-### ----------------------------------------
-##############################
 
 import xmlrpclib
 
 plc1={ 'name':'plc1 in federation',
-       'root':'/plc1/root',
        'url':'https://lurch.cs.princeton.edu:443/',
-       'admin_id':'plc1@planet-lab.org',
-       'admin_password':'root',
-       'dbport':5432,
+       'builtin_admin_id':'root@localhost.localdomain',
+       'builtin_admin_password':'root',
+       'peer_admin_name':'plc1@planet-lab.org',
+       'peer_admin_password':'peer',
        'nodename':'n11.plc1.org'
        }
 plc2={ 'name':'plc2 in federation',
-       'root':'/plc1/root',
-       'url':'https://lurch.cs.princeton.edu:444/',
-       'admin_id':'plc2@planet-lab.org',
-       'admin_password':'root',
-       'dbport':5433,
+       'url':'https://planetlab-devbox.inria.fr:443/',
+       'builtin_admin_id':'root@localhost.localdomain',
+       'builtin_admin_password':'root',
+       'peer_admin_name':'plc2@planet-lab.org',
+       'peer_admin_password':'peer',
        'nodename':'n21.plc2.org'
        }
 
-plc=[plc1,plc2]
+## we use indexes 1 and 2
+plc=[None,None,None]
 # the server objects
-s=[None,None]
+s=[None,None,None]
 # the authentication objects
-a=[None,None]
+a=[None,None,None]
+aa=[None,None,None]
+
+def peer_index(i):
+    return 3-i
 
 ### cant use digits in slice login name
 def plain_name (i):
@@ -76,72 +53,98 @@ def plain_name (i):
     else:
         raise Exception,"Unexpected input in plain_name"
 
-def test00_init (args=[0,1]):
+def test00_init (args=[1,2]):
+    global plc,s,a,aa
+    ## have you loaded this file already (support for reload)
+    ## the plc table only gets populated on first load
+    if not plc[1]:
+        plc=[None,plc1,plc2]
+
+    for i in args:
+        url=plc[i]['url']+'/PLCAPI/'
+        s[i]=xmlrpclib.Server(url)
+        print 'initializing s[%d]'%i,url
+        aa[i]={'Username':plc[i]['builtin_admin_id'],
+               'AuthMethod':'password',
+               'AuthString':plc[i]['builtin_admin_password'],
+               'Role':'admin'}
+        print 'initialized aa[%d]'%i, aa[i]
+        a[i]={'Username':plc[i]['peer_admin_name'],
+              'AuthMethod':'password',
+              'AuthString':plc[i]['peer_admin_password'],
+              'Role':'admin'}
+        print 'initialized a[%d]'%i, a[i]
+
+def test00_print (args=[1,2]):
+    global plc,s,a,aa
+    for i in args:
+        print 's[%d]'%i,s[i]
+        print 'aa[%d]'%i, aa[i]
+        print 'a[%d]'%i, a[i]
+
+def test00_admin (args=[1,2]):
     global plc,s,a
     for i in args:
-        url=plc[i]['url']+'/PLCAPI/'
-        s[i]=xmlrpclib.Server(url)
-        print 'initializing s[%d]'%i,url
-        a[i]={'Username':plc[i]['admin_id'],
-              'AuthMethod':'password',
-              'AuthString':plc[i]['admin_password'],
-              'Role':'admin'}
-        print 'initialized a[%d]'%i, a[i]
-
-def test00_check (args=[0,1]):
+        peer=peer_index(i)
+        person_id=s[i].AddPerson(aa[i],{'first_name':'Local', 'last_name':'PeerPoint', 'role_ids':[10],
+                                        'email':plc[i]['peer_admin_name'],'password':plc[i]['peer_admin_password']})
+        print '%02d: created peer admin account %d, %s - %s'%(i,person_id,plc[i]['peer_admin_name'],plc[i]['peer_admin_password'])
+        plc[i]['peer_admin_id']=person_id
+
+def test00_enable (args=[1,2]):
     global plc,s,a
     for i in args:
-        n=len(s[i].GetNodes(a[i]))
-        f=len(s[i].GetForeignNodes(a[i]))
-        print 'Checking connection: got %d local nodes & %d foreign nodes'%(n,f)
+        peer=peer_index(i)
+        s[i].AdmSetPersonEnabled(aa[i],plc[i]['peer_admin_id'],True)
+        s[i].AddRoleToPerson(aa[i],'admin',plc[i]['peer_admin_id'])
+        print '%02d: enabled+admin on account %d:%s'%(i,plc[i]['peer_admin_id'],plc[i]['peer_admin_name'])
 
-def test01_pi (args=[0,1]):
+def test01_check (args=[1,2]):
     global plc,s,a
     for i in args:
-        peer=1-i
-        plc[i]['pi_id']=s[i].AddPerson(a[i],{'first_name':'Local', 'last_name':'PI', 'role_ids':[20],
-                                             'email':plc[i]['admin_id'],'password':plc[id]['admin_password']})
+        n=len(s[i].GetNodes(aa[i]))
+        f=len(s[i].GetForeignNodes(a[i]))
+        print '%02d: Checking connection: got %d local nodes & %d foreign nodes'%(i,n,f)
 
-def test01_node (args=[0,1]):
+def test01_node (args=[1,2]):
     global plc,s,a
     for i in args:
         n=s[i].AddNode(a[i],1,{'hostname': plc[i]['nodename']})
-        print '%02d: Added node %d %s',(i+1,n,plc[i]['nodename'])
+        print '%02d: Added node %d %s'%(i,n,plc[i]['nodename'])
 
-def test01_peer_person (args=[0,1]):
+def test01_peer_person (args=[1,2]):
     global plc,s,a
     for i in args:
-        peer=1-i
-        person_id = s[i].AddPerson (a[i], {'first_name':'Peering', 'last_name':plc[peer]['name'], 'role_ids':[3000],
-                                           'email':plc[peer]['admin_id'],'password':plc[peer]['admin_password']})
-        print '02%d:Created person %d as the peer person'%(i+1,person_id)
+        peer=peer_index(i)
+        person_id = s[i].AddPerson (a[i], {'first_name':'Peering(plain passwd)', 'last_name':plc[peer]['name'], 'role_ids':[3000],
+                                           'email':plc[peer]['peer_admin_name'],'password':plc[peer]['peer_admin_password']})
+        print '%02d: Created person %d as the peer person'%(i,person_id)
         plc[i]['peer_person_id']=person_id
 
-def test01_peer (args=[0,1]):
+def test01_peer (args=[1,2]):
     global plc,s,a
     for i in args:
-        peer=1-i
+        peer=peer_index(i)
         peer_id=s[i].AddPeer (a[i], {'peername':plc[peer]['name'],'peer_url':plc[peer]['url'],'person_id':plc[i]['peer_person_id']})
         # NOTE : need to manually reset the encrypted password through SQL at this point
-        print '%02d:Created peer %d'%(i+1,peer_id)
+        print '%02d: Created peer %d'%(i,peer_id)
         plc[i]['peer_id']=peer_id
-        print "Please MANUALLY set passwd for person_id=%d in DB%d"%(person_id,i+1)
+        print "Please MANUALLY set passwd for person_id=%d in DB%d"%(plc[i]['peer_person_id'],i)
 
-def test02_refresh (args=[0,1]):
+def test02_refresh (args=[1,2]):
     global plc,s,a
     for i in args:
-        print '%02d: Refreshing peer'%(i+1)
-        s[i].RefreshPeer(plc[i]['peer_id'])
-        ###### at this stage both sites know about two nodes, one local and one foreign
+        print '%02d: Refreshing peer'%(i)
+        s[i].RefreshPeer(a[i],plc[i]['peer_id'])
 
-def test03_site (args=[0,1]):
+def test03_site (args=[1,2]):
     global plc,s,a
     for i in args:
-        peer=1-i
+        peer=peer_index(i)
         ### create a site (required for creating a slice)
-        sitename="site"+str(i+1)
-        abbrev_name="abbr"+str(i+1)
-        plain=plain_name(i+1)
+        sitename="site"+str(i)
+        abbrev_name="abbr"+str(i)
+        plain=plain_name(i)
         site_id=s[i].AddSite (a[i], {'name':plc[i]['name'],
                                      'abbreviated_name': abbrev_name,
                                      'login_base': plain,
@@ -150,40 +153,46 @@
                                      'max_slices':10})
         ### max_slices does not seem taken into account at that stage
         s[i].UpdateSite(a[i],plc[i]['site_id'],{'max_slices':10})
-        print '%02d: Created site %d with max_slices=10'%(i+1,site_id)
+        print '%02d: Created site %d with max_slices=10'%(i,site_id)
         plc[i]['site_id']=site_id
 
-def test03_slice (args=[0,1]):
+def test03_slice (args=[1,2]):
     global plc,s,a
     for i in args:
-        peer=1-i
-        plain=plain_name(i+1)
+        peer=peer_index(i)
+        plain=plain_name(i)
         ### create a slice
-        slice_name="slic"+str(i+1)
+        slice_name="slic"+str(i)
         slice_id=s[i].AddSlice (a[i],{'name':'%s_%s'%(plain,slice_name),
                                       'description':'slice %s_%s on plc %s'%(plain,slice_name,plc[i]['name']),
                                       'url':'http://planet-lab.org/%s'%slice_name,
                                       'max_nodes':100,
                                       'instanciation':'plc-instantiated',
                                       })
-        print '%02d: created slice %d'%(i+1,slice_id)
+        print '%02d: created slice %d'%(i,slice_id)
         plc[i]['slice_id']=slice_id
 
-def test04_lnode (args=[0,1]):
+def test04_lnode (args=[1,2]):
     global plc,s,a
     for i in args:
         ### add node to it
         hostname=plc[i]['nodename']
         s[i].AddSliceToNodes (a[i], plc[i]['slice_id'],hostname)
-        print '%02d: added local node %s'%(i+1,hostname)
+        print '%02d: added local node %s'%(i,hostname)
 
-def test04_fnode (args=[0,1]):
+def test04_fnode (args=[1,2]):
     global plc,s,a
     for i in args:
-        peer=1-i
+        peer=peer_index(i)
         ### add node to it
         hostname=plc[peer]['nodename']
         s[i].AddSliceToNodes (a[i], plc[i]['slice_id'],hostname)
-        print '%02d: added local node %s'%(i+1,hostname)
+        print '%02d: added foreign node %s'%(i,hostname)
+
+def catch_up (args=[1,2]):
+    for i in args:
+        plc[i]['peer_admin_id']=3
+        plc[i]['peer_person_id']=4
+        plc[i]['peer_id']=1
diff --git a/planetlab4.sql b/planetlab4.sql
index 290785c..7cb748b 100644
--- a/planetlab4.sql
+++ b/planetlab4.sql
@@ -9,7 +9,7 @@
 --
 -- Copyright (C) 2006 The Trustees of Princeton University
 --
--- $Id: planetlab4.sql,v 1.28 2006/11/08 17:34:07 thierry Exp $
+-- $Id: planetlab4.sql,v 1.29 2006/11/08 22:07:29 mlhuang Exp $
 --
 --------------------------------------------------------------------------------
 
@@ -271,8 +271,8 @@ CREATE TABLE nodes (
     hostname text NOT NULL,                    -- Node hostname
     site_id integer REFERENCES sites,          -- At which site (clause NOT NULL removed for foreign_nodes)
     boot_state text REFERENCES boot_states NOT NULL DEFAULT 'inst', -- Node boot state
-    cached boolean NOT NULL DEFAULT false,     -- is this entry cached from a peer ?
-    peer_id integer REFERENCES peers,          -- if cached, then from what peer
+--    cached boolean NOT NULL DEFAULT false,   -- is this entry cached from a peer ?
+--    peer_id integer REFERENCES peers,        -- if cached, then from what peer
     deleted boolean NOT NULL DEFAULT false,    -- Is deleted
 
     -- Optional
@@ -297,11 +297,20 @@ array_accum(node_id) AS node_ids
 FROM nodes
 GROUP BY site_id;
 
+-- Nodes - peers relationship
+CREATE TABLE peer_node (
+    peer_id integer REFERENCES peers NOT NULL, -- peer primary key
+    node_id integer REFERENCES nodes NOT NULL, -- node primary key
+    PRIMARY KEY (peer_id, node_id)
+) WITH OIDS;
+CREATE INDEX peer_node_peer_id_idx ON peer_node (peer_id);
+CREATE INDEX peer_node_node_id_idx ON peer_node (node_id);
+
 -- Nodes at each peer
 CREATE VIEW peer_nodes AS
 SELECT peer_id,
-array_to_string(array_accum(node_id), ',') AS node_ids
-FROM nodes
+array_accum(node_id) AS node_ids
+FROM peer_node
 GROUP BY peer_id;
 
 CREATE VIEW view_peers AS
@@ -807,19 +816,20 @@ COALESCE(node_pcus.ports, '{}') AS ports,
 COALESCE(node_conf_files.conf_file_ids, '{}') AS conf_file_ids,
 node_session.session_id AS session
 FROM nodes
+LEFT JOIN peer_node USING (node_id)
 LEFT JOIN node_nodenetworks USING (node_id)
 LEFT JOIN node_nodegroups USING (node_id)
 LEFT JOIN node_slices USING (node_id)
 LEFT JOIN node_pcus USING (node_id)
 LEFT JOIN node_conf_files USING (node_id)
 LEFT JOIN node_session USING (node_id)
-WHERE nodes.cached=False;
+WHERE peer_node.peer_id IS NULL;
 
 CREATE VIEW view_foreign_nodes AS
 SELECT
 nodes.node_id,
 nodes.hostname,
-nodes.peer_id,
+peer_node.peer_id,
 nodes.boot_state,
 nodes.model,
 nodes.version,
@@ -828,8 +838,9 @@ CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated,
 node_slices.slice_ids,
 nodes.deleted
 FROM nodes
+LEFT JOIN peer_node USING (node_id)
 LEFT JOIN node_slices USING (node_id)
-WHERE nodes.cached=True AND nodes.deleted=False;
+WHERE peer_node.peer_id IS NOT NULL;
 
 CREATE VIEW view_nodegroups AS
 SELECT
@@ -983,21 +994,5 @@ INSERT INTO sites
 VALUES
 ('pl',
  'PlanetLab Central',
  'PLC',
  100);
--- federation stuff starting here
-
---CREATE TABLE foreign_nodes (
---       foreign_node_id serial PRIMARY KEY,     -- identifier
---       hostname text NOT NULL,
---       boot_state text NOT NULL,
---       peer_id integer REFERENCES peers NOT NULL,
---
---       deleted boolean NOT NULL DEFAULT false
---) WITH OIDS;
-
---CREATE VIEW peer_foreign_nodes AS
---SELECT peer_id,
---array_to_string(array_accum(foreign_node_id), ',') AS foreign_node_ids
---FROM foreign_nodes
---GROUP BY peer_id;
-- 
2.43.0
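
A note for the reader trying this out: TestPeers.py defines the individual steps but no driver, so the intended sequence has to be read off the test numbering. Below is a minimal sketch of a full run from a python prompt, assuming both plcs are reachable with the urls and credentials hardcoded in the plc1/plc2 dicts above; the one manual step is the SQL password reset that test01_peer prints a reminder about.

    from TestPeers import *

    test00_init ()          # build the xmlrpclib proxies (s) and auth structs (a, aa)
    test00_admin ()         # create the peer admin accounts, using the builtin admin
    test00_enable ()        # enable those accounts and grant them the admin role
    test01_check ()         # sanity check: local/foreign node counts on both sides
    test01_node ()          # declare one local node per plc
    test01_peer_person ()   # create the mutual accounts used for peering
    test01_peer ()          # declare each plc as the other's peer
    # manual step here: reset the encrypted passwords through SQL
    test02_refresh ()       # pull the foreign nodes from both peers
    test03_site (); test03_slice ()   # a site and a slice on each side
    test04_lnode (); test04_fnode ()  # add a local, then a foreign node to the slice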
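The same refresh can be driven from any plain XML-RPC client, which also shows off the new return value of RefreshPeer. Another sketch -- the url, account and peer_id below are hypothetical placeholders, standing for whatever your own AddPeer call and peer admin setup produced:

    import xmlrpclib

    url = 'https://plc1.example.org:443/PLCAPI/'       # hypothetical plc
    auth = { 'Username'   : 'plc1@planet-lab.org',     # a peer admin account
             'AuthMethod' : 'password',
             'AuthString' : 'peer',
             'Role'       : 'admin' }
    peer_id = 1                                        # as returned by AddPeer

    apiserver = xmlrpclib.Server (url)
    # RefreshPeer now returns the delta in foreign nodes, possibly negative
    delta = apiserver.RefreshPeer (auth, peer_id)
    print 'RefreshPeer reports %d new foreign node(s)' % delta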
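Finally, the invariant behind the schema change -- a node is foreign exactly when it has a row in peer_node, which is the criterion view_nodes and view_foreign_nodes now rely on -- can be spot-checked straight against the database (the HOWTO block removed from TestPeers.py recalled how to reach psql inside the chroot). A sketch assuming a DB-API driver (psycopg here) and local connection settings; only the table and view names come from planetlab4.sql:

    import psycopg                       # any DB-API module would do

    conn = psycopg.connect ('dbname=planetlab4 user=pgsqluser')
    cursor = conn.cursor ()

    # how many foreign nodes does each peer currently cache ?
    cursor.execute ("SELECT peer_id, count(node_id) FROM peer_node GROUP BY peer_id")
    for (peer_id, nb_nodes) in cursor.fetchall ():
        print 'peer %d caches %d foreign node(s)' % (peer_id, nb_nodes)

    # the same information, as the API sees it through the view
    cursor.execute ("SELECT peer_id, hostname FROM view_foreign_nodes WHERE deleted IS false")
    for (peer_id, hostname) in cursor.fetchall ():
        print 'peer %d: %s' % (peer_id, hostname)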