return local_id
+ # for handling simple n-to-n relation tables, like e.g. slice_node
class XrefTable:
def __init__ (self, api, tablename, class1, class2):
# classname: the type of objects we are talking about; e.g. 'Slice'
# peer_object_list list of objects at a given peer - e.g. peer.GetSlices()
# alien_xref_objs_dict : a dict {'classname':alien_obj_list} e.g. {'Node':peer.GetNodes()}
- # his must match the keys in xref_specs
+ # we need an entry for each class mentioned in the class's foreign_xrefs
# lambda_ignore : the alien objects are ignored if this returns true
def update_table (self,
classname,
lambda_ignore=lambda x:False,
report_name_conflicts = True):
+ verbose ("============================== entering update_table on",classname)
peer_id=self.peer_id
attrs = class_attributes (classname)
## allocate transcoders and xreftables once, for each item in foreign_xrefs
# create a dict 'classname' -> {'transcoder' : ..., 'xref_table' : ...}
- accessories = dict(
- [ (xref_classname,
- {'transcoder':Cache.Transcoder (self.api,xref_classname,alien_xref_objs_dict[xref_classname]),
- 'xref_table':Cache.XrefTable (self.api,xref_spec['table'],classname,xref_classname)})
- for xref_classname,xref_spec in foreign_xrefs.iteritems()])
+ xref_accessories = dict(
+ [ (xref['field'],
+ {'transcoder' : Cache.Transcoder (self.api,xref['class'],alien_xref_objs_dict[xref['class']]),
+ 'xref_table' : Cache.XrefTable (self.api,xref['table'],classname,xref['class'])})
+ for xref in foreign_xrefs ])
+
+ # the fields that are direct references, like e.g. site_id in Node
+ # determined lazily, we need an alien_object to do that, and we may have none here
+ direct_ref_fields = None
### get current local table
# get ALL local objects so as to cope with
if local_object ['peer_id'] is None:
if report_name_conflicts:
### xxx send e-mail
- print '==================== We are in trouble here'
- print 'The %s object named %s is natively defined twice'%(classname,object_name)
- print 'Once on this PLC and once on peer %d'%peer_id
+ print '!!!!!!!!!! We are in trouble here'
+ print 'The %s object named %s is natively defined twice, '%(classname,object_name),
+ print 'once on this PLC and once on peer %d'%peer_id
print 'We dont raise an exception so that the remaining updates can still take place'
+ print '!!!!!!!!!!'
continue
if local_object['peer_id'] != peer_id:
### the object has changed its plc,
### we can assume the object just moved
### needs to update peer_id though
local_object['peer_id'] = peer_id
+ # update all fields as per foreign_fields
+ for field in foreign_fields:
+ local_object[field]=alien_object[field]
verbose ('update_table FOUND',object_name)
except:
### create a new entry
# insert in index
local_objects_index[class_key]=local_object
verbose ('update_table CREATED',object_name)
-
- # go on with update
- for field in foreign_fields:
- local_object[field]=alien_object[field]
+ # update all fields as per foreign_fields
+ for field in foreign_fields:
+ local_object[field]=alien_object[field]
+ # this is tricky; at this point we may have primary_key unspecified,
+ # but we need it for handling xrefs below, so we'd like to sync to get one
+ # on the other hand some required fields may be still missing so
+ # the DB would refuse to sync in this case (e.g. site_id in Node)
+ # so let's fill them with 1 so we can sync, this will be overridden below
+ # lazily determine this set of fields now
+ if direct_ref_fields is None:
+ direct_ref_fields=[]
+ for xref in foreign_xrefs:
+ field=xref['field']
+ verbose('checking field %s for direct_ref'%field)
+ if isinstance(alien_object[field],int):
+ direct_ref_fields.append(field)
+ verbose("FOUND DIRECT REFS",direct_ref_fields)
+ for field in direct_ref_fields:
+ local_object[field]=1
+ verbose('Early sync on',local_object)
+ local_object.sync()
+ verbose('Early syncing of %s, reloading'%object_name)
+ # sigh: now we have to reload it because of side-effects, like e.g. on Slice.expires
+ local_object=table_class(self.api, {class_key:object_name})[0]
+ verbose('After reload',local_object)
# this row is now valid
local_object.uptodate=True
new_count += 1
- local_object.sync()
# manage cross-refs
- for xref_classname,xref_spec in foreign_xrefs.iteritems():
- field=xref_spec['field']
- alien_xref_obj_list = alien_xref_objs_dict[xref_classname]
+ for xref in foreign_xrefs:
+ field=xref['field']
+ alien_xref_obj_list = alien_xref_objs_dict[xref['class']]
alien_value = alien_object[field]
- transcoder = accessories[xref_classname]['transcoder']
+ transcoder = xref_accessories[xref['field']]['transcoder']
if isinstance (alien_value,list):
- verbose ('update_table list-transcoding ',xref_classname,' aliens=',alien_value,)
+ verbose ('update_table list-transcoding ',xref['class'],' aliens=',alien_value,)
local_values=[]
for a in alien_value:
try:
# could not transcode - might be from another peer that we dont know about..
pass
verbose (" transcoded as ",local_values)
- xref_table = accessories[xref_classname]['xref_table']
- # newly created objects dont have xrefs yet
+ xref_table = xref_accessories[xref['field']]['xref_table']
+ # newly created objects dont have xref fields set yet
try:
- former_xrefs=local_object[xref_spec['field']]
+ former_xrefs=local_object[xref['field']]
except:
former_xrefs=[]
xref_table.update_item (local_object[primary_key],
former_xrefs,
local_values)
elif isinstance (alien_value,int):
- verbose ('update_table atom-transcoding ',xref_classname,' aliens=',alien_value,)
+ verbose ('update_table atom-transcoding ',xref['class'],' aliens=',alien_value,)
new_value = transcoder.transcode(alien_value)
local_object[field] = new_value
- local_object.sync()
+
+ ### this object is completely updated, let's save it
+ verbose('FINAL sync on %s:'%object_name,local_object)
+ local_object.sync()
+
### delete entries that are not uptodate
for local_object in local_objects:
return slice['creator_person_id'] == 1
nb_new_slices = self.update_table ('Slice', plocal_slices,
- {'Node': all_nodes, 'Person': all_persons},
+ {'Node': all_nodes, 'Person': all_persons, 'Site': all_sites},
is_system_slice)
# refresh slice attributes
Node.fields['hostname'])],
Filter(Node.fields)),
Parameter([str], "List of fields to return", nullok = True),
- Parameter(str,"scope string, can be either 'all', 'local' or 'foreign'"),
]
returns = [Node.fields]
- def call(self, auth, node_filter = None, return_fields = None, scope = 'all'):
+ def call(self, auth, node_filter = None, return_fields = None):
# Get node information
- nodes = Nodes(self.api, node_filter, return_fields, scope)
+ nodes = Nodes(self.api, node_filter, return_fields)
# Remove admin only fields
if 'admin' not in self.caller['roles']:
# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
-# $Id: Nodes.py,v 1.24 2006/11/25 09:35:36 thierry Exp $
+# $Id: Nodes.py,v 1.25 2006/11/28 10:25:03 thierry Exp $
#
from types import StringTypes
# for Cache
class_key = 'hostname'
foreign_fields = ['boot_state','model','version','date_created','last_updated']
- foreign_xrefs = {
+ foreign_xrefs = [
# in this case, we dont need the 'table' but Cache will look it up, so...
- 'Site' : { 'field' : 'site_id' , 'table' : 'unused' } ,
- }
+ {'field' : 'site_id' , 'class' : 'Site' , 'table' : 'unused-on-direct-refs' } ,
+ ]
def validate_hostname(self, hostname):
if not valid_hostname(hostname):
database.
"""
- def __init__(self, api, node_filter = None, columns = None, scope = 'all'):
+ def __init__(self, api, node_filter = None, columns = None):
Table.__init__(self, api, Node, columns)
sql = "SELECT %s FROM view_nodes WHERE deleted IS False" % \
node_filter = Filter(Node.fields, node_filter)
sql += " AND (%s)" % node_filter.sql(api, "AND")
- if scope == 'local':
- sql += " AND (peer_id is NULL) "
- elif scope == 'foreign':
- sql += " AND (peer_id is NOT NULL) "
-
self.selectall(sql)
# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
-# $Id: Persons.py,v 1.20 2006/11/24 12:06:00 thierry Exp $
+# $Id: Persons.py,v 1.21 2006/11/25 09:35:36 thierry Exp $
#
from types import StringTypes
class_key = 'email'
foreign_fields = ['first_name', 'last_name', 'title', 'email', 'phone', 'url',
'bio', 'enabled', 'password', 'last_updated', 'date_created']
- #foreign_xrefs = { 'Node' : { 'field' : 'node_ids' ,
- # 'table': 'slice_node' } }
- foreign_xrefs = {
- 'Key' : { 'field' : 'key_ids', 'table' : 'person_key' } ,
- 'Site' : { 'field' : 'site_ids', 'table' : 'person_site'},
-# 'key_ids': Parameter([int], "List of key identifiers"),
+ foreign_xrefs = [
+ {'field' : 'key_ids', 'class': 'Key', 'table' : 'person_key' } ,
+ {'field' : 'site_ids', 'class': 'Site', 'table' : 'person_site'},
+# xxx this is not handled by Cache yet
# 'role_ids': Parameter([int], "List of role identifiers"),
-# 'roles': Parameter([str], "List of roles"),
-# 'site_ids': Parameter([int], "List of site identifiers"),
-# 'slice_ids': Parameter([int], "List of slice identifiers"),
-}
+]
def validate_email(self, email):
"""
foreign_fields = ['abbreviated_name', 'name', 'is_public', 'latitude', 'longitude',
'url', 'date_created', 'last_updated', 'max_slices', 'max_slivers',
]
- foreign_xrefs = {
-#'person_ids',
-#'slice_ids',
-#'node_ids',
-#'address_ids',
-#'pcu_ids',
-}
+    foreign_xrefs = []
def validate_name(self, name):
if not len(name):
# for Cache
class_key = 'name'
foreign_fields = ['description','min_role_id']
- foreign_xrefs = {}
+ foreign_xrefs = []
def validate_name(self, name):
if not len(name):
fields = {
'slice_id': Parameter(int, "Slice identifier"),
'site_id': Parameter(int, "Identifier of the site to which this slice belongs"),
- 'peer_id': Parameter(int, "Peer at which this slice was created", nullok = True),
'name': Parameter(str, "Slice name", max = 32),
'instantiation': Parameter(str, "Slice instantiation state"),
'url': Parameter(str, "URL further describing this slice", max = 254, nullok = True),
'node_ids': Parameter([int], "List of nodes in this slice", ro = True),
'person_ids': Parameter([int], "List of accounts that can use this slice", ro = True),
'slice_attribute_ids': Parameter([int], "List of slice attributes", ro = True),
+ 'peer_id': Parameter(int, "Peer at which this slice was created", nullok = True),
}
# for Cache
class_key = 'name'
- foreign_fields = ['instantiation', 'url', 'description',
- 'max_nodes', 'created', 'expires']
- foreign_xrefs = {
- 'Node' : { 'field' : 'node_ids' , 'table': 'slice_node' },
- 'Person' : { 'field': 'person_ids', 'table' : 'slice_person'},
- }
+ foreign_fields = ['instantiation', 'url', 'description', 'max_nodes', 'created', 'expires']
+ foreign_xrefs = [
+ {'field': 'node_ids' , 'class': 'Node', 'table': 'slice_node' },
+ {'field': 'person_ids', 'class': 'Person', 'table': 'slice_person'},
+ {'field': 'creator_person_id', 'class': 'Person', 'table': 'unused-on-direct-refs'},
+ {'field': 'site_id', 'class': 'Site', 'table': 'unused-on-direct-refs'},
+ ]
def validate_name(self, name):
# N.B.: Responsibility of the caller to ensure that login_base
fast_flag=fast_mode
def show_test():
- print '%d sites, %d keys, %d persons, %d nodes & %d slices'%(number_sites, number_keys,number_persons,
- number_nodes,number_slices)
+ print '%d sites, %d keys, %d persons, %d nodes & %d slices'%(
+ number_sites, number_keys,number_persons,number_nodes,number_slices)
-def fast():
+def mini():
define_test(1,1,1,1,1,True)
def normal():
define_test (sites=4,keys=2,persons=4,nodes=5,slices=4,fast_mode=False)
+big_factor=4
def big():
- define_test (sites=16,keys=8,persons=16,nodes=20,slices=16,fast_mode=False)
+ global number_sites, number_keys, number_persons, number_nodes, number_slices
+ normal()
+ (number_sites,number_keys,number_persons,number_nodes,number_slices) = [
+ big_factor * x for x in (number_sites,number_keys,number_persons,number_nodes,number_slices)]
+
+huge_factor=50
+def huge():
+ global number_sites, number_keys, number_persons, number_nodes, number_slices
+ normal()
+ (number_sites,number_keys,number_persons,number_nodes,number_slices) = [
+ huge_factor * x for x in (number_sites,number_keys,number_persons,number_nodes,number_slices)]
-fast()
+# use mini by default in interactive mode
+mini()
#normal()
####################
####################
# retrieves node_id from hostname - checks for local nodes only
def get_local_node_id(i,nodename):
- return s[i].GetNodes(a[i],[nodename],None,'local')[0]['node_id']
+ return s[i].GetNodes(a[i],{'hostname':nodename,'peer_id':None})[0]['node_id']
# clean all local nodes - foreign nodes are not supposed to be cleaned up manually
def clean_all_nodes (args=[1,2]):
for i in args:
print '%02d:== Cleaning all nodes'%i
- loc_nodes = s[i].GetNodes(a[i],None,None,'local')
+ loc_nodes = s[i].GetNodes(a[i],{'peer_id':None})
for node in loc_nodes:
print '%02d:==== Cleaning node %d'%(i,node['node_id'])
s[i].DeleteNode(a[i],node['node_id'])
# readable dumps
##############################
def p_site (s):
- print (s['site_id'],s['peer_id'],s['login_base'],s['name'],s['node_ids'])
+ print s['site_id'],s['peer_id'],s['login_base'],s['name'],s['node_ids']
def p_key (k):
- print (k['key_id'],k['peer_id'],k['key'])
+ print k['key_id'],k['peer_id'],k['key']
def p_person (p):
- print (p['person_id'],p['peer_id'],p['email'],'keys:',p['key_ids'],'sites:',p['site_ids'])
+ print p['person_id'],p['peer_id'],p['email'],'keys:',p['key_ids'],'sites:',p['site_ids']
def p_node(n):
- print (n['node_id'],n['peer_id'],n['hostname'],'sls=',n['slice_ids'],'site=',n['site_id'])
+ print n['node_id'],n['peer_id'],n['hostname'],'sls=',n['slice_ids'],'site=',n['site_id']
def p_slice(s):
- print (s['slice_id'],s['peer_id'],s['name'],'nodes=',s['node_ids'],'persons=',s['person_ids'])
- print '---',('sas=',s['slice_attribute_ids'],s['name'],'crp=',s['creator_person_id'])
+ print s['slice_id'],s['peer_id'],s['name'],'nodes=',s['node_ids'],'persons=',s['person_ids']
+ print '---','sas=',s['slice_attribute_ids'],s['name'],'crp=',s['creator_person_id'],'e=',s['expires']
def p_sat(sat):
- print (sat['attribute_type_id'],sat['peer_id'], sat['name'], sat['min_role_id'], sat['description'])
+ print sat['attribute_type_id'],sat['peer_id'], sat['name'], sat['min_role_id'], sat['description']
def p_sa (sa):
- print (sa['slice_attribute_id'],sa['peer_id'],sa['name'],'AT_id:',sa['attribute_type_id'])
- print '---',('v=',sa['value'],'sl=',sa['slice_id'],'n=',sa['node_id'])
-
-def p_sliver (x):
- print ('SLIVERS for : hostname',x['hostname'])
- print ('%d config files'%len(x['conf_files']))
- for sv in x['slivers']:
- p_sliver_slice(sv,x['hostname'])
+ print sa['slice_attribute_id'],sa['peer_id'],sa['name'],'AT_id:',sa['attribute_type_id']
+ print '---','v=',sa['value'],'sl=',sa['slice_id'],'n=',sa['node_id']
import pprint
pretty_printer=pprint.PrettyPrinter(5)
-def p_sliver_slice(sliver,hostname):
- print 'SLIVER on hostname %s, s='%hostname,sliver['name']
- print 'KEYS',
+def p_sliver (margin,x):
+ print margin,'SLIVERS for : hostname',x['hostname']
+ print margin,'%d config files'%len(x['conf_files'])
+ for sv in x['slivers']:
+ p_sliver_slice(margin,sv,x['hostname'])
+
+def p_sliver_slice(margin,sliver,hostname):
+ print margin,'SLIVER on hostname %s, s='%hostname,sliver['name']
+ print margin,'KEYS',
pretty_printer.pprint(sliver['keys'])
- print 'ATTRIBUTES',
+ print margin,'ATTRIBUTES',
pretty_printer.pprint(sliver['attributes'])
def dump (args=[1,2]):
for i in args:
- print 'SITES'
+ print '%02d:============================== DUMPING'%i
+ print '%02d: SITES'%i
[p_site(x) for x in s[i].GetSites(a[i])]
- print 'KEYS'
+ print '%02d: KEYS'%i
[p_key(x) for x in s[i].GetKeys(a[i])]
- print 'PERSONS'
+ print '%02d: PERSONS'%i
[p_person(x) for x in s[i].GetPersons(a[i])]
- print 'NODES'
+ print '%02d: NODES'%i
[p_node(x) for x in s[i].GetNodes(a[i])]
- print 'SLICES'
+ print '%02d: SLICES'%i
[p_slice(x) for x in s[i].GetSlices(a[i])]
- print 'Slice Attribute Types'
+ print '%02d: Slice Attribute Types'%i
[p_sat(x) for x in s[i].GetSliceAttributeTypes(a[i])]
- print 'Slice Attributes'
+ print '%02d: Slice Attributes'%i
[p_sa(x) for x in s[i].GetSliceAttributes(a[i])]
- print 'SLIVERS'
- [p_sliver(x) for x in s[i].GetSlivers(a[i])]
+ print '%02d: SLIVERS'%i
+ [p_sliver('%02d:'%i,x) for x in s[i].GetSlivers(a[i])]
+ print '%02d:============================== END DUMP'%i
## for usage under the api
def pv ():
for s in GetSlivers():
- p_sliver(s)
+ p_sliver('',s)
def all():
print 'SITES'
### ad hoc test sequences
def populate ():
+ timer_start()
test_all_init()
+ timer_show()
test01_site()
+ timer_show()
test02_person()
+ timer_show()
test03_node()
+ timer_show()
test04_slice([1])
+ timer_show()
test04_slice_add_lnode([1])
+ timer_show()
test05_sat()
+ timer_show()
test05_sa([1])
+ timer_show()
test00_refresh ("populate: refreshing peer 1",[1])
+ timer_show()
test04_slice_add_fnode([1])
+ timer_show()
test00_refresh("populate: refresh all")
dump()
+ timer_show()
+# temporary - scratch as needed
def test_now ():
test_all_init()
test_all_sites ()
#####
def usage ():
print "Usage: %s [-n] [-f]"%sys.argv[0]
- print " -f runs faster (1 node - 1 slice)"
- print " -b performs big run (4 times as large as normal)"
print " -n runs test_now instead of test_all"
print " -p runs populate instead of test_all"
+ print " -m run in mini mode (1 instance of each class)"
+ print " -b performs big run (%d times as large as normal)"%big_factor
+ print " -H performs huge run (%d times as large as normal)"%huge_factor
sys.exit(1)
def main ():
try:
- (o,a) = getopt.getopt(sys.argv[1:], "fnpb")
+ (o,a) = getopt.getopt(sys.argv[1:], "mnpbH")
except:
usage()
func = test_all
for (opt,val) in o:
- if opt=='-f':
- fast()
- elif opt=='-b':
- big()
- elif opt=='-n':
+ if opt=='-n':
print 'Running test_now'
func = test_now
elif opt=='-p':
print 'Running populate'
func = populate
+ elif opt=='-m':
+ mini()
+ elif opt=='-b':
+ big()
+ elif opt=='-H':
+ huge()
else:
usage()
if a:
papi1:
rsync -a -v -C ./ root@$(PLC1):new_plc_api/
pplc1:
- rsync -a -v -C ./PLC/ root@$(PLC1):$(CHROOT)$(APIDIR)/PLC/
- rsync -a -v -C ./planetlab4.sql root@$(PLC1):$(CHROOT)$(APIDIR)/planetlab4.sql
+ rsync -a -v -C ./planetlab4.sql ./PLC root@$(PLC1):$(CHROOT)$(APIDIR)/
papi2:
rsync -a -v -C ./ root@$(PLC2):new_plc_api/
pplc2:
- rsync -a -v -C ./PLC/ root@$(PLC2):$(CHROOT)$(APIDIR)/PLC/
- rsync -a -v -C ./planetlab4.sql root@$(PLC2):$(CHROOT)$(APIDIR)/planetlab4.sql
+ rsync -a -v -C ./planetlab4.sql ./PLC root@$(PLC2):$(CHROOT)$(APIDIR)/
####################
-DB=install-schema stop-clients clean-db restart
+DB=install-schema stop-clients clean-db restart-db
WEB=install-api restart
db: $(DB)
@echo 'dropping db'
@chroot $(CHROOT) psql -U postgres --port $(PORT) template1 -c 'drop database planetlab4'
+restart-db:
+ @echo 'restarting db'
+ @chroot $(CHROOT) service plc stop db postgresql httpd
+ @chroot $(CHROOT) service plc start httpd postgresql db
+
restart:
@echo 'Restarting PLC'
@chroot $(CHROOT) service plc restart
cp TestPeers.out TestPeers.ref
cp TestPeers.out.nor TestPeers.ref.nor
-frun:
- python -u ./TestPeers.py -f > TestPeers.fout 2>&1
+mrun:
+ python -u ./TestPeers.py -m > TestPeers.mout 2>&1
brun:
python -u ./TestPeers.py -b > TestPeers.bout 2>&1
prun:
python -u ./TestPeers.py -p > TestPeers.pout 2>&1
+pbrun:
+ python -u ./TestPeers.py -p -b > TestPeers.pbout 2>&1
+phrun:
+ python -u ./TestPeers.py -p -H > TestPeers.phout 2>&1
+
#######
HELP=rpm db-dump http
--
-- Copyright (C) 2006 The Trustees of Princeton University
--
--- $Id: planetlab4.sql,v 1.47 2006/11/27 16:43:31 thierry Exp $
+-- $Id: planetlab4.sql,v 1.48 2006/11/28 10:25:03 thierry Exp $
--
--------------------------------------------------------------------------------
FROM nodes
GROUP BY site_id;
--- Nodes at each peer
-CREATE VIEW peer_nodes AS
-SELECT peer_id,
-array_accum(node_id) AS node_ids
-FROM nodes
-GROUP BY peer_id;
-
--------------------------------------------------------------------------------
-- Node groups
--------------------------------------------------------------------------------
WHERE is_deleted is false
GROUP BY site_id;
-CREATE VIEW peer_slices AS
-SELECT peer_id,
-array_accum(slice_id) AS slice_ids
-FROM slices
-GROUP BY peer_id;
-
-- Slice membership
CREATE TABLE slice_person (
slice_id integer REFERENCES slices NOT NULL, -- Slice identifier
LEFT JOIN person_keys USING (person_id)
LEFT JOIN person_slices USING (person_id);
+-- Nodes at each peer
+CREATE VIEW peer_nodes AS
+SELECT peer_id,
+array_accum(node_id) AS node_ids
+FROM nodes
+GROUP BY peer_id;
+
+CREATE VIEW peer_slices AS
+SELECT peer_id,
+array_accum(slice_id) AS slice_ids
+FROM slices
+GROUP BY peer_id;
+
CREATE VIEW view_peers AS
SELECT
peers.*,