table_name = 'keys'
primary_key = 'key_id'
- join_tables = ['person_key', 'peer_key']
+ join_tables = ['person_key']
fields = {
'key_id': Parameter(int, "Key identifier"),
'key_type': Parameter(str, "Key type"),
'key': Parameter(str, "Key value", max = 4096),
- 'person_id': Parameter(int, "User to which this key belongs", nullok = True),
- 'peer_id': Parameter(int, "Peer to which this key belongs", nullok = True),
- 'peer_key_id': Parameter(int, "Foreign key identifier at peer", nullok = True),
+ 'peer_id': Parameter(int, "Peer at which this key is managed", nullok = True),
}
# for Cache
database.
"""
- def __init__(self, api, key_filter = None, columns = None, peer_id = None):
+ def __init__(self, api, key_filter = None, columns = None):
Table.__init__(self, api, Key, columns)
- sql = "SELECT %s FROM view_keys WHERE is_blacklisted IS False" % \
+ sql = "SELECT %s FROM keys WHERE is_blacklisted IS False" % \
", ".join(self.columns)
- if peer_id is None:
- sql += " AND peer_id IS NULL"
- elif isinstance(peer_id, (int, long)):
- sql += " AND peer_id = %d" % peer_id
-
if key_filter is not None:
if isinstance(key_filter, (list, tuple, set)):
key_filter = Filter(Key.fields, {'key_id': key_filter})
table_name = 'nodes'
primary_key = 'node_id'
- join_tables = ['nodegroup_node', 'conf_file_node', 'nodenetworks', 'pcu_node', 'slice_node', 'slice_attribute', 'node_session', 'peer_node']
+ join_tables = ['nodegroup_node', 'conf_file_node', 'nodenetworks', 'pcu_node', 'slice_node', 'slice_attribute', 'node_session']
fields = {
'node_id': Parameter(int, "Node identifier"),
'hostname': Parameter(str, "Fully qualified hostname", max = 255),
'slice_ids': Parameter([int], "List of slices on this node"),
'pcu_ids': Parameter([int], "List of PCUs that control this node"),
'ports': Parameter([int], "List of PCU ports that this node is connected to"),
- 'peer_id': Parameter(int, "Peer to which this node belongs", nullok = True),
- 'peer_node_id': Parameter(int, "Foreign node identifier at peer", nullok = True),
+ 'peer_id': Parameter(int, "Peer at which this node is managed", nullok = True),
}
# for Cache
return boot_state
- validate_date_created = Row.validate_timestamp
- validate_last_updated = Row.validate_timestamp
+ # timestamps
+ def validate_date_created (self, timestamp):
+ return self.validate_timestamp (timestamp)
+ def validate_last_updated (self, timestamp):
+ return self.validate_timestamp (timestamp)
def delete(self, commit = True):
"""
database.
"""
- def __init__(self, api, node_filter = None, columns = None, peer_id = None):
+ def __init__(self, api, node_filter = None, columns = None):
Table.__init__(self, api, Node, columns)
sql = "SELECT %s FROM view_nodes WHERE deleted IS False" % \
", ".join(self.columns)
- if peer_id is None:
- sql += " AND peer_id IS NULL"
- elif isinstance(peer_id, (int, long)):
- sql += " AND peer_id = %d" % peer_id
-
if node_filter is not None:
if isinstance(node_filter, (list, tuple, set)):
# Separate the list into integers and strings
sql += " AND (%s)" % node_filter.sql(api, "AND")
self.selectall(sql)
+
table_name = 'peers'
primary_key = 'peer_id'
- join_tables = ['peer_site', 'peer_person', 'peer_key', 'peer_node',
- 'peer_slice_attribute_type', 'peer_slice_attribute', 'peer_slice']
fields = {
'peer_id': Parameter (int, "Peer identifier"),
'peername': Parameter (str, "Peer name"),
'key': Parameter(str, "Peer GPG public key"),
'cacert': Parameter(str, "Peer SSL public certificate"),
### cross refs
- 'site_ids': Parameter([int], "List of sites for which this peer is authoritative"),
- 'person_ids': Parameter([int], "List of users for which this peer is authoritative"),
+ 'site_ids': Parameter([int], "List of sites for which this peer is authoritative"),
+ 'person_ids': Parameter([int], "List of users for which this peer is authoritative"),
'key_ids': Parameter([int], "List of keys for which this peer is authoritative"),
'node_ids': Parameter([int], "List of nodes for which this peer is authoritative"),
+ 'attribute_type_ids': Parameter([int], "List of slice attribute types for which this peer is authoritative"),
+ 'slice_attribute_ids': Parameter([int], "List of slice attributes for which this peer is authoritative"),
'slice_ids': Parameter([int], "List of slices for which this peer is authoritative"),
}
- def validate_peername(self, peername):
- if not len(peername):
- raise PLCInvalidArgument, "Peer name must be specified"
-
- conflicts = Peers(self.api, [peername])
- for peer in conflicts:
- if 'peer_id' not in self or self['peer_id'] != peer['peer_id']:
- raise PLCInvalidArgument, "Peer name already in use"
-
- return peername
-
def validate_peer_url(self, url):
"""
Validate URL. Must be HTTPS.
# Remove all related entities
for obj in \
- Slices(self.api, self['slice_ids'], peer_id = self['peer_id']) + \
- Keys(self.api, self['key_ids'], peer_id = self['peer_id']) + \
- Persons(self.api, self['person_ids'], peer_id = self['peer_id']) + \
- Nodes(self.api, self['node_ids'], peer_id = self['peer_id']) + \
- Sites(self.api, self['site_ids'], peer_id = self['peer_id']):
+ Sites(self.api, self['site_ids']) + \
+ Persons(self.api, self['person_ids']) + \
+ Keys(self.api, self['key_ids']) + \
+ Nodes(self.api, self['node_ids']) + \
+ SliceAttributeTypes(self.api, self['attribute_type_ids']) + \
+ SliceAttributes(self.api, self['slice_attribute_ids']) + \
+ Slices(self.api, self['slice_ids']):
assert obj['peer_id'] == self['peer_id']
obj.delete(commit = False)
self['deleted'] = True
self.sync(commit)
- def add_site(self, site, peer_site_id, commit = True):
- """
- Associate a local site entry with this peer.
- """
-
- add = Row.add_object(Site, 'peer_site')
- add(self, site,
- {'peer_id': self['peer_id'],
- 'site_id': site['site_id'],
- 'peer_site_id': peer_site_id},
- commit = commit)
-
- def add_person(self, person, peer_person_id, commit = True):
- """
- Associate a local user entry with this peer.
- """
-
- add = Row.add_object(Person, 'peer_person')
- add(self, person,
- {'peer_id': self['peer_id'],
- 'person_id': person['person_id'],
- 'peer_person_id': peer_person_id},
- commit = commit)
-
- def add_key(self, key, peer_key_id, commit = True):
- """
- Associate a local key entry with this peer.
- """
-
- add = Row.add_object(Key, 'peer_key')
- add(self, key,
- {'peer_id': self['peer_id'],
- 'key_id': key['key_id'],
- 'peer_key_id': peer_key_id},
- commit = commit)
-
- def add_node(self, node, peer_node_id, commit = True):
- """
- Associate a local node entry with this peer.
- """
-
- add = Row.add_object(Node, 'peer_node')
- add(self, node,
- {'peer_id': self['peer_id'],
- 'node_id': node['node_id'],
- 'peer_node_id': peer_node_id},
- commit = commit)
-
- def add_slice_attribute_type(self, slice_attribute_type, peer_attribute_type_id, commit = True):
- """
- Associate a local slice attribute type entry with this peer.
- """
-
- add = Row.add_object(SliceAttributeType, 'peer_slice_attribute_type')
- add(self, slice_attribute_type,
- {'peer_id': self['peer_id'],
- 'attribute_type_id': slice_attribute_type['attribute_type_id'],
- 'peer_attribute_type_id': peer_attribute_type_id},
- commit = commit)
-
- def add_slice_attribute(self, slice_attribute, peer_slice_attribute_id, commit = True):
- """
- Associate a local slice_attribute entry with this peer.
- """
-
- add = Row.add_object(SliceAttribute, 'peer_slice_attribute')
- add(self, slice_attribute,
- {'peer_id': self['peer_id'],
- 'slice_attribute_id': slice_attribute['slice_attribute_id'],
- 'peer_slice_attribute_id': peer_slice_attribute_id},
- commit = commit)
-
- def add_slice(self, slice, peer_slice_id, commit = True):
- """
- Associate a local slice entry with this peer.
- """
-
- add = Row.add_object(Slice, 'peer_slice')
- add(self, slice,
- {'peer_id': self['peer_id'],
- 'slice_id': slice['slice_id'],
- 'peer_slice_id': peer_slice_id},
- commit = commit)
-
def connect(self, **kwds):
"""
Connect to this peer via XML-RPC.
return wrapper
- def __getattr__(self, attr):
+ def __getattr__(self, methodname):
"""
- Returns a callable API function if attr is the name of a
- PLCAPI function; otherwise, returns the specified attribute.
+ Fetch a callable for the specified method.
"""
+ function = getattr(self.server, methodname)
+
try:
- # Figure out if the specified attribute is the name of a
- # PLCAPI function. If so and the function requires an
- # authentication structure as its first argument, return a
- # callable that automagically adds an auth struct to the
- # call.
- methodname = attr
+ # Figure out if the function is a PLCAPI function and
+ # requires an authentication structure as its first
+ # argument.
api_function = self.api.callable(methodname)
if api_function.accepts and \
(isinstance(api_function.accepts[0], PLC.Auth.Auth) or \
(isinstance(api_function.accepts[0], Mixed) and \
- filter(lambda param: isinstance(param, Auth), api_function.accepts[0]))):
- function = getattr(self.server, methodname)
- return self.add_auth(function, methodname)
+ filter(lambda param: isinstance(param, Auth), api_function.accepts[0]))):
+ function = self.add_auth(function, methodname)
except Exception, err:
pass
- if hasattr(self, attr):
- return getattr(self, attr)
- else:
- raise AttributeError, "type object 'Peer' has no attribute '%s'" % attr
+ return function
class Peers (Table):
"""
-#!/usr/bin/python
+#!/usr/bin/env ./Shell.py
#
# Test script example
#
from pprint import pprint
from string import letters, digits, punctuation
-from traceback import print_exc
-from optparse import OptionParser
+import re
+import socket
+import struct
import base64
import os
from random import Random
random = Random()
-from PLC.Shell import Shell
-shell = Shell(globals())
-
def randfloat(min = 0.0, max = 1.0):
return float(min) + (random.random() * (float(max) - float(min)))
base64.b64encode(''.join(randstr(bits / 8).encode("utf-8"))),
randemail()])
-def random_site():
- return {
- 'name': randstr(254),
- 'abbreviated_name': randstr(50),
- 'login_base': randstr(20, letters).lower(),
- 'latitude': int(randfloat(-90.0, 90.0) * 1000) / 1000.0,
- 'longitude': int(randfloat(-180.0, 180.0) * 1000) / 1000.0,
- }
+admin = {'AuthMethod': "capability",
+ 'Username': config.PLC_API_MAINTENANCE_USER,
+ 'AuthString': config.PLC_API_MAINTENANCE_PASSWORD,
+ 'Role': "admin"}
+
+user = {'AuthMethod': "password",
+ 'Role': "user"}
+
+pi = {'AuthMethod': "password",
+ 'Role': "pi"}
+
+tech = {'AuthMethod': "password",
+ 'Role': "tech"}
+
+# Add sites
+site_ids = []
+for i in range(3):
+ def random_site():
+ return {
+ 'name': randstr(254),
+ 'abbreviated_name': randstr(50),
+ 'login_base': randstr(20, letters).lower(),
+ 'latitude': int(randfloat(-90.0, 90.0) * 1000) / 1000.0,
+ 'longitude': int(randfloat(-180.0, 180.0) * 1000) / 1000.0,
+ }
-def random_address_type():
- return {
- 'name': randstr(20),
- 'description': randstr(254),
- }
-
-def random_address():
- return {
- 'line1': randstr(254),
- 'line2': randstr(254),
- 'line3': randstr(254),
- 'city': randstr(254),
- 'state': randstr(254),
- 'postalcode': randstr(64),
- 'country': randstr(128),
- }
-
-def random_person():
- return {
- 'first_name': randstr(128),
- 'last_name': randstr(128),
- 'email': randemail(),
- 'bio': randstr(254),
- # Accounts are disabled by default
- 'enabled': False,
- 'password': randstr(254),
- }
-
-def random_key():
- return {
- 'key_type': random.sample(key_types, 1)[0],
- 'key': randkey()
- }
-
-def random_slice():
- return {
- 'name': site['login_base'] + "_" + randstr(11, letters).lower(),
- 'url': "http://" + randhostname() + "/",
- 'description': randstr(2048),
- }
-
-class Test:
- def __init__(self, check = True, verbose = True):
- self.check = check
- self.verbose = verbose
+ # Add site
+ site_fields = random_site()
+ print "AddSite",
+ site_id = AddSite(admin, site_fields)
+
+ # Should return a unique site_id
+ assert site_id not in site_ids
+ site_ids.append(site_id)
+ print "=>", site_id
+
+ # Check site
+ print "GetSites(%d)" % site_id,
+ site = GetSites(admin, [site_id])[0]
+ for field in site_fields:
+ assert site[field] == site_fields[field]
+ print "=> OK"
+
+ # Update site
+ site_fields = random_site()
+ # XXX Currently cannot change login_base
+ del site_fields['login_base']
+ site_fields['max_slices'] = randint(1, 10)
+ print "UpdateSite(%d)" % site_id,
+ UpdateSite(admin, site_id, site_fields)
+ print "=> OK"
+
+ # Check site again
+ site = GetSites(admin, [site_id])[0]
+ for field in site_fields:
+ assert site[field] == site_fields[field]
+
+print "GetSites",
+sites = GetSites(admin, site_ids)
+assert set(site_ids) == set([site['site_id'] for site in sites])
+print "=>", site_ids
+
+# Add address types
+address_type_ids = []
+for i in range(3):
+ def random_address_type():
+ return {
+ 'name': randstr(20),
+ 'description': randstr(254),
+ }
+
+ print "AddAddressType",
+ address_type_fields = random_address_type()
+ address_type_id = AddAddressType(admin, address_type_fields)
+
+ # Should return a unique address_type_id
+ assert address_type_id not in address_type_ids
+ address_type_ids.append(address_type_id)
+ print "=>", address_type_id
+
+ # Check address type
+ print "GetAddressTypes(%d)" % address_type_id,
+ address_type = GetAddressTypes(admin, [address_type_id])[0]
+ for field in 'name', 'description':
+ assert address_type[field] == address_type_fields[field]
+ print "=> OK"
+
+ # Update address type
+ address_type_fields = random_address_type()
+ print "UpdateAddressType(%d)" % address_type_id,
+ UpdateAddressType(admin, address_type_id, address_type_fields)
+ print "=> OK"
+
+ # Check address type again
+ address_type = GetAddressTypes(admin, [address_type_id])[0]
+ for field in 'name', 'description':
+ assert address_type[field] == address_type_fields[field]
+
+print "GetAddressTypes",
+address_types = GetAddressTypes(admin, address_type_ids)
+assert set(address_type_ids) == set([address_type['address_type_id'] for address_type in address_types])
+print "=>", address_type_ids
+
+# Add site addresses
+address_ids = []
+for site_id in site_ids:
+ for i in range(3):
+ def random_address():
+ return {
+ 'line1': randstr(254),
+ 'line2': randstr(254),
+ 'line3': randstr(254),
+ 'city': randstr(254),
+ 'state': randstr(254),
+ 'postalcode': randstr(64),
+ 'country': randstr(128),
+ }
+
+ print "AddSiteAddress",
+ address_fields = random_address()
+ address_id = AddSiteAddress(admin, site_id, address_fields)
+
+ # Should return a unique address_id
+ assert address_id not in address_ids
+ address_ids.append(address_id)
+ print "=>", address_id
+
+ # Check address
+ print "GetAddresses(%d)" % address_id,
+ address = GetAddresses(admin, [address_id])[0]
+ for field in address_fields:
+ assert address[field] == address_fields[field]
+ print "=> OK"
+
+ # Update address
+ address_fields = random_address()
+ print "UpdateAddress(%d)" % address_id,
+ UpdateAddress(admin, address_id, address_fields)
+ print "=> OK"
+
+ # Check address again
+ address = GetAddresses(admin, [address_id])[0]
+ for field in address_fields:
+ assert address[field] == address_fields[field]
+
+ # Add address types
+ for address_type_id in address_type_ids:
+ print "AddAddressTypeToAddress(%d, %d)" % (address_type_id, address_id),
+ AddAddressTypeToAddress(admin, address_type_id, address_id)
+ print "=> OK"
- self.site_ids = []
- self.address_type_ids = []
- self.address_ids = []
- self.person_ids = []
-
- def run(self,
- sites = 100,
- address_types = 3,
- addresses = 2,
- persons = 1000,
- keys = 3):
- try:
- try:
- self.AddSites(sites)
- self.AddAddressTypes(address_types)
- self.AddAddresses(addresses)
- self.AddPersons(persons)
- except:
- print_exc()
- finally:
- self.cleanup()
-
- def cleanup(self):
- self.DeletePersons()
- self.DeleteAddresses()
- self.DeleteAddressTypes()
- self.DeleteSites()
-
- def AddSites(self, n = 3):
- """
- Add a number of random sites.
- """
-
- for i in range(n):
- # Add site
- site_fields = random_site()
- site_id = AddSite(site_fields)
-
- # Should return a unique site_id
- assert site_id not in self.site_ids
- self.site_ids.append(site_id)
-
- if self.check:
- # Check site
- site = GetSites([site_id])[0]
- for field in site_fields:
- assert site[field] == site_fields[field]
-
- # Update site
- site_fields = random_site()
- # XXX Currently cannot change login_base
- del site_fields['login_base']
- site_fields['max_slices'] = randint(1, 10)
- UpdateSite(site_id, site_fields)
-
- if self.check:
- # Check site again
- site = GetSites([site_id])[0]
- for field in site_fields:
- assert site[field] == site_fields[field]
-
- if self.check:
- sites = GetSites(self.site_ids)
- assert set(self.site_ids) == set([site['site_id'] for site in sites])
-
- if self.verbose:
- print "Added sites", self.site_ids
-
- def DeleteSites(self):
- """
- Delete any random sites we may have added.
- """
-
- for site_id in self.site_ids:
- DeleteSite(site_id)
- if self.check:
- assert not GetSites([site_id])
-
- if self.check:
- assert not GetSites(self.site_ids)
-
- if self.verbose:
- print "Deleted sites", self.site_ids
-
- self.site_ids = []
-
- def AddAddressTypes(self, n = 3):
- """
- Add a number of random address types.
- """
+print "GetAddresses",
+addresses = GetAddresses(admin, address_ids)
+assert set(address_ids) == set([address['address_id'] for address in addresses])
+for address in addresses:
+ assert set(address_type_ids) == set(address['address_type_ids'])
+print "=>", address_ids
+
+print "GetRoles",
+roles = GetRoles(admin)
+role_ids = [role['role_id'] for role in roles]
+roles = [role['name'] for role in roles]
+roles = dict(zip(roles, role_ids))
+print "=>", role_ids
+
+print "GetKeyTypes",
+key_types = GetKeyTypes(admin)
+print "=>", key_types
+
+# Add users
+person_ids = []
+key_ids = []
+for auth in user, pi, tech:
+ def random_person():
+ global auth
+
+ person_fields = {
+ 'first_name': randstr(128),
+ 'last_name': randstr(128),
+ 'email': randemail(),
+ 'bio': randstr(254),
+ # Accounts are disabled by default
+ 'enabled': False,
+ 'password': randstr(254),
+ }
+
+ auth['Username'] = person_fields['email']
+ auth['AuthString'] = person_fields['password']
+
+ return person_fields
+
+ # Add account
+ person_fields = random_person()
+ print "AddPerson",
+ person_id = AddPerson(admin, person_fields)
+
+ # Should return a unique person_id
+ assert person_id not in person_ids
+ person_ids.append(person_id)
+ print "=>", person_id
+
+ # Check account
+ print "GetPersons(%d)" % person_id,
+ person = GetPersons(admin, [person_id])[0]
+ for field in person_fields:
+ if field != 'password':
+ assert person[field] == person_fields[field]
+ print "=> OK"
+
+ # Update account
+ person_fields = random_person()
+ print "UpdatePerson(%d)" % person_id,
+ UpdatePerson(admin, person_id, person_fields)
+ print "=> OK"
+
+ # Check account again
+ person = GetPersons(admin, [person_id])[0]
+ for field in person_fields:
+ if field != 'password':
+ assert person[field] == person_fields[field]
+
+ # Check that account is really disabled
+ try:
+ assert not AuthCheck(auth)
+ except:
+ pass
+
+ # Add role
+ role_id = roles[auth['Role']]
+ print "AddRoleToPerson(%d, %d)" % (role_id, person_id),
+ AddRoleToPerson(admin, role_id, person_id)
+ person = GetPersons(admin, [person_id])[0]
+ assert [role_id] == person['role_ids']
+ print "=> OK"
+
+ # Enable account
+ UpdatePerson(admin, person_id, {'enabled': True})
+
+ # Check authentication
+ print "AuthCheck(%s)" % auth['Username'],
+ assert AuthCheck(auth)
+ print "=> OK"
+
+ # Associate account with each site
+ for site_id in site_ids:
+ print "AddPersonToSite(%d, %d)" % (person_id, site_id),
+ AddPersonToSite(admin, person_id, site_id)
+ print "=> OK"
+
+ # Make sure it really did it
+ person = GetPersons(admin, [person_id])[0]
+ person_site_ids = person['site_ids']
+ assert set(site_ids) == set(person['site_ids'])
+
+ # First site should be the primary site
+ print "SetPersonPrimarySite(%d, %d)" % (person_id, person_site_ids[1]),
+ SetPersonPrimarySite(auth, person_id, person_site_ids[1])
+ person = GetPersons(admin, [person_id])[0]
+ assert person['site_ids'][0] == person_site_ids[1]
+ print "=> OK"
+
+ def random_key():
+ return {
+ 'key_type': random.sample(key_types, 1)[0],
+ 'key': randkey()
+ }
+
+ # Add keys
+ for i in range(3):
+ # Add slice attribute
+ key_fields = random_key()
+ print "AddPersonKey",
+ key_id = AddPersonKey(admin, person_id, key_fields)
+
+ # Should return a unique key_id
+ assert key_id not in key_ids
+ key_ids.append(key_id)
+ print "=>", key_id
+
+ # Check key
+ print "GetKeys(%d)" % key_id,
+ key = GetKeys(admin, [key_id])[0]
+ for field in key_fields:
+ assert key[field] == key_fields[field]
+ print "=> OK"
+
+ # Update key
+ key_fields = random_key()
+ print "UpdateKey(%d)" % key_id,
+ UpdateKey(admin, key_id, key_fields)
+ key = GetKeys(admin, [key_id])[0]
+ for field in key_fields:
+ assert key[field] == key_fields[field]
+ print "=> OK"
+
+ # Add and immediately blacklist a key
+ key_fields = random_key()
+ print "AddPersonKey",
+ key_id = AddPersonKey(admin, person_id, key_fields)
+ print "=>", key_id
+
+ print "BlacklistKey(%d)" % key_id,
+ BlacklistKey(admin, key_id)
+
+ # Is effectively deleted
+ assert not GetKeys(admin, [key_id])
+ person = GetPersons(admin, [person_id])[0]
+ assert key_id not in person['key_ids']
+
+ # Cannot be added again
+ try:
+ key_id = AddPersonKey(admin, person_id, key_fields)
+ assert False
+ except Exception, e:
+ pass
+
+ print "=> OK"
+
+print "GetPersons",
+persons = GetPersons(admin, person_ids)
+assert set(person_ids) == set([person['person_id'] for person in persons])
+print "=>", person_ids
+
+# Add node groups
+nodegroup_ids = []
+for i in range(3):
+ def random_nodegroup():
+ return {
+ 'name': randstr(50),
+ 'description': randstr(200),
+ }
+
+ # Add node group
+ print "AddNodeGroup",
+ nodegroup_fields = random_nodegroup()
+ nodegroup_id = AddNodeGroup(admin, nodegroup_fields)
+
+ # Should return a unique nodegroup_id
+ assert nodegroup_id not in nodegroup_ids
+ nodegroup_ids.append(nodegroup_id)
+ print "=>", nodegroup_id
+
+ # Check node group
+ print "GetNodeGroups(%d)" % nodegroup_id,
+ nodegroup = GetNodeGroups(admin, [nodegroup_id])[0]
+ for field in nodegroup_fields:
+ assert nodegroup[field] == nodegroup_fields[field]
+ print "=> OK"
+
+ # Update node group, with a readable name
+ nodegroup_fields = random_nodegroup()
+ nodegroup_fields['name'] = randstr(16, letters + ' ' + digits)
+ print "UpdateNodeGroup",
+ UpdateNodeGroup(admin, nodegroup_id, nodegroup_fields)
+ print "=> OK"
+
+ # Check node group again
+ nodegroup = GetNodeGroups(admin, [nodegroup_id])[0]
+ for field in nodegroup_fields:
+ assert nodegroup[field] == nodegroup_fields[field]
+
+print "GetNodeGroups",
+nodegroups = GetNodeGroups(admin, nodegroup_ids)
+assert set(nodegroup_ids) == set([nodegroup['nodegroup_id'] for nodegroup in nodegroups])
+print "=>", nodegroup_ids
+
+print "GetBootStates",
+boot_states = GetBootStates(admin)
+print "=>", boot_states
+
+# Add nodes
+node_ids = []
+for site_id in site_ids:
+ for i in range(3):
+ def random_node():
+ return {
+ 'hostname': randhostname(),
+ 'boot_state': random.sample(boot_states, 1)[0],
+ 'model': randstr(255),
+ 'version': randstr(64),
+ }
+
+ # Add node
+ node_fields = random_node()
+ print "AddNode",
+ node_id = AddNode(admin, site_id, node_fields)
+
+ # Should return a unique node_id
+ assert node_id not in node_ids
+ node_ids.append(node_id)
+ print "=>", node_id
+
+ # Check node
+ print "GetNodes(%d)" % node_id,
+ node = GetNodes(admin, [node_id])[0]
+ for field in node_fields:
+ assert node[field] == node_fields[field]
+ print "=> OK"
+
+ # Update node
+ node_fields = random_node()
+ print "UpdateNode(%d)" % node_id,
+ UpdateNode(admin, node_id, node_fields)
+ print "=> OK"
+
+ # Check node again
+ node = GetNodes(admin, [node_id])[0]
+ for field in node_fields:
+ assert node[field] == node_fields[field]
+
+ # Add to node groups
+ for nodegroup_id in nodegroup_ids:
+ print "AddNodeToNodeGroup(%d, %d)" % (node_id, nodegroup_id),
+ AddNodeToNodeGroup(admin, node_id, nodegroup_id)
+ print "=> OK"
- for i in range(n):
- address_type_fields = random_address_type()
- address_type_id = AddAddressType(address_type_fields)
-
- # Should return a unique address_type_id
- assert address_type_id not in self.address_type_ids
- self.address_type_ids.append(address_type_id)
-
- if self.check:
- # Check address type
- address_type = GetAddressTypes([address_type_id])[0]
- for field in 'name', 'description':
- assert address_type[field] == address_type_fields[field]
-
- # Update address type
- address_type_fields = random_address_type()
- UpdateAddressType(address_type_id, address_type_fields)
-
- # Check address type again
- address_type = GetAddressTypes([address_type_id])[0]
- for field in 'name', 'description':
- assert address_type[field] == address_type_fields[field]
-
- if self.check:
- address_types = GetAddressTypes(self.address_type_ids)
- assert set(self.address_type_ids) == set([address_type['address_type_id'] for address_type in address_types])
-
- if self.verbose:
- print "Added address types", self.address_type_ids
-
- def DeleteAddressTypes(self):
- """
- Delete any random address types we may have added.
- """
-
- for address_type_id in self.address_type_ids:
- DeleteAddressType(address_type_id)
- if self.check:
- assert not GetAddressTypes([address_type_id])
-
- if self.check:
- assert not GetAddressTypes(self.address_type_ids)
-
- if self.verbose:
- print "Deleted address types", self.address_type_ids
-
- self.address_type_ids = []
-
- def AddAddresses(self, n = 3):
- """
- Add a number of random addresses to each site.
- """
-
- for site_id in self.site_ids:
- for i in range(n):
- address_fields = random_address()
- address_id = AddSiteAddress(site_id, address_fields)
-
- # Should return a unique address_id
- assert address_id not in self.address_ids
- self.address_ids.append(address_id)
-
- if self.check:
- # Check address
- address = GetAddresses([address_id])[0]
- for field in address_fields:
- assert address[field] == address_fields[field]
-
- # Update address
- address_fields = random_address()
- UpdateAddress(address_id, address_fields)
-
- # Check address again
- address = GetAddresses([address_id])[0]
- for field in address_fields:
- assert address[field] == address_fields[field]
-
- # Add address types
- for address_type_id in self.address_type_ids:
- AddAddressTypeToAddress(address_type_id, address_id)
-
- if self.check:
- addresses = GetAddresses(self.address_ids)
- assert set(self.address_ids) == set([address['address_id'] for address in addresses])
- for address in addresses:
- assert set(self.address_type_ids) == set(address['address_type_ids'])
-
- if self.verbose:
- print "Added addresses", self.address_ids
-
- def DeleteAddresses(self):
- """
- Delete any random addresses we may have added.
- """
-
- # Delete site addresses
- for address_id in self.address_ids:
- # Remove address types
- for address_type_id in self.address_type_ids:
- DeleteAddressTypeFromAddress(address_type_id, address_id)
-
- if self.check:
- address = GetAddresses([address_id])[0]
- assert not address['address_type_ids']
-
- DeleteAddress(address_id)
- if self.check:
- assert not GetAddresses([address_id])
-
- if self.check:
- assert not GetAddresses(self.address_ids)
-
- if self.verbose:
- print "Deleted addresses", self.address_ids
-
- self.address_ids = []
-
- def AddPersons(self, n = 3):
- """
- Add a number of random users to each site.
- """
-
- roles = GetRoles()
- role_ids = [role['role_id'] for role in roles]
- roles = [role['name'] for role in roles]
- roles = dict(zip(roles, role_ids))
-
- for i in range(n):
-
- # Add account
- person_fields = random_person()
- person_id = AddPerson(person_fields)
-
- # Should return a unique person_id
- assert person_id not in self.person_ids
- self.person_ids.append(person_id)
-
- if self.check:
- # Check account
- person = GetPersons([person_id])[0]
- for field in person_fields:
- if field != 'password':
- assert person[field] == person_fields[field]
-
- # Update account
- person_fields = random_person()
- UpdatePerson(person_id, person_fields)
-
- # Check account again
- person = GetPersons([person_id])[0]
- for field in person_fields:
- if field != 'password':
- assert person[field] == person_fields[field]
-
- auth = {'AuthMethod': "password",
- 'Username': person_fields['email'],
- 'AuthString': person_fields['password']}
-
- if self.check:
- # Check that account is disabled
- try:
- assert not AuthCheck(auth)
- except:
- pass
-
- # Add random set of roles
- person_roles = random.sample(['user', 'pi', 'tech'], randint(1, 3))
- for person_role in person_roles:
- role_id = roles[person_role]
- AddRoleToPerson(role_id, person_id)
-
- if self.check:
- person = GetPersons([person_id])[0]
- assert set(person_roles) == set(person['roles'])
-
- # Enable account
- UpdatePerson(person_id, {'enabled': True})
-
- if self.check:
- # Check that account is enabled
- assert AuthCheck(auth)
-
- # Associate account with random set of sites
- person_site_ids = []
- for site_id in random.sample(self.site_ids, randint(1, len(self.site_ids))):
- AddPersonToSite(person_id, site_id)
- person_site_ids.append(site_id)
-
- if self.check:
- # Make sure it really did it
- person = GetPersons([person_id])[0]
- assert set(person_site_ids) == set(person['site_ids'])
-
- # Set a primary site
- primary_site_id = random.sample(person_site_ids, randint(1, len(person_site_ids)))[0]
- SetPersonPrimarySite(person_id, primary_site_id)
-
- if self.check:
- person = GetPersons([person_id])[0]
- assert person['site_ids'][0] == primary_site_id
-
- if self.verbose:
- print "Added users", self.person_ids
-
- def DeletePersons(self):
- # Delete users
- for person_id in self.person_ids:
- # Remove from each site
- for site_id in self.site_ids:
- DeletePersonFromSite(person_id, site_id)
-
- if self.check:
- person = GetPersons([person_id])[0]
- assert not person['site_ids']
-
- # Revoke roles
- person = GetPersons([person_id])[0]
- for role_id in person['role_ids']:
- DeleteRoleFromPerson(role_id, person_id)
-
- if self.check:
- person = GetPersons([person_id])[0]
- assert not person['role_ids']
-
- # Disable account
- UpdatePerson(person_id, {'enabled': False})
-
- if self.check:
- person = GetPersons([person_id])[0]
- assert not person['enabled']
-
- # Delete account
- DeletePerson(person_id)
-
- if self.check:
- assert not GetPersons([person_id])
-
- if self.check:
- assert not GetPersons(self.person_ids)
-
- if self.verbose:
- print "Deleted users", self.person_ids
-
- self.person_ids = []
-
-if __name__ == "__main__":
- parser = OptionParser()
- parser.add_option("-c", "--check", action = "store_true", default = False, help = "Verify actions (default: %default)")
- parser.add_option("-q", "--quiet", action = "store_true", default = False, help = "Be quiet (default: %default)")
- parser.add_option("-p", "--populate", action = "store_true", default = False, help = "Do not cleanup (default: %default)")
- (options, args) = parser.parse_args()
- test = Test(check = options.check, verbose = not options.quiet)
- test.run()
- if not options.populate:
- test.cleanup()
+print "GetNodes",
+nodes = GetNodes(admin, node_ids)
+assert set(node_ids) == set([node['node_id'] for node in nodes])
+print "=>", node_ids
+
+print "GetNodeGroups",
+nodegroups = GetNodeGroups(admin, nodegroup_ids)
+for nodegroup in nodegroups:
+ assert set(nodegroup['node_ids']) == set(node_ids)
+print "=> OK"
+
+print "GetNetworkMethods",
+network_methods = GetNetworkMethods(admin)
+print "=>", network_methods
+
+print "GetNetworkTypes",
+network_types = GetNetworkTypes(admin)
+print "=>", network_types
+
+# Add node networks
+nodenetwork_ids = []
+for node_id in node_ids:
+ def random_nodenetwork(method, type):
+ nodenetwork_fields = {
+ 'method': method,
+ 'type': type,
+ 'bwlimit': randint(500000, 10000000),
+ }
+
+ if method != 'dhcp':
+ ip = randint(0, 0xffffffff)
+ netmask = (0xffffffff << randint(2, 31)) & 0xffffffff
+ network = ip & netmask
+ broadcast = ((ip & netmask) | ~netmask) & 0xffffffff
+ gateway = randint(network + 1, broadcast - 1)
+ dns1 = randint(0, 0xffffffff)
+
+ for field in 'ip', 'netmask', 'network', 'broadcast', 'gateway', 'dns1':
+ nodenetwork_fields[field] = socket.inet_ntoa(struct.pack('>L', locals()[field]))
+
+ return nodenetwork_fields
+
+ for method in network_methods:
+ for type in network_types:
+ # Add node network
+ print "AddNodeNetwork",
+ nodenetwork_fields = random_nodenetwork(method, type)
+ nodenetwork_id = AddNodeNetwork(admin, node_id, nodenetwork_fields)
+
+ # Should return a unique nodenetwork_id
+ assert nodenetwork_id not in nodenetwork_ids
+ nodenetwork_ids.append(nodenetwork_id)
+ print "=>", nodenetwork_id
+
+ # Check node network
+ print "GetNodeNetworks(%d)" % nodenetwork_id,
+ nodenetwork = GetNodeNetworks(admin, [nodenetwork_id])[0]
+ for field in nodenetwork_fields:
+ assert nodenetwork[field] == nodenetwork_fields[field]
+ print "=> OK"
+
+ # Update node network
+ nodenetwork_fields = random_nodenetwork(method, type)
+ print "UpdateNodeNetwork(%d)" % nodenetwork_id,
+ UpdateNodeNetwork(admin, nodenetwork_id, nodenetwork_fields)
+ print "=> OK"
+
+ # Check node network again
+ nodenetwork = GetNodeNetworks(admin, [nodenetwork_id])[0]
+ for field in nodenetwork_fields:
+ assert nodenetwork[field] == nodenetwork_fields[field]
+
+print "GetNodeNetworks",
+nodenetworks = GetNodeNetworks(admin, nodenetwork_ids)
+assert set(nodenetwork_ids) == set([nodenetwork['nodenetwork_id'] for nodenetwork in nodenetworks])
+print "=>", nodenetwork_ids
+
+# Add PCUs
+pcu_ids = []
+for site_id in site_ids:
+ def random_pcu():
+ return {
+ 'hostname': randhostname(),
+ 'ip': socket.inet_ntoa(struct.pack('>L', randint(0, 0xffffffff))),
+ 'protocol': randstr(16),
+ 'username': randstr(254),
+ 'password': randstr(254),
+ 'notes': randstr(254),
+ 'model': randstr(32),
+ }
+
+ # Add PCU
+ pcu_fields = random_pcu()
+ print "AddPCU",
+ pcu_id = AddPCU(admin, site_id, pcu_fields)
+
+ # Should return a unique pcu_id
+ assert pcu_id not in pcu_ids
+ pcu_ids.append(pcu_id)
+ print "=>", pcu_id
+
+ # Check PCU
+ print "GetPCUs(%d)" % pcu_id,
+ pcu = GetPCUs(admin, [pcu_id])[0]
+ for field in pcu_fields:
+ assert pcu[field] == pcu_fields[field]
+ print "=> OK"
+
+ # Update PCU
+ pcu_fields = random_pcu()
+ print "UpdatePCU(%d)" % pcu_id,
+ UpdatePCU(admin, pcu_id, pcu_fields)
+ print "=> OK"
+
+ # Check PCU again
+ pcu = GetPCUs(admin, [pcu_id])[0]
+ for field in pcu_fields:
+ assert pcu[field] == pcu_fields[field]
+
+ # Add each node at this site to a different port on this PCU
+ site = GetSites(admin, [site_id])[0]
+ port = randint(1, 10)
+ for node_id in site['node_ids']:
+ print "AddNodeToPCU(%d, %d, %d)" % (node_id, pcu_id, port),
+ AddNodeToPCU(admin, node_id, pcu_id, port)
+ print "=> OK"
+ port += 1
+
+print "GetPCUs",
+pcus = GetPCUs(admin, pcu_ids)
+assert set(pcu_ids) == set([pcu['pcu_id'] for pcu in pcus])
+print "=>", pcu_ids
+
+# Add configuration files
+conf_file_ids = []
+for nodegroup_id in nodegroup_ids:
+    def random_conf_file():
+        return {
+            'enabled': bool(randint(0, 1)),  # randint() requires bounds; a bare call raises TypeError
+            'source': randpath(255),
+            'dest': randpath(255),
+            'file_permissions': "%#o" % randint(0, 512),
+            'file_owner': randstr(32, letters + '_' + digits),
+            'file_group': randstr(32, letters + '_' + digits),
+            'preinstall_cmd': randpath(100),
+            'postinstall_cmd': randpath(100),
+            'error_cmd': randpath(100),
+            'ignore_cmd_errors': bool(randint(0, 1)),  # coin flip
+            'always_update': bool(randint(0, 1)),  # coin flip
+        }
+
+ # Add configuration file
+ conf_file_fields = random_conf_file()
+ print "AddConfFile",
+ conf_file_id = AddConfFile(admin, conf_file_fields)
+
+ # Should return a unique conf_file_id
+ assert conf_file_id not in conf_file_ids
+ conf_file_ids.append(conf_file_id)
+ print "=>", conf_file_id
+
+ # Check configuration file
+ print "GetConfFiles(%d)" % conf_file_id,
+ conf_file = GetConfFiles(admin, [conf_file_id])[0]
+ for field in conf_file_fields:
+ assert conf_file[field] == conf_file_fields[field]
+ print "=> OK"
+
+ # Update configuration file
+ conf_file_fields = random_conf_file()
+ print "UpdateConfFile(%d)" % conf_file_id,
+ UpdateConfFile(admin, conf_file_id, conf_file_fields)
+ print "=> OK"
+
+ # Check configuration file
+ conf_file = GetConfFiles(admin, [conf_file_id])[0]
+ for field in conf_file_fields:
+ assert conf_file[field] == conf_file_fields[field]
+
+ # Add to all node groups
+ for nodegroup_id in nodegroup_ids:
+ print "AddConfFileToNodeGroup(%d, %d)" % (conf_file_id, nodegroup_id),
+ AddConfFileToNodeGroup(admin, conf_file_id, nodegroup_id)
+ print "=> OK"
+
+ # Add to all nodes
+ for node_id in node_ids:
+ print "AddConfFileToNode(%d, %d)" % (conf_file_id, node_id),
+ AddConfFileToNode(admin, conf_file_id, node_id)
+ print "=> OK"
+
+print "GetConfFiles",
+conf_files = GetConfFiles(admin, conf_file_ids)
+assert set(conf_file_ids) == set([conf_file['conf_file_id'] for conf_file in conf_files])
+for conf_file in conf_files:
+ assert set(nodegroup_ids) == set(conf_file['nodegroup_ids'])
+ assert set(node_ids) == set(conf_file['node_ids'])
+print "=>", conf_file_ids
+
+# Add slice attribute types
+attribute_type_ids = []
+for i in range(3):
+ def random_attribute_type():
+ return {
+ 'name': randstr(100),
+ 'description': randstr(254),
+ 'min_role_id': random.sample(roles.values(), 1)[0],
+ }
+
+ # Add slice attribute type
+ attribute_type_fields = random_attribute_type()
+ print "AddSliceAttributeType",
+ attribute_type_id = AddSliceAttributeType(admin, attribute_type_fields)
+
+ # Should return a unique attribute_type_id
+ assert attribute_type_id not in attribute_type_ids
+ attribute_type_ids.append(attribute_type_id)
+ print "=>", attribute_type_id
+
+ # Check slice attribute type
+ print "GetSliceAttributeTypes(%d)" % attribute_type_id,
+ attribute_type = GetSliceAttributeTypes(admin, [attribute_type_id])[0]
+ for field in attribute_type_fields:
+ assert attribute_type[field] == attribute_type_fields[field]
+ print "=> OK"
+
+ # Update slice attribute type
+ attribute_type_fields = random_attribute_type()
+ print "UpdateSliceAttributeType(%d)" % attribute_type_id,
+ UpdateSliceAttributeType(admin, attribute_type_id, attribute_type_fields)
+ print "=> OK"
+
+ # Check slice attribute type again
+ attribute_type = GetSliceAttributeTypes(admin, [attribute_type_id])[0]
+ for field in attribute_type_fields:
+ assert attribute_type[field] == attribute_type_fields[field]
+
+# Add slices and slice attributes
+slice_ids = []
+slice_attribute_ids = []
+for site in sites:
+ for i in range(site['max_slices']):
+ def random_slice():
+ return {
+ 'name': site['login_base'] + "_" + randstr(11, letters).lower(),
+ 'url': "http://" + randhostname() + "/",
+ 'description': randstr(2048),
+ }
+
+ # Add slice
+ slice_fields = random_slice()
+ print "AddSlice",
+ slice_id = AddSlice(admin, slice_fields)
+
+ # Should return a unique slice_id
+ assert slice_id not in slice_ids
+ slice_ids.append(slice_id)
+ print "=>", slice_id
+
+ # Check slice
+ print "GetSlices(%d)" % slice_id,
+ slice = GetSlices(admin, [slice_id])[0]
+ for field in slice_fields:
+ assert slice[field] == slice_fields[field]
+ print "=> OK"
+
+ # Update slice
+ slice_fields = random_slice()
+ # Cannot change slice name
+ del slice_fields['name']
+ print "UpdateSlice(%d)" % slice_id,
+ UpdateSlice(admin, slice_id, slice_fields)
+ slice = GetSlices(admin, [slice_id])[0]
+ for field in slice_fields:
+ assert slice[field] == slice_fields[field]
+ print "=> OK"
+
+ # Add slice to all nodes
+ print "AddSliceToNodes(%d, %s)" % (slice_id, str(node_ids)),
+ AddSliceToNodes(admin, slice_id, node_ids)
+ slice = GetSlices(admin, [slice_id])[0]
+ assert set(node_ids) == set(slice['node_ids'])
+ print "=> OK"
+
+ # Add users to slice
+ for person_id in person_ids:
+ print "AddPersonToSlice(%d, %d)" % (person_id, slice_id),
+ AddPersonToSlice(admin, person_id, slice_id)
+ print "=> OK"
+ slice = GetSlices(admin, [slice_id])[0]
+ assert set(person_ids) == set(slice['person_ids'])
+
+ # Set slice/sliver attributes
+ for attribute_type_id in attribute_type_ids:
+ value = randstr(16, letters + '_' + digits)
+ # Make it a sliver attribute with 50% probability
+ node_id = random.sample(node_ids + [None] * len(node_ids), 1)[0]
+
+ # Add slice attribute
+ print "AddSliceAttribute(%d, %d)" % (slice_id, attribute_type_id),
+ if node_id is None:
+ slice_attribute_id = AddSliceAttribute(admin, slice_id, attribute_type_id, value)
+ else:
+ slice_attribute_id = AddSliceAttribute(admin, slice_id, attribute_type_id, value, node_id)
+
+ # Should return a unique slice_attribute_id
+ assert slice_attribute_id not in slice_attribute_ids
+ slice_attribute_ids.append(slice_attribute_id)
+ print "=>", slice_attribute_id
+
+ # Check slice attribute
+ print "GetSliceAttributes(%d)" % slice_attribute_id,
+ slice_attribute = GetSliceAttributes(admin, [slice_attribute_id])[0]
+ for field in 'attribute_type_id', 'slice_id', 'node_id', 'slice_attribute_id', 'value':
+ assert slice_attribute[field] == locals()[field]
+ print "=> OK"
+
+ # Update slice attribute
+ value = randstr(16, letters + '_' + digits)
+ print "UpdateSliceAttribute(%d)" % slice_attribute_id,
+ UpdateSliceAttribute(admin, slice_attribute_id, value)
+ slice_attribute = GetSliceAttributes(admin, [slice_attribute_id])[0]
+ for field in 'attribute_type_id', 'slice_id', 'node_id', 'slice_attribute_id', 'value':
+ assert slice_attribute[field] == locals()[field]
+ print "=> OK"
+
+# Delete slices
+for slice_id in slice_ids:
+ # Delete slice attributes
+ slice = GetSlices(admin, [slice_id])[0]
+ for slice_attribute_id in slice['slice_attribute_ids']:
+ print "DeleteSliceAttribute(%d, %d)" % (slice_id, slice_attribute_id),
+ DeleteSliceAttribute(admin, slice_attribute_id)
+ print "=> OK"
+ slice = GetSlices(admin, [slice_id])[0]
+ assert not slice['slice_attribute_ids']
+
+ # Delete users from slice
+ for person_id in person_ids:
+ print "DeletePersonFromSlice(%d, %d)" % (person_id, slice_id),
+ DeletePersonFromSlice(admin, person_id, slice_id)
+ print "=> OK"
+ slice = GetSlices(admin, [slice_id])[0]
+ assert not slice['person_ids']
+
+ # Delete nodes from slice
+ print "DeleteSliceFromNodes(%d, %s)" % (slice_id, node_ids),
+ DeleteSliceFromNodes(admin, slice_id, node_ids)
+ print "=> OK"
+ slice = GetSlices(admin, [slice_id])[0]
+ assert not slice['node_ids']
+
+ # Delete slice
+ print "DeleteSlice(%d)" % slice_id,
+ DeleteSlice(admin, slice_id)
+ assert not GetSlices(admin, [slice_id])
+
+ # Make sure it really deleted it
+ slices = GetSlices(admin, slice_ids)
+ assert slice_id not in [slice['slice_id'] for slice in slices]
+ print "=> OK"
+
+print "GetSlices",
+assert not GetSlices(admin, slice_ids)
+print "=> []"
+
+# Delete slice attribute types
+for attribute_type_id in attribute_type_ids:
+ # Delete slice attribute type
+ print "DeleteSliceAttributeType(%d)" % attribute_type_id,
+ DeleteSliceAttributeType(admin, attribute_type_id)
+ assert not GetSliceAttributeTypes(admin, [attribute_type_id])
+
+ # Make sure it really deleted it
+ attribute_types = GetSliceAttributeTypes(admin, attribute_type_ids)
+ assert attribute_type_id not in [attribute_type['attribute_type_id'] for attribute_type in attribute_types]
+ print "=> OK"
+
+print "GetSliceAttributeTypes",
+assert not GetSliceAttributeTypes(admin, attribute_type_ids)
+print "=> []"
+
+# Delete configuration files
+for conf_file in conf_files:
+ conf_file_id = conf_file['conf_file_id']
+
+ for node_id in conf_file['node_ids']:
+ print "DeleteConfFileFromNode(%d, %d)" % (conf_file_id, node_id),
+ DeleteConfFileFromNode(admin, conf_file_id, node_id)
+ print "=> OK"
+
+ for nodegroup_id in conf_file['nodegroup_ids']:
+ print "DeleteConfFileFromNodeGroup(%d, %d)" % (conf_file_id, nodegroup_id),
+ DeleteConfFileFromNodeGroup(admin, conf_file_id, nodegroup_id)
+ print "=> OK"
+
+ print "DeleteConfFile(%d)" % conf_file_id,
+ DeleteConfFile(admin, conf_file_id)
+ print "=> OK"
+
+print "GetConfFiles",
+assert not GetConfFiles(admin, conf_file_ids)
+print "=> []"
+
+# Delete PCUs
+for pcu in pcus:
+ pcu_id = pcu['pcu_id']
+
+ for node_id in pcu['node_ids']:
+ print "DeleteNodeFromPCU(%d, %d)" % (node_id, pcu_id),
+ DeleteNodeFromPCU(admin, node_id, pcu_id)
+ print "=> OK"
+
+ print "DeletePCU(%d)" % pcu_id,
+ DeletePCU(admin, pcu_id)
+ print "=> OK"
+
+print "GetPCUs",
+assert not GetPCUs(admin, pcu_ids)
+print "=> []"
+
+# Delete node networks
+for nodenetwork_id in nodenetwork_ids:
+ print "DeleteNodeNetwork(%d)" % nodenetwork_id,
+ DeleteNodeNetwork(admin, nodenetwork_id)
+ print "=>", "OK"
+
+print "GetNodeNetworks",
+assert not GetNodeNetworks(admin, nodenetwork_ids)
+print "=> []"
+
+# Delete nodes
+for node_id in node_ids:
+ # Remove from node groups
+ for nodegroup_id in nodegroup_ids:
+ print "DeleteNodeFromNodeGroup(%d, %d)" % (node_id, nodegroup_id),
+ DeleteNodeFromNodeGroup(admin, node_id, nodegroup_id)
+ print "=> OK"
+ node = GetNodes(admin, [node_id])[0]
+ assert not node['nodegroup_ids']
+
+ # Delete node
+ print "DeleteNode(%d)" % node_id,
+ DeleteNode(admin, node_id)
+ assert not GetNodes(admin, [node_id])
+
+ # Make sure it really deleted it
+ nodes = GetNodes(admin, node_ids)
+ assert node_id not in [node['node_id'] for node in nodes]
+ print "=> OK"
+
+print "GetNodes",
+assert not GetNodes(admin, node_ids)
+print "=> []"
+
+nodegroups = GetNodeGroups(admin, nodegroup_ids)
+for nodegroup in nodegroups:
+ assert not set(node_ids).intersection(nodegroup['node_ids'])
+
+# Delete users
+for person_id in person_ids:
+ # Delete keys
+ person = GetPersons(admin, [person_id])[0]
+ for key_id in person['key_ids']:
+ print "DeleteKey(%d)" % key_id,
+ DeleteKey(admin, key_id)
+ print "=> OK"
+ person = GetPersons(admin, [person_id])[0]
+ assert not person['key_ids']
+
+ # Remove from each site
+ for site_id in site_ids:
+ print "DeletePersonFromSite(%d, %d)" % (person_id, site_id),
+ DeletePersonFromSite(admin, person_id, site_id)
+ print "=> OK"
+ person = GetPersons(admin, [person_id])[0]
+ assert not person['site_ids']
+
+ # Revoke role
+ person = GetPersons(admin, [person_id])[0]
+ for role_id in person['role_ids']:
+ print "DeleteRoleFromPerson(%d, %d)" % (role_id, person_id),
+ DeleteRoleFromPerson(admin, role_id, person_id)
+ print "=> OK"
+ person = GetPersons(admin, [person_id])[0]
+ assert not person['role_ids']
+
+ # Disable account
+ UpdatePerson(admin, person_id, {'enabled': False})
+ person = GetPersons(admin, [person_id])[0]
+ assert not person['enabled']
+
+ # Delete account
+ print "DeletePerson(%d)" % person_id,
+ DeletePerson(admin, person_id)
+ assert not GetPersons(admin, [person_id])
+ print "=> OK"
+
+print "GetPersons",
+assert not GetPersons(admin, person_ids)
+print "=> []"
+
+# Delete node groups
+for nodegroup_id in nodegroup_ids:
+ print "DeleteNodeGroup(%d)" % nodegroup_id,
+ DeleteNodeGroup(admin, nodegroup_id)
+ assert not GetNodeGroups(admin, [nodegroup_id])
+ print "=> OK"
+
+print "GetNodeGroups",
+assert not GetNodeGroups(admin, nodegroup_ids)
+print "=> []"
+
+# Delete site addresses
+for address_id in address_ids:
+ # Remove address types
+ for address_type_id in address_type_ids:
+ print "DeleteAddressTypeFromAddress(%d, %d)" % (address_type_id, address_id),
+ DeleteAddressTypeFromAddress(admin, address_type_id, address_id)
+ print "=> OK"
+ address = GetAddresses(admin, [address_id])[0]
+ assert not address['address_type_ids']
+
+ print "DeleteAddress(%d)" % address_id,
+ DeleteAddress(admin, address_id)
+ assert not GetAddresses(admin, [address_id])
+ print "=> OK"
+
+print "GetAddresses",
+assert not GetAddresses(admin, address_ids)
+print "=> []"
+
+# Delete address types
+for address_type_id in address_type_ids:
+ print "DeleteAddressType(%d)" % address_type_id,
+ DeleteAddressType(admin, address_type_id)
+ assert not GetAddressTypes(admin, [address_type_id])
+ print "=> OK"
+
+print "GetAddressTypes",
+assert not GetAddressTypes(admin, address_type_ids)
+print "=> []"
+
+# Delete sites
+for site_id in site_ids:
+ print "DeleteSite(%d)" % site_id,
+ DeleteSite(admin, site_id)
+ assert not GetSites(admin, [site_id])
+ print "=> OK"
+
+print "GetSites",
+assert not GetSites(admin, site_ids)
+print "=> []"
-#!/usr/bin/python
+#!/usr/bin/env python
+###
+##############################
+###
+### preparation / requirements
+###
+### two separate instances of myplc
+### for now they are located on the same box on lurch
+###
+### expectations :
+### your myplcs should more or less come out of the box,
+### I prefer not to alter the default PLC_ROOT_USER value,
+### instead we create a PI account on the site_id=1
+###
+##############################
-for peer in GetPeers():
- # Clear out everything
- for node in GetPeerNodes(peer['node_ids']):
+### xxx todo
+# check sites
+# check persons
+
+# support reloading without wiping everything off
+# dunno how to do (defvar plc)
+
+import getopt
+import sys
+import time
+
+import Shell
+import PLC.Methods
+
+# when running locally, we might wish to run only our local stuff
+dummy_print_methods = [ 'RefreshPeer' ]
+class DummyShell:
+ class Callable:
+ def __init__(self,method,index):
+ self.method=method
+ self.index=index
+ self.printed=False
+ def __call__ (self, *args, **kwds):
+ if not self.printed or self.method in dummy_print_methods:
+ print "Dummy method %s on remote peer %d skipped"%(self.method,self.index)
+ self.printed=True
+ return 0
+ def __init__(self,index):
+ self.index=index
+ def init(self):
+ for method in PLC.Methods.methods:
+ # ignore path-defined methods for now
+ if "." not in method:
+ setattr(self,method,DummyShell.Callable(method,self.index))
+ def show_config(self,*args):
+ print 'DummyShell'
+####################
+import xmlrpclib
+import os
+
+## try to support reload
+try:
+ globals()['plc']
+except:
+ plc=[None,None,None]
+try:
+ globals()['s']
+except:
+ s=[None,None,None]
+
+####################
+# predefined stuff
+# number of 'system' persons
+# builtin maint, local root, 2 persons for the peering
+system_persons = 4
+# among that, 1 gets refreshed - other ones have conflicting names
+system_persons_cross = 1
+
+system_slices_ids = (1,)
+def system_slices ():
+ return len(system_slices_ids)
+def total_slices ():
+ return number_slices+system_slices()
+
+def system_slivers ():
+ return len(system_slices_ids)
+
+# too tedious to do the maths : how many slices attached to node 1
+expected_slivers=None
+def total_slivers ():
+ global expected_slivers
+ if expected_slivers is None:
+ expected_slivers=0
+ actual_nodes_per_slice = min (number_nodes,number_nodes_per_slice)
+ for ns in myrange(number_slices):
+ slice_range = [ map_on_node (n+ns) for n in range(actual_nodes_per_slice)]
+ if 1 in slice_range:
+ expected_slivers += 1
+ return expected_slivers+system_slivers()
+
+####################
+# set initial conditions
+# actual persons_per_slice is min(number_persons,number_persons_per_slice)
+# actual nodes_per_slice is min(number_nodes,number_nodes_per_slice)
+# this is to prevent quadractic test times on big tests
+def define_test (sites,persons,nodes,slices,
+ keys_per_person,nodes_per_slice,persons_per_slice,fast_mode=None):
+ global number_sites, number_persons, number_nodes, number_slices
+ global number_keys_per_person, number_nodes_per_slice, number_persons_per_slice, fast_flag
+ number_sites = sites
+ number_persons=persons
+ number_nodes=nodes
+ number_slices=slices
+ number_keys_per_person=keys_per_person
+ number_nodes_per_slice=nodes_per_slice
+ number_persons_per_slice=persons_per_slice
+ if fast_mode is not None:
+ fast_flag=fast_mode
+
+# when we run locally on a given peer
+local_peer=None
+
+def show_test():
+ print '%d sites, %d persons, %d nodes & %d slices'%(
+ number_sites,number_persons,number_nodes,number_slices)
+ print '%d keys/person, %d nodes/slice & %d persons/slice'%(
+ number_keys_per_person,number_nodes_per_slice,number_persons_per_slice)
+ print 'fast_flag',fast_flag
+ if local_peer is not None:
+ print 'Running locally on index %d'%local_peer
+
+def mini():
+ define_test(1,1,1,1,1,1,1,True)
+
+def normal():
+ define_test (sites=4,persons=4,nodes=5,slices=4,
+ keys_per_person=2,nodes_per_slice=3,persons_per_slice=6,fast_mode=False)
+
+def apply_factor (factor):
+ global number_sites, number_persons, number_nodes, number_slices
+ [number_sites, number_persons, number_nodes, number_slices] = \
+ [factor*x for x in [number_sites, number_persons, number_nodes, number_slices]]
+
+
+# use only 1 key in this case
+big_factor=4
+def big():
+    global number_sites, number_persons, number_nodes, number_slices
+    number_sites=200
+    number_persons=500
+    number_nodes=350
+    number_slices=500
+    global number_nodes_per_slice  # was 'nodes_per_slice': a name nothing else reads
+    number_nodes_per_slice=3
+    global number_keys_per_person
+    number_keys_per_person=1
+    global number_persons_per_slice
+    number_persons_per_slice=3
+
+#huge_factor=1000
+def huge():
+    global number_sites, number_persons, number_nodes, number_slices
+    number_sites=1000
+    number_persons=2000
+    number_nodes=3000
+    number_slices=2000
+    global number_nodes_per_slice  # was 'nodes_per_slice': a name nothing else reads
+    number_nodes_per_slice=3
+    global number_keys_per_person
+    number_keys_per_person=1
+    global number_persons_per_slice
+    number_persons_per_slice=3
+
+# use mini test by default in interactive mode
+mini()
+#normal()
+
+####################
+# argh, for login_name that doesn't accept digits
+plain_numbers=['zero','one','two','three','four','five','six','seven','eight','nine','ten',
+ 'eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen','twenty']
+plain_digits=['a','b','c','d','e','f','g','h','i','j']
+####################
+plc[1]={ 'plcname':'Thierry plc1',
+ 'hostname':'planetlab-devbox.inria.fr',
+ 'url-format':'https://%s:443/PLCAPI/',
+ 'builtin-admin-id':'root@plc1.org',
+ 'builtin-admin-password':'root',
+ 'peer-admin-name':'peer1@planet-lab.org',
+ 'peer-admin-password':'peer',
+ 'node-format':'n1-%03d.plc1.org',
+ 'plainname' : 'one',
+ 'site-format':'one%s',
+ 'person-format' : 'user1-%d@plc1.org',
+ 'key-format':'ssh-rsa 11key4plc11 user%d-key%d',
+ 'person-password' : 'password1',
+ }
+plc[2]={ 'plcname':'Thierry plc2',
+ 'hostname':'lurch.cs.princeton.edu',
+ 'url-format':'https://%s:443/PLCAPI/',
+ 'builtin-admin-id':'root@plc2.org',
+ 'builtin-admin-password':'root',
+ 'peer-admin-name':'peer2@planet-lab.org',
+ 'peer-admin-password':'peer',
+ 'node-format':'n2-%03d.plc2.org',
+ 'plainname' : 'two',
+ 'site-format':'two%s',
+ 'person-format' : 'user2-%d@plc2.org',
+ 'key-format':'ssh-rsa 22key4plc22 user%d-key%d',
+ 'person-password' : 'password2',
+ }
+
+####################
+def peer_index(i):
+ return 3-i
+
+def plc_name (i):
+ return plc[i]['plcname']
+
+def site_name (i,n):
+ x=site_login_base(i,n)
+ return 'Site fullname '+x
+
+def site_login_base (i,n):
+ # for huge
+ if number_sites<len(plain_numbers):
+ return plc[i]['site-format']%plain_numbers[n]
+ else:
+ string=''
+ while True:
+ quo=n/10
+ rem=n%10
+ string=plain_digits[rem]+string
+ if quo == 0:
+ break
+ else:
+ n=quo
+ return plc[i]['site-format']%string
+
+def person_name (i,n):
+ return plc[i]['person-format']%n
+
+def key_name (i,n,k):
+ return plc[i]['key-format']%(n,k)
+
+def node_name (i,n):
+ return plc[i]['node-format']%n
+
+def slice_name (i,n):
+ site_index=map_on_site(n)
+ return "%s_slice%d"%(site_login_base(i,site_index),n)
+
+def sat_name (i):
+ return 'sat_%d'%i
+
+# to have indexes start at 1
+def map_on (n,max):
+ result=(n%max)
+ if result==0:
+ result=max
+ return result
+
+def myrange (n):
+ return range (1,n+1,1)
+
+def map_on_site (n):
+ return map_on (n,number_sites)
+
+def map_on_person (n):
+ return map_on (n,number_persons)
+
+def map_on_node (n):
+ return map_on (n,number_nodes)
+
+def message (*args):
+ print "====================",
+ print args
+
+##########
+def timer_start ():
+ global epoch,last_time
+ epoch = time.time()
+ last_time=epoch
+ print '+++ timer start'
+
+def timer_show ():
+ global last_time
+ now=time.time()
+ print '+++ %.02f seconds ellapsed (%.02f)'%(now-epoch,now-last_time)
+ last_time=now
+
+####################
+def test00_init (args=[1,2]):
+ timer_start()
+ ## have you loaded this file already (support for reload)
+ for i in args:
+ url=plc[i]['url-format']%plc[i]['hostname']
+ plc[i]['url']=url
+ if local_peer is None:
+ # the regular remote mode
+ argv=[sys.argv[0],
+ '--url',url,
+ '--user',plc[i]['builtin-admin-id'],
+ '--password',plc[i]['builtin-admin-password']]
+ print 'initializing s[%d]=>%s'%(i,url)
+ s[i]=Shell.Shell(argv)
+ s[i].init()
+ elif local_peer == i:
+ # local mode - use Shell's Direct mode - use /etc/planetlab/plc_config
+ s[i]=Shell.Shell([sys.argv[0]])
+ s[i].init()
+ else:
+ # remote peer in local mode : use dummy shell instead
+ s[i]=DummyShell(i)
+ s[i].init()
+
+def test00_print (args=[1,2]):
+ for i in args:
+ print '==================== s[%d]'%i
+ s[i].show_config()
+ print '===================='
+
+def check_nodes (el,ef,args=[1,2]):
+ for i in args:
+ # use a single request and sort afterwards for efficiency
+ # could have used GetNodes's scope as well
+ all_nodes = s[i].GetNodes()
+ n = len ([ x for x in all_nodes if x['peer_id'] is None])
+ f = len ([ x for x in all_nodes if x['peer_id'] is not None])
+ print '%02d: Checking nodes: got %d local (e=%d) & %d foreign (e=%d)'%(i,n,el,f,ef)
+ assert n==el
+ assert f==ef
+
+def check_keys (el,ef,args=[1,2]):
+ for i in args:
+ # use a single request and sort afterwards for efficiency
+ # could have used GetKeys's scope as well
+ all_keys = s[i].GetKeys()
+ n = len ([ x for x in all_keys if x['peer_id'] is None])
+ f = len ([ x for x in all_keys if x['peer_id'] is not None])
+ print '%02d: Checking keys: got %d local (e=%d) & %d foreign (e=%d)'%(i,n,el,f,ef)
+ assert n==el
+ assert f==ef
+
+def check_persons (el,ef,args=[1,2]):
+ for i in args:
+ # use a single request and sort afterwards for efficiency
+ # could have used GetPersons's scope as well
+ all_persons = s[i].GetPersons()
+ n = len ([ x for x in all_persons if x['peer_id'] is None])
+ f = len ([ x for x in all_persons if x['peer_id'] is not None])
+ print '%02d: Checking persons: got %d local (e=%d) & %d foreign (e=%d)'%(i,n,el,f,ef)
+ assert n==el
+ assert f==ef
+
+# expected : local slices, foreign slices
+def check_slices (els,efs,args=[1,2]):
+ for i in args:
+ ls=len(s[i].GetSlices({'peer_id':None}))
+ fs=len(s[i].GetSlices({'~peer_id':None}))
+ print '%02d: Checking slices: got %d local (e=%d) & %d foreign (e=%d)'%(i,ls,els,fs,efs)
+ assert els==ls
+ assert efs==fs
+
+def show_nodes (i,node_ids):
+ # same as above
+ all_nodes = s[i].GetNodes(node_ids)
+ loc_nodes = filter (lambda n: n['peer_id'] is None, all_nodes)
+ for_nodes = filter (lambda n: n['peer_id'] is not None, all_nodes)
+
+ for message,nodes in [ ['LOC',loc_nodes], ['FOR',for_nodes] ] :
+ if nodes:
+ print '[%s:%d] : '%(message,len(nodes)),
+ for node in nodes:
+ print node['hostname']+' ',
+ print ''
+
+def check_slice_nodes (expected_nodes, is_local_slice, args=[1,2]):
+ for ns in myrange(number_slices):
+ check_slice_nodes_n (ns,expected_nodes, is_local_slice, args)
+
+def check_slice_nodes_n (ns,expected_nodes, is_local_slice, args=[1,2]):
+ for i in args:
+ peer=peer_index(i)
+ if is_local_slice:
+ sname=slice_name(i,ns)
+ slice=s[i].GetSlices({'name':[sname],'peer_id':None})[0]
+ message='local'
+ else:
+ sname=slice_name(peer,ns)
+ slice=s[i].GetSlices({'name':[sname],'~peer_id':None})[0]
+ message='foreign'
+ print '%02d: %s slice %s (e=%d) '%(i,message,sname,expected_nodes),
+ slice_node_ids=slice['node_ids']
+ print 'on nodes ',slice_node_ids
+ show_nodes (i,slice_node_ids)
+ assert len(slice_node_ids)>=expected_nodes
+ if len(slice_node_ids) != expected_nodes:
+ print 'TEMPORARY'
+
+# expected : nodes on local slice
+def check_local_slice_nodes (expected, args=[1,2]):
+ check_slice_nodes(expected,True,args)
+
+# expected : nodes on foreign slice
+def check_foreign_slice_nodes (expected, args=[1,2]):
+ check_slice_nodes(expected,False,args)
+
+def check_conf_files (args=[1,2]):
+ for nn in myrange(number_nodes):
+ check_conf_files_n (nn,args)
+
+def check_conf_files_n (nn,args=[1,2]):
+ for i in args:
+ nodename=node_name(i,nn)
+ ndict= s[i].GetSlivers([nodename])[0]
+ assert ndict['hostname'] == nodename
+ conf_files = ndict['conf_files']
+ print '%02d: %d conf_files in GetSlivers for node %s'%(i,len(conf_files),nodename)
+ for conf_file in conf_files:
+ print 'source=',conf_file['source'],'|',
+ print 'dest=',conf_file['dest'],'|',
+ print 'enabled=',conf_file['enabled'],'|',
+ print ''
+
+import pprint
+pp = pprint.PrettyPrinter(indent=3)
+
+def check_slivers (esn,args=[1,2]):
+ for nn in myrange(number_nodes):
+ check_slivers_n (nn,esn,args)
+
+# too verbose to check all nodes, let's check only the first one
+def check_slivers_1 (esn,args=[1,2]):
+ check_slivers_n (1,esn,args)
+
+def check_slivers_n (nn,esn,args=[1,2]):
+    for i in args:
+        nodename=node_name(i,nn)
+        ndict= s[i].GetSlivers([nodename])[0]
+        assert ndict['hostname'] == nodename
+        slivers = ndict['slivers']
+        print '%02d: %d slivers (exp. %d) in GetSlivers for node %s'\
+            %(i,len(slivers),esn,nodename)
+        for sliver in slivers:
+            print '>>slivername = ',sliver['name']
+            pp.pprint(sliver)  # was 'pretty_printer' (undefined); module-level printer is 'pp'
+        assert len(slivers) == esn
+
+
+####################
+def test00_admin_person (args=[1,2]):
+ global plc
+ for i in args:
+ email = plc[i]['peer-admin-name']
+ try:
+ p=s[i].GetPersons([email])[0]
+ plc[i]['peer-admin-id']=p['person_id']
+ except:
+ person_id=s[i].AddPerson({'first_name':'Local',
+ 'last_name':'PeerPoint',
+ 'role_ids':[10],
+ 'email':email,
+ 'password':plc[i]['peer-admin-password']})
+ if person_id:
+ print '%02d:== created peer admin account %d, %s - %s'%(
+ i, person_id,plc[i]['peer-admin-name'],plc[i]['peer-admin-password'])
+ plc[i]['peer-admin-id']=person_id
+
+def test00_admin_enable (args=[1,2]):
+ for i in args:
+ if s[i].AdmSetPersonEnabled(plc[i]['peer-admin-id'],True):
+ s[i].AddRoleToPerson('admin',plc[i]['peer-admin-id'])
+ print '%02d:== enabled+admin on account %d:%s'%(i,plc[i]['peer-admin-id'],plc[i]['peer-admin-name'])
+
+def test00_peer_person (args=[1,2]):
+ global plc
+ for i in args:
+ peer=peer_index(i)
+ email=plc[peer]['peer-admin-name']
+ try:
+ p=s[i].GetPersons([email])[0]
+ plc[i]['peer_person_id']=p['person_id']
+ except:
+ person_id = s[i].AddPerson ( {'first_name':'Peering(plain passwd)', 'last_name':plc_name(peer), 'role_ids':[3000],
+ 'email':email,'password':plc[peer]['peer-admin-password']})
+ if person_id:
+ print '%02d:== Created person %d as the auth peer person'%(i,person_id)
+ plc[i]['peer_person_id']=person_id
+
+####################
+def test00_peer (args=[1,2]):
+ global plc
+ for i in args:
+ peer=peer_index(i)
+ peername = plc_name(peer)
+ try:
+ p=s[i].GetPeers ( [peername])[0]
+ plc[i]['peer_id']=p['peer_id']
+ except:
+ peer_id=s[i].AddPeer ( {'peername':peername,'peer_url':plc[peer]['url'],'auth_person_id':plc[i]['peer_person_id']})
+ # NOTE : need to manually reset the encrypted password through SQL at this point
+ if peer_id:
+ print '%02d:Created peer %d'%(i,peer_id)
+ print "PLEASE manually set password for person_id=%d in DB%d"%(plc[i]['peer_person_id'],i)
+ plc[i]['peer_id']=peer_id
+
+def test00_peer_passwd (args=[1,2]):
+ if local_peer is None:
+ for i in args:
+ # using an ad-hoc local command for now - never could get quotes to reach sql....
+ print "Attempting to remotely set passwd for person_id=%d in DB%d"%(plc[i]['peer_person_id'],i),
+ retcod=os.system("ssh root@%s new_plc_api/person-password.sh %d"%(plc[i]['hostname'],plc[i]['peer_person_id']))
+ print '-> system returns',retcod
+ else:
+ i=local_peer
+ print "Locally setting passwd for person_id=%d in DB%d"%(plc[i]['peer_person_id'],i),
+ retcod=os.system("./person-password.sh -l %d"%(plc[i]['peer_person_id']))
+ print '-> system returns',retcod
+
+# this one gets cached
+def get_peer_id (i):
+ try:
+ return plc[i]['peer_id']
+ except:
+ peername = plc_name (peer_index(i))
+ peer_id = s[i].GetPeers([peername])[0]['peer_id']
+ plc[i]['peer_id'] = peer_id
+ return peer_id
+
+##############################
+def test00_refresh (message,args=[1,2]):
+ print '=== refresh',message
+ timer_show()
+ for i in args:
+ print '%02d:== Refreshing peer'%(i),
+ retcod=s[i].RefreshPeer(get_peer_id(i))
+ keys=retcod.keys()
+ keys.sort()
+ print "Result: {",
+ for key in keys:
+ if "time" not in key:
+ print key,retcod[key],
+ print "}"
+ print "+++ ellapsed: {",
+ timers=retcod['timers']
+ keys=timers.keys()
+ keys.sort()
+ for key in keys:
+ print key,timers[key],
+ print "}"
+ timer_show()
+
+####################
+def test01_site (args=[1,2]):
+ for ns in myrange(number_sites):
+ test01_site_n (ns,True,args)
+
+def test01_del_site (args=[1,2]):
+ for ns in myrange(number_sites):
+ test01_site_n (ns,False,args)
+
+def test01_site_n (ns,add_if_true,args=[1,2]):
+ for i in args:
+ login_base = site_login_base (i,ns)
+ try:
+ site_id = s[i].GetSites([login_base])[0]['site_id']
+ if not add_if_true:
+ if s[i].DeleteSite(site_id):
+ print "%02d:== deleted site_id %d"%(i,site_id)
+ except:
+ if add_if_true:
+ sitename=site_name(i,ns)
+ abbrev_name="abbr"+str(i)
+ max_slices = number_slices
+ site_id=s[i].AddSite ( {'name':plc_name(i),
+ 'abbreviated_name': abbrev_name,
+ 'login_base': login_base,
+ 'is_public': True,
+ 'url': 'http://%s.com/'%abbrev_name,
+ 'max_slices':max_slices})
+ ### max_slices does not seem taken into account at that stage
+ if site_id:
+ s[i].UpdateSite(site_id,{'max_slices':max_slices})
+ print '%02d:== Created site %d with max_slices=%d'%(i,site_id,max_slices)
+
+####################
+def test02_person (args=[1,2]):
+ for np in myrange(number_persons):
+ test02_person_n (np,True,args)
+
+def test02_del_person (args=[1,2]):
+ for np in myrange(number_persons):
+ test02_person_n (np,False,args)
+
+def test02_person_n (np,add_if_true,args=[1,2]):
+ test02_person_n_ks (np, myrange(number_keys_per_person),add_if_true,args)
+
+def test02_person_n_ks (np,nks,add_if_true,args=[1,2]):
+ for i in args:
+ email = person_name(i,np)
+ try:
+ person_id=s[i].GetPersons([email])[0]['person_id']
+ if not add_if_true:
+ if s[i].DeletePerson(person_id):
+ print "%02d:== deleted person_id %d"%(i,person_id)
+ except:
+ if add_if_true:
+ password = plc[i]['person-password']
+ person_id=s[i].AddPerson({'first_name':'Your average',
+ 'last_name':'User%d'%np,
+ 'role_ids':[30],
+ 'email':email,
+ 'password': password })
+ if person_id:
+ print '%02d:== created user account %d, %s - %s'%(i, person_id,email,password)
+ for nk in nks:
+ key=key_name(i,np,nk)
+ s[i].AddPersonKey(email,{'key_type':'ssh', 'key':key})
+ print '%02d:== added key %s to person %s'%(i,key,email)
+
+####################
+# retrieves node_id from hostname - checks for local nodes only
+def get_local_node_id(i,nodename):
+ return s[i].GetNodes({'hostname':nodename,'peer_id':None})[0]['node_id']
+
+# clean all local nodes - foreign nodes are not supposed to be cleaned up manually
+def clean_all_nodes (args=[1,2]):
+ for i in args:
+ print '%02d:== Cleaning all nodes'%i
+ local_nodes = s[i].GetNodes({'peer_id':None})
+ if local_nodes:
+ for node in local_nodes:
+ print '%02d:==== Cleaning node %d'%(i,node['node_id'])
+ s[i].DeleteNode(node['node_id'])
+
+def test03_node (args=[1,2]):
+ for nn in myrange(number_nodes):
+ test03_node_n (nn,args)
+
+def test03_node_n (nn,args=[1,2]):
+ for i in args:
+ nodename = node_name(i,nn)
+ try:
+ get_local_node_id(i,nodename)
+ except:
+ login_base=site_login_base(i,map_on_site(nn))
+ n=s[i].AddNode(login_base,{'hostname': nodename})
+ if n:
+ print '%02d:== Added node %d %s'%(i,n,node_name(i,nn))
+
+def test02_delnode (args=[1,2]):
+ for nn in myrange(number_nodes):
+ test02_delnode_n (nn,args)
+
+def test02_delnode_n (nn,args=[1,2]):
+ for i in args:
+ nodename = node_name(i,nn)
+ node_id = get_local_node_id (i,nodename)
+ retcod=s[i].DeleteNode(nodename)
+ if retcod:
+ print '%02d:== Deleted node %d, returns %s'%(i,node_id,retcod)
+
+####################
+def clean_all_slices (args=[1,2]):
+ for i in args:
+ print '%02d:== Cleaning all slices'%i
+ for slice in s[i].GetSlices({'peer_id':None}):
+ slice_id = slice['slice_id']
+ if slice_id not in system_slices_ids:
+ if s[i].DeleteSlice(slice_id):
+ print '%02d:==== Cleaned slice %d'%(i,slice_id)
+
+def test04_slice (args=[1,2]):
+ for n in myrange(number_slices):
+ test04_slice_n (n,args)
+
+def test04_slice_n (ns,args=[1,2]):
+ for i in args:
+ peer=peer_index(i)
+ plcname=plc_name(i)
+ slicename=slice_name(i,ns)
+ max_nodes=number_nodes
+ try:
+ s[i].GetSlices([slicename])[0]
+ except:
+ slice_id=s[i].AddSlice ({'name':slicename,
+ 'description':'slice %s on %s'%(slicename,plcname),
+ 'url':'http://planet-lab.org/%s'%slicename,
+ 'max_nodes':max_nodes,
+ 'instanciation':'plc-instantiated',
+ })
+ if slice_id:
+ print '%02d:== created slice %d - max nodes=%d'%(i,slice_id,max_nodes)
+ actual_persons_per_slice = min (number_persons,number_persons_per_slice)
+ person_indexes=[map_on_person (p+ns) for p in range(actual_persons_per_slice)]
+ for np in person_indexes:
+ email = person_name (i,np)
+ retcod = s[i].AddPersonToSlice (email, slicename)
+ print '%02d:== Attached person %s to slice %s'%(i,email,slicename)
+
+
+def test04_node_slice (is_local, add_if_true, args=[1,2]):
+ for ns in myrange(number_slices):
+ test04_node_slice_ns (ns,is_local, add_if_true, args)
+
+def test04_node_slice_ns (ns,is_local, add_if_true, args=[1,2]):
+ actual_nodes_per_slice = min (number_nodes,number_nodes_per_slice)
+ node_indexes = [ map_on_node (n+ns) for n in range(actual_nodes_per_slice)]
+ test04_node_slice_nl_n (node_indexes,ns,is_local, add_if_true, args)
+
+def test04_node_slice_nl_n (nnl,ns,is_local, add_if_true, args=[1,2]):
+ for i in args:
+ peer=peer_index(i)
+ sname = slice_name (i,ns)
+
+ if is_local:
+ hostnames=[node_name(i,nn) for nn in nnl]
+ nodetype='local'
+ else:
+ hostnames=[node_name(peer,nn) for nn in nnl]
+ nodetype='foreign'
+ if add_if_true:
+ res=s[i].AddSliceToNodes (sname,hostnames)
+ message="added"
+ else:
+ res=s[i].DeleteSliceFromNodes (sname,hostnames)
+ message="deleted"
+ if res:
+ print '%02d:== %s in slice %s %s '%(i,message,sname,nodetype),
+ print hostnames
+
+def test04_slice_add_lnode (args=[1,2]):
+ test04_node_slice (True,True,args)
+
+def test04_slice_add_fnode (args=[1,2]):
+ test04_node_slice (False,True,args)
+
+def test04_slice_del_lnode (args=[1,2]):
+ test04_node_slice (True,False,args)
+
+def test04_slice_del_fnode (args=[1,2]):
+ test04_node_slice (False,False,args)
+
+####################
+def test05_sat (args=[1,2]):
+ for i in args:
+ name = sat_name(i)
+ try:
+ sat_id=s[i].GetSliceAttributeTypes ([name])[0]
+ except:
+ description="custom sat on plc%d"%i
+ min_role_id=10
+ sat_id=s[i].AddSliceAttributeType ({ 'name':name,
+ 'description': description,
+ 'min_role_id' : min_role_id})
+ if sat_id:
+ print '%02d:== created SliceAttributeType = %d'%(i,sat_id)
+
+# for test, we create 4 slice_attributes
+# on slice1 - sat=custom_made (see above) - all nodes
+# on slice1 - sat=custom_made (see above) - node=n1
+# on slice1 - sat='net_max' - all nodes
+# on slice1 - sat='net_max' - node=n1
+
+def test05_sa_atom (slice_name,sat_name,value,node,i):
+ sa_id=s[i].GetSliceAttributes({'name':sat_name,
+ 'value':value})
+ if not sa_id:
+ if node:
+ sa_id=s[i].AddSliceAttribute(slice_name,
+ sat_name,
+ value,
+ node)
+ else:
+ print 'slice_name',slice_name,'sat_name',sat_name
+ sa_id=s[i].AddSliceAttribute(slice_name,
+ sat_name,
+ value)
+ if sa_id:
+ print '%02d:== created SliceAttribute = %d'%(i,sa_id),
+ print 'On slice',slice_name,'and node',node
+
+def test05_sa (args=[1,2]):
+ for i in args:
+ test05_sa_atom (slice_name(i,1),sat_name(i),'custom sat/all nodes',None,i)
+ test05_sa_atom (slice_name(i,1),sat_name(i),'custom sat/node1',node_name(i,1),i)
+ test05_sa_atom (slice_name(i,1),'net_max','predefined sat/all nodes',None,i)
+ test05_sa_atom (slice_name(i,1),'net_max','predefined sat/node1',node_name(i,1),i)
+
+##############################
+# readable dumps
+##############################
+def p_site (s):
+ print s['site_id'],s['peer_id'],s['login_base'],s['name'],s['node_ids']
+
+def p_key (k):
+ print k['key_id'],k['peer_id'],k['key']
+
+def p_person (p):
+ print p['person_id'],p['peer_id'],p['email'],'keys:',p['key_ids'],'sites:',p['site_ids']
+
+def p_node(n):
+ print n['node_id'],n['peer_id'],n['hostname'],'sls=',n['slice_ids'],'site=',n['site_id']
+
+def p_slice(s):
+ print s['slice_id'],s['peer_id'],s['name'],'nodes=',s['node_ids'],'persons=',s['person_ids']
+ print '---','sas=',s['slice_attribute_ids'],s['name'],'crp=',s['creator_person_id']
+ print "--- 'expires':",s['expires']
+
+def p_sat(sat):
+ print sat['attribute_type_id'],sat['peer_id'], sat['name'], sat['min_role_id'], sat['description']
+
+def p_sa (sa):
+ print sa['slice_attribute_id'],sa['peer_id'],sa['name'],'AT_id:',sa['attribute_type_id']
+ print '---','v=',sa['value'],'sl=',sa['slice_id'],'n=',sa['node_id']
+
+import pprint
+pretty_printer=pprint.PrettyPrinter(5)
+
+def p_sliver (margin,x):
+ print margin,'SLIVERS for : hostname',x['hostname']
+ print margin,'%d config files'%len(x['conf_files'])
+ for sv in x['slivers']:
+ p_sliver_slice(margin,sv,x['hostname'])
+
+def p_sliver_slice(margin,sliver,hostname):
+ print margin,'SLIVER on hostname %s, s='%hostname,sliver['name']
+ print margin,'KEYS',
+ pretty_printer.pprint(sliver['keys'])
+ print margin,'ATTRIBUTES',
+ pretty_printer.pprint(sliver['attributes'])
+
+def dump (args=[1,2]):
+ for i in args:
+ print '%02d:============================== DUMPING'%i
+ print '%02d: SITES'%i
+ [p_site(x) for x in s[i].GetSites()]
+ print '%02d: KEYS'%i
+ [p_key(x) for x in s[i].GetKeys()]
+ print '%02d: PERSONS'%i
+ [p_person(x) for x in s[i].GetPersons()]
+ print '%02d: NODES'%i
+ [p_node(x) for x in s[i].GetNodes()]
+ print '%02d: SLICES'%i
+ [p_slice(x) for x in s[i].GetSlices()]
+ print '%02d: Slice Attribute Types'%i
+ [p_sat(x) for x in s[i].GetSliceAttributeTypes()]
+ print '%02d: Slice Attributes'%i
+ [p_sa(x) for x in s[i].GetSliceAttributes()]
+ timer_show()
+ print '%02d: Gathering all slivers'%i
+ slivers = s[i].GetSlivers()
+ timer_show()
+ snodes=min(3,number_nodes)
+ print '%02d: SLIVERS for first %d nodes'%(i,snodes)
+ [p_sliver('%02d:'%i,x) for x in s[i].GetSlivers(myrange(snodes))]
+ print '%02d:============================== END DUMP'%i
+
+
+## for usage under the api
+def pt ():
+ for x in GetSites():
+ p_site(x)
+
+def pk ():
+ for x in GetKeys():
+ print (x['key_id'],x['peer_id'],x['key'])
+
+def pp ():
+ for x in GetPersons():
+ p_person(x)
+
+def pn ():
+ for x in GetNodes():
+ p_node(x)
+
+def ps ():
+ for x in GetSlices():
+ p_slice(x)
+
+def psat():
+ for x in GetSliceAttributeTypes():
+ p_sat(x)
+
+def psa():
+ for x in GetSliceAttributes():
+ p_sa(x)
+
+def pv ():
+ for s in GetSlivers():
+ p_sliver('',s)
+
+def all():
+ print 'SITES'
+ pt()
+ print 'KEYS'
+ pk()
+ print 'PERSONS'
+ pp()
+ print 'NODES'
+ pn()
+ print 'SLICES'
+ ps()
+ print 'SLICE ATTR TYPES'
+ psat()
+ print 'SLICE ATTRS'
+ psa()
+ print 'SLIVERS'
+ pv()
+
+
+####################
+def test_all_init ():
+ message ("INIT")
+ test00_init ()
+ test00_print ()
+ test00_admin_person ()
+ test00_admin_enable ()
+ test00_peer_person ()
+ test00_peer ()
+ test00_peer_passwd ()
+
+def test_all_sites ():
+ test01_site ()
+ test00_refresh ('after site creation')
+
+def test_all_persons ():
+ test02_del_person()
+ test00_refresh ('before persons&keys creation')
+ check_keys(0,0)
+ check_persons(system_persons,system_persons_cross)
+ message ("Creating persons&keys")
+ test02_person ()
+ if not fast_flag:
+ message ("1 extra del/add cycle for unique indexes")
+ test02_del_person([2])
+ test02_person([2])
+ check_keys(number_persons*number_keys_per_person,0)
+ check_persons(system_persons+number_persons,system_persons_cross)
+ test00_refresh ('after persons&keys creation')
+ check_keys(number_persons*number_keys_per_person,number_persons*number_keys_per_person)
+ check_persons(system_persons+number_persons,system_persons_cross+number_persons)
+
+def test_all_nodes ():
+
+ message ("RESETTING NODES")
+ clean_all_nodes ()
+ test00_refresh ('cleaned nodes')
+ check_nodes(0,0)
+
+ # create one node on each site
+ message ("CREATING NODES")
+ test03_node ()
+ check_nodes(number_nodes,0)
+ test00_refresh ('after node creation')
+ check_nodes(number_nodes,number_nodes)
+ test02_delnode([2])
+ if not fast_flag:
+ message ("2 extra del/add cycles on plc2 for different indexes")
+ test03_node ([2])
+ test02_delnode([2])
+ test03_node ([2])
+ test02_delnode([2])
+ check_nodes(0,number_nodes,[2])
+ test00_refresh('after deletion on plc2')
+ check_nodes(number_nodes,0,[1])
+ check_nodes(0,number_nodes,[2])
+ message ("ADD on plc2 for different indexes")
+ test03_node ([2])
+ check_nodes (number_nodes,0,[1])
+ check_nodes (number_nodes,number_nodes,[2])
+ test00_refresh('after re-creation on plc2')
+ check_nodes (number_nodes,number_nodes,)
+
+def test_all_addslices ():
+
+ # reset
+ message ("RESETTING SLICES TEST")
+ clean_all_nodes ()
+ test03_node ()
+ clean_all_slices ()
+ test00_refresh ("After slices init")
+
+ # create slices on plc1
+ message ("CREATING SLICES on plc1")
+ test04_slice ([1])
+
+ check_slices (total_slices(),system_slices(),[1])
+ check_slices (system_slices(),system_slices(),[2])
+ test00_refresh ("after slice created on plc1")
+ check_slices (total_slices(),system_slices(),[1])
+ check_slices (system_slices(),total_slices(),[2])
+ # no slice has any node yet
+ check_local_slice_nodes(0,[1])
+ check_foreign_slice_nodes(0,[2])
+
+ # insert local nodes in local slice on plc1
+ message ("ADDING LOCAL NODES IN SLICES")
+ test04_slice_add_lnode ([1])
+ # of course the change is only local
+ check_local_slice_nodes (number_nodes_per_slice,[1])
+ check_foreign_slice_nodes(0,[2])
+
+ # refreshing
+ test00_refresh ("After local nodes were added on plc1")
+ check_local_slice_nodes (number_nodes_per_slice,[1])
+ check_foreign_slice_nodes (number_nodes_per_slice,[2])
+
+ # now we add foreign nodes into local slice
+ message ("ADDING FOREIGN NODES IN SLICES")
+ test04_slice_add_fnode ([1])
+ check_local_slice_nodes (2*number_nodes_per_slice,[1])
+ check_foreign_slice_nodes (number_nodes_per_slice,[2])
+
+ # refreshing
+ test00_refresh ("After foreign nodes were added in plc1")
+ # remember that foreign slices only know about LOCAL nodes
+ # so this does not do anything
+ check_local_slice_nodes (2*number_nodes_per_slice,[1])
+ check_foreign_slice_nodes (2*number_nodes_per_slice,[2])
+
+ check_slivers_1(total_slivers())
+
+def test_all_delslices ():
+
+ message ("DELETING FOREIGN NODES FROM SLICES")
+ test04_slice_del_fnode([1])
+ check_local_slice_nodes (number_nodes_per_slice,[1])
+ check_foreign_slice_nodes (2*number_nodes_per_slice,[2])
+ # mmh?
+ check_slivers_1(total_slivers(),[1])
+
+ test00_refresh ("After foreign nodes were removed on plc1")
+ check_local_slice_nodes (number_nodes_per_slice,[1])
+ check_foreign_slice_nodes (number_nodes_per_slice,[2])
+
+ message ("DELETING LOCAL NODES FROM SLICES")
+ test04_slice_del_lnode([1])
+ check_local_slice_nodes (0,[1])
+ check_foreign_slice_nodes (number_nodes_per_slice,[2])
+
+ test00_refresh ("After local nodes were removed on plc1")
+ check_local_slice_nodes (0,[1])
+ check_foreign_slice_nodes (0,[2])
+
+ message ("CHECKING SLICES CLEAN UP")
+ clean_all_slices([1])
+ check_slices (system_slices(),system_slices(),[1])
+ check_slices (system_slices(),total_slices(),[2])
+ test00_refresh ("After slices clenaup")
+ check_slices(system_slices(),system_slices())
+
+def test_all_slices ():
+ test_all_addslices ()
+ test_all_delslices ()
+
+def test_all_sats ():
+ test05_sat ()
+ test00_refresh("after SliceAttributeType creation")
+
+def test_all ():
+ test_all_init ()
+ timer_show()
+ test_all_sites ()
+ timer_show()
+ test_all_persons ()
+ timer_show()
+ test_all_nodes ()
+ timer_show()
+ test_all_slices ()
+ timer_show()
+ test_all_sats ()
+ timer_show()
+ dump()
+ timer_show()
+ message("END")
+
+### ad hoc test sequences
+# we just create objects here so we can dump the DB
+def populate ():
+ timer_start()
+ test_all_init()
+ timer_show()
+ test01_site()
+ timer_show()
+ test02_person()
+ timer_show()
+ test03_node()
+ timer_show()
+ test04_slice([1])
+ timer_show()
+ test04_slice_add_lnode([1])
+ timer_show()
+ test05_sat()
+ timer_show()
+ test05_sa([1])
+ timer_show()
+ message("END")
+
+def populate_end():
+ test00_init()
+ test00_refresh ("Peer 1 for publishing foreign nodes from 2",[1])
+ timer_show()
+ test04_slice_add_fnode([1])
+ timer_show()
+ test00_refresh("populate: refresh all")
+ timer_show()
+ test00_refresh("empty refresh")
+ dump()
+ timer_show()
+ message("END")
+
+# temporary - scratch as needed
+def test_now ():
+ populate()
+ test00_refresh('peer 1 gets plc2 nodes',[1])
+ test04_slice_add_fnode([1])
+ test00_refresh('final',[1])
+
+# test_all_sites ()
+# clean_all_nodes()
+# clean_all_slices()
+# populate()
+
+#####
+def usage ():
+ print "Usage: %s [-n] [-f]"%sys.argv[0]
+ print " -n runs test_now instead of test_all"
+ print " -p runs populate instead of test_all"
+ print " -e runs populate_end of test_all"
+ print " -m run in mini mode (1 instance of each class)"
+ print " -b performs big run"
+ print " -H performs huge run"
+ print " -f n : increases normal sizes by <n>"
+ print " -l n : tester runs locally for peer <n>, rather than through xmlrpc"
+
+ sys.exit(1)
+
+def main ():
+ try:
+ (o,a) = getopt.getopt(sys.argv[1:], "emnpbHf:l:")
+ except:
+ usage()
+ func = test_all
+ for (opt,val) in o:
+ if opt=='-n':
+ print 'Running test_now'
+ func = test_now
+ elif opt=='-p':
+ print 'Running populate'
+ func = populate
+ elif opt=='-e':
+ print 'Running populate_end'
+ func = populate_end
+ elif opt=='-m':
+ mini()
+ elif opt=='-b':
+ big()
+ elif opt=='-H':
+ huge()
+ elif opt=='-f':
+ factor=int(val)
+ apply_factor(factor)
+ elif opt=='-l':
+ global local_peer
+ local_peer=int(val)
+ if local_peer not in (1,2):
+ usage()
+ else:
+ usage()
+ if a:
+ usage()
+ show_test()
+ func()
+ timer_show()
- print "Refreshing peer", peer['peername']
- print RefreshPeer(peer['peer_id'])
+if __name__ == '__main__':
+ normal()
+ main()
+
-- Version
--------------------------------------------------------------------------------
--- Database version
+--version
CREATE TABLE plc_db_version (
version integer NOT NULL
) WITH OIDS;
INSERT INTO plc_db_version (version) VALUES (4);
+--------------------------------------------------------------------------------
+-- Peers
+--------------------------------------------------------------------------------
+
+-- Peers
+CREATE TABLE peers (
+ peer_id serial PRIMARY KEY, -- identifier
+ peername text NOT NULL, -- free text
+ peer_url text NOT NULL, -- the url of that peer's API
+ cacert text, -- (SSL) Public certificate of peer API server
+ key text, -- (GPG) Public key used for authentication
+
+ deleted boolean NOT NULL DEFAULT false
+) WITH OIDS;
+
--------------------------------------------------------------------------------
-- Accounts
--------------------------------------------------------------------------------
enabled boolean NOT NULL DEFAULT false, -- Has been disabled
-- Password
- password text NOT NULL DEFAULT 'nopass', -- Password (md5crypted)
+ password text NOT NULL, -- Password (md5crypted)
verification_key text, -- Reset password key
verification_expires timestamp without time zone,
-- Timestamps
date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
- last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP
+ last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
+
+ peer_id integer REFERENCES peers -- From which peer
) WITH OIDS;
CREATE INDEX persons_email_idx ON persons (email) WHERE deleted IS false;
-- Timestamps
date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
- last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP
+ last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
+
+ peer_id integer REFERENCES peers -- From which peer
) WITH OIDS;
CREATE INDEX sites_login_base_idx ON sites (login_base) WHERE deleted IS false;
key_id serial PRIMARY KEY, -- Key identifier
key_type text REFERENCES key_types NOT NULL, -- Key type
key text NOT NULL, -- Key material
- is_blacklisted boolean NOT NULL DEFAULT false -- Has been blacklisted
+ is_blacklisted boolean NOT NULL DEFAULT false, -- Has been blacklisted
+ peer_id integer REFERENCES peers -- From which peer
) WITH OIDS;
-- Account authentication key(s)
CREATE TABLE person_key (
- key_id integer REFERENCES keys PRIMARY KEY, -- Key identifier
- person_id integer REFERENCES persons NOT NULL -- Account identifier
+ person_id integer REFERENCES persons NOT NULL, -- Account identifier
+ key_id integer REFERENCES keys NOT NULL, -- Key identifier
+ PRIMARY KEY (person_id, key_id)
) WITH OIDS;
CREATE INDEX person_key_person_id_idx ON person_key (person_id);
+CREATE INDEX person_key_key_id_idx ON person_key (key_id);
CREATE VIEW person_keys AS
SELECT person_id,
-- Timestamps
date_created timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
- last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP
+ last_updated timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
+
+ peer_id integer REFERENCES peers -- From which peer
) WITH OIDS;
CREATE INDEX nodes_hostname_idx ON nodes (hostname) WHERE deleted IS false;
CREATE INDEX nodes_site_id_idx ON nodes (site_id) WHERE deleted IS false;
CREATE TABLE slices (
slice_id serial PRIMARY KEY, -- Slice identifier
site_id integer REFERENCES sites NOT NULL, -- Site identifier
+ peer_id integer REFERENCES peers, -- on which peer
name text NOT NULL, -- Slice name
instantiation text REFERENCES slice_instantiations NOT NULL DEFAULT 'plc-instantiated', -- Slice state, e.g. plc-instantiated
attribute_type_id serial PRIMARY KEY, -- Attribute type identifier
name text UNIQUE NOT NULL, -- Attribute name
description text, -- Attribute description
- min_role_id integer REFERENCES roles DEFAULT 10 -- If set, minimum (least powerful) role that can set or change this attribute
+ min_role_id integer REFERENCES roles DEFAULT 10, -- If set, minimum (least powerful) role that can set or change this attribute
+
+ peer_id integer REFERENCES peers -- From which peer
) WITH OIDS;
-- Slice/sliver attributes
slice_id integer REFERENCES slices NOT NULL, -- Slice identifier
node_id integer REFERENCES nodes, -- Sliver attribute if set
attribute_type_id integer REFERENCES slice_attribute_types NOT NULL, -- Attribute type identifier
- value text
+ value text,
+
+ peer_id integer REFERENCES peers -- From which peer
) WITH OIDS;
CREATE INDEX slice_attribute_slice_id_idx ON slice_attribute (slice_id);
CREATE INDEX slice_attribute_node_id_idx ON slice_attribute (node_id);
FROM slice_attribute
GROUP BY slice_id;
---------------------------------------------------------------------------------
--- Peers
---------------------------------------------------------------------------------
-
--- Peers
-CREATE TABLE peers (
- peer_id serial PRIMARY KEY, -- Peer identifier
- peername text NOT NULL, -- Peer name
- peer_url text NOT NULL, -- (HTTPS) URL of the peer PLCAPI interface
- cacert text, -- (SSL) Public certificate of peer API server
- key text, -- (GPG) Public key used for authentication
- deleted boolean NOT NULL DEFAULT false
-) WITH OIDS;
-CREATE INDEX peers_peername_idx ON peers (peername) WHERE deleted IS false;
-
--- Objects at each peer
-CREATE TABLE peer_site (
- site_id integer REFERENCES sites PRIMARY KEY, -- Local site identifier
- peer_id integer REFERENCES peers NOT NULL, -- Peer identifier
- peer_site_id integer NOT NULL, -- Foreign site identifier at peer
- UNIQUE (peer_id, peer_site_id) -- The same foreign site should not be cached twice
-) WITH OIDS;
-CREATE INDEX peer_site_peer_id_idx ON peers (peer_id);
-
-CREATE VIEW peer_sites AS
-SELECT peer_id,
-array_accum(site_id) AS site_ids,
-array_accum(peer_site_id) AS peer_site_ids
-FROM peer_site
-GROUP BY peer_id;
-
-CREATE TABLE peer_person (
- person_id integer REFERENCES persons PRIMARY KEY, -- Local user identifier
- peer_id integer REFERENCES peers NOT NULL, -- Peer identifier
- peer_person_id integer NOT NULL, -- Foreign user identifier at peer
- UNIQUE (peer_id, peer_person_id) -- The same foreign user should not be cached twice
-) WITH OIDS;
-CREATE INDEX peer_person_peer_id_idx ON peer_person (peer_id);
-
-CREATE VIEW peer_persons AS
-SELECT peer_id,
-array_accum(person_id) AS person_ids,
-array_accum(peer_person_id) AS peer_person_ids
-FROM peer_person
-GROUP BY peer_id;
-
-CREATE TABLE peer_key (
- key_id integer REFERENCES keys PRIMARY KEY, -- Local key identifier
- peer_id integer REFERENCES peers NOT NULL, -- Peer identifier
- peer_key_id integer NOT NULL, -- Foreign key identifier at peer
- UNIQUE (peer_id, peer_key_id) -- The same foreign key should not be cached twice
-) WITH OIDS;
-CREATE INDEX peer_key_peer_id_idx ON peer_key (peer_id);
-
-CREATE VIEW peer_keys AS
-SELECT peer_id,
-array_accum(key_id) AS key_ids,
-array_accum(peer_key_id) AS peer_key_ids
-FROM peer_key
-GROUP BY peer_id;
-
-CREATE TABLE peer_node (
- node_id integer REFERENCES nodes PRIMARY KEY, -- Local node identifier
- peer_id integer REFERENCES peers NOT NULL, -- Peer identifier
- peer_node_id integer NOT NULL, -- Foreign node identifier
- UNIQUE (peer_id, peer_node_id) -- The same foreign node should not be cached twice
-) WITH OIDS;
-CREATE INDEX peer_node_peer_id_idx ON peer_node (peer_id);
-
-CREATE VIEW peer_nodes AS
-SELECT peer_id,
-array_accum(node_id) AS node_ids,
-array_accum(peer_node_id) AS peer_node_ids
-FROM peer_node
-GROUP BY peer_id;
-
-CREATE TABLE peer_slice (
- slice_id integer REFERENCES slices PRIMARY KEY, -- Local slice identifier
- peer_id integer REFERENCES peers NOT NULL, -- Peer identifier
- peer_slice_id integer NOT NULL, -- Slice identifier at peer
- UNIQUE (peer_id, peer_slice_id) -- The same foreign slice should not be cached twice
-) WITH OIDS;
-CREATE INDEX peer_slice_peer_id_idx ON peer_slice (peer_id);
-
-CREATE VIEW peer_slices AS
-SELECT peer_id,
-array_accum(slice_id) AS slice_ids,
-array_accum(peer_slice_id) AS peer_slice_ids
-FROM peer_slice
-GROUP BY peer_id;
-
--------------------------------------------------------------------------------
-- Authenticated sessions
--------------------------------------------------------------------------------
-- Events
--------------------------------------------------------------------------------
+
-- Events
CREATE TABLE events (
event_id serial PRIMARY KEY, -- Event identifier
person_id integer REFERENCES persons, -- Person responsible for event, if any
node_id integer REFERENCES nodes, -- Node responsible for event, if any
fault_code integer NOT NULL DEFAULT 0, -- Did this event result in error
- call_name text NOT NULL, -- Call responsible for this event
- call text NOT NULL, -- Call responsible for this event, including parameters
+    call_name text NOT NULL, -- Call responsible for this event
+    call text NOT NULL, -- Call responsible for this event, including parameters
message text, -- High level description of this event
- runtime float DEFAULT 0, -- Event run time
- time timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP -- Event timestamp
+ runtime float, -- Event run time
+ time timestamp without time zone NOT NULL DEFAULT CURRENT_TIMESTAMP -- Event timestamp
) WITH OIDS;
--- Database object(s) that may have been affected by a particular event
+-- Event objects
CREATE TABLE event_object (
event_id integer REFERENCES events NOT NULL, -- Event identifier
object_id integer NOT NULL -- Object identifier
events.fault_code,
events.call_name,
events.call,
-events.message,
events.runtime,
CAST(date_part('epoch', events.time) AS bigint) AS time,
COALESCE(event_objects.object_ids, '{}') AS object_ids
persons.phone,
persons.url,
persons.bio,
+persons.peer_id,
CAST(date_part('epoch', persons.date_created) AS bigint) AS date_created,
CAST(date_part('epoch', persons.last_updated) AS bigint) AS last_updated,
-peer_person.peer_id,
-peer_person.peer_person_id,
COALESCE(person_roles.role_ids, '{}') AS role_ids,
COALESCE(person_roles.roles, '{}') AS roles,
COALESCE(person_sites.site_ids, '{}') AS site_ids,
COALESCE(person_keys.key_ids, '{}') AS key_ids,
COALESCE(person_slices.slice_ids, '{}') AS slice_ids
FROM persons
-LEFT JOIN peer_person USING (person_id)
LEFT JOIN person_roles USING (person_id)
LEFT JOIN person_sites USING (person_id)
LEFT JOIN person_keys USING (person_id)
LEFT JOIN person_slices USING (person_id);
+-- Objects at each peer
+CREATE VIEW peer_sites AS
+SELECT peer_id,
+array_accum(site_id) AS site_ids
+FROM sites
+GROUP BY peer_id;
+
+CREATE VIEW peer_persons AS
+SELECT peer_id,
+array_accum(person_id) AS person_ids
+FROM persons
+GROUP BY peer_id;
+
+CREATE VIEW peer_keys AS
+SELECT peer_id,
+array_accum(key_id) AS key_ids
+FROM keys
+GROUP BY peer_id;
+
+CREATE VIEW peer_nodes AS
+SELECT peer_id,
+array_accum(node_id) AS node_ids
+FROM nodes
+GROUP BY peer_id;
+
+CREATE VIEW peer_slice_attribute_types AS
+SELECT peer_id,
+array_accum(attribute_type_id) AS attribute_type_ids
+FROM slice_attribute_types
+GROUP BY peer_id;
+
+CREATE VIEW peer_slice_attributes AS
+SELECT peer_id,
+array_accum(slice_attribute_id) AS slice_attribute_ids
+FROM slice_attribute
+GROUP BY peer_id;
+
+CREATE VIEW peer_slices AS
+SELECT peer_id,
+array_accum(slice_id) AS slice_ids
+FROM slices
+GROUP BY peer_id;
+
CREATE VIEW view_peers AS
SELECT
peers.*,
COALESCE(peer_sites.site_ids, '{}') AS site_ids,
-COALESCE(peer_sites.peer_site_ids, '{}') AS peer_site_ids,
COALESCE(peer_persons.person_ids, '{}') AS person_ids,
-COALESCE(peer_persons.peer_person_ids, '{}') AS peer_person_ids,
COALESCE(peer_keys.key_ids, '{}') AS key_ids,
-COALESCE(peer_keys.peer_key_ids, '{}') AS peer_key_ids,
COALESCE(peer_nodes.node_ids, '{}') AS node_ids,
-COALESCE(peer_nodes.peer_node_ids, '{}') AS peer_node_ids,
-COALESCE(peer_slices.slice_ids, '{}') AS slice_ids,
-COALESCE(peer_slices.peer_slice_ids, '{}') AS peer_slice_ids
+COALESCE(peer_slice_attribute_types.attribute_type_ids, '{}') AS attribute_type_ids,
+COALESCE(peer_slice_attributes.slice_attribute_ids, '{}') AS slice_attribute_ids,
+COALESCE(peer_slices.slice_ids, '{}') AS slice_ids
FROM peers
LEFT JOIN peer_sites USING (peer_id)
LEFT JOIN peer_persons USING (peer_id)
LEFT JOIN peer_keys USING (peer_id)
LEFT JOIN peer_nodes USING (peer_id)
+LEFT JOIN peer_slice_attribute_types USING (peer_id)
+LEFT JOIN peer_slice_attributes USING (peer_id)
LEFT JOIN peer_slices USING (peer_id);
CREATE VIEW view_nodes AS
nodes.node_id,
nodes.hostname,
nodes.site_id,
+nodes.peer_id,
nodes.boot_state,
nodes.deleted,
nodes.model,
nodes.key,
CAST(date_part('epoch', nodes.date_created) AS bigint) AS date_created,
CAST(date_part('epoch', nodes.last_updated) AS bigint) AS last_updated,
-peer_node.peer_id,
-peer_node.peer_node_id,
COALESCE(node_nodenetworks.nodenetwork_ids, '{}') AS nodenetwork_ids,
COALESCE(node_nodegroups.nodegroup_ids, '{}') AS nodegroup_ids,
COALESCE(node_slices.slice_ids, '{}') AS slice_ids,
COALESCE(node_conf_files.conf_file_ids, '{}') AS conf_file_ids,
node_session.session_id AS session
FROM nodes
-LEFT JOIN peer_node USING (node_id)
LEFT JOIN node_nodenetworks USING (node_id)
LEFT JOIN node_nodegroups USING (node_id)
LEFT JOIN node_slices USING (node_id)
CREATE VIEW view_nodegroups AS
SELECT
-nodegroups.*,
+nodegroups.nodegroup_id,
+nodegroups.name,
+nodegroups.description,
COALESCE(nodegroup_nodes.node_ids, '{}') AS node_ids,
COALESCE(nodegroup_conf_files.conf_file_ids, '{}') AS conf_file_ids
FROM nodegroups
CREATE VIEW view_conf_files AS
SELECT
-conf_files.*,
+conf_files.conf_file_id,
+conf_files.enabled,
+conf_files.source,
+conf_files.dest,
+conf_files.file_permissions,
+conf_files.file_owner,
+conf_files.file_group,
+conf_files.preinstall_cmd,
+conf_files.postinstall_cmd,
+conf_files.error_cmd,
+conf_files.ignore_cmd_errors,
+conf_files.always_update,
COALESCE(conf_file_nodes.node_ids, '{}') AS node_ids,
COALESCE(conf_file_nodegroups.nodegroup_ids, '{}') AS nodegroup_ids
FROM conf_files
CREATE VIEW view_pcus AS
SELECT
-pcus.*,
+pcus.pcu_id,
+pcus.site_id,
+pcus.hostname,
+pcus.ip,
+pcus.protocol,
+pcus.username,
+pcus.password,
+pcus.model,
+pcus.notes,
COALESCE(pcu_nodes.node_ids, '{}') AS node_ids,
COALESCE(pcu_nodes.ports, '{}') AS ports
FROM pcus
sites.latitude,
sites.longitude,
sites.url,
+sites.peer_id,
CAST(date_part('epoch', sites.date_created) AS bigint) AS date_created,
CAST(date_part('epoch', sites.last_updated) AS bigint) AS last_updated,
-peer_site.peer_id,
-peer_site.peer_site_id,
COALESCE(site_persons.person_ids, '{}') AS person_ids,
COALESCE(site_nodes.node_ids, '{}') AS node_ids,
COALESCE(site_addresses.address_ids, '{}') AS address_ids,
COALESCE(site_slices.slice_ids, '{}') AS slice_ids,
COALESCE(site_pcus.pcu_ids, '{}') AS pcu_ids
FROM sites
-LEFT JOIN peer_site USING (site_id)
LEFT JOIN site_persons USING (site_id)
LEFT JOIN site_nodes USING (site_id)
LEFT JOIN site_addresses USING (site_id)
-- view_addresses: one row per address, with the ids and names of its
-- associated address types aggregated into arrays ('{}' when none match).
-- NOTE(review): diff hunk — the '+' lines replace addresses.* with an
-- explicit column list; assumed to mirror the addresses table's columns,
-- confirm against the table definition in the full schema.
CREATE VIEW view_addresses AS
SELECT
-addresses.*,
+addresses.address_id,
+addresses.line1,
+addresses.line2,
+addresses.line3,
+addresses.city,
+addresses.state,
+addresses.postalcode,
+addresses.country,
COALESCE(address_address_types.address_type_ids, '{}') AS address_type_ids,
COALESCE(address_address_types.address_types, '{}') AS address_types
FROM addresses
LEFT JOIN address_address_types USING (address_id);
-CREATE VIEW view_keys AS
-SELECT
-keys.*,
-person_key.person_id,
-peer_key.peer_id,
-peer_key.peer_key_id
-FROM keys
-LEFT JOIN person_key USING (key_id)
-LEFT JOIN peer_key USING (key_id);
-
-- view_slices: one row per slice with created/expires rendered as epoch
-- bigints and the slice's node/person/attribute ids aggregated into arrays
-- (COALESCE yields '{}' when a slice has no matches).
-- NOTE(review): diff hunk — the '+' line exposes slices.peer_id directly
-- (peer_id now lives on the slices table per this changeset) and the '-'
-- lines drop the former peer_slice join; confirm against the full patch.
CREATE VIEW view_slices AS
SELECT
slices.slice_id,
slices.site_id,
+slices.peer_id,
slices.name,
slices.instantiation,
slices.url,
slices.is_deleted,
CAST(date_part('epoch', slices.created) AS bigint) AS created,
CAST(date_part('epoch', slices.expires) AS bigint) AS expires,
-peer_slice.peer_id,
-peer_slice.peer_slice_id,
COALESCE(slice_nodes.node_ids, '{}') AS node_ids,
COALESCE(slice_persons.person_ids, '{}') AS person_ids,
COALESCE(slice_attributes.slice_attribute_ids, '{}') AS slice_attribute_ids
FROM slices
-LEFT JOIN peer_slice USING (slice_id)
LEFT JOIN slice_nodes USING (slice_id)
LEFT JOIN slice_persons USING (slice_id)
LEFT JOIN slice_attributes USING (slice_id);
+--
-- view_slice_attributes: each slice_attribute row joined (INNER, so rows
-- with an unknown attribute_type_id are excluded) to its type's name,
-- description and min_role_id.
-- NOTE(review): diff hunk — the '+' lines add the new per-attribute
-- peer_id column introduced by this changeset.
CREATE VIEW view_slice_attributes AS
SELECT
slice_attribute.slice_attribute_id,
slice_attribute_types.name,
slice_attribute_types.description,
slice_attribute_types.min_role_id,
-slice_attribute.value
+slice_attribute.value,
+slice_attribute.peer_id
FROM slice_attribute
INNER JOIN slice_attribute_types USING (attribute_type_id);
(login_base, name, abbreviated_name, max_slices)
VALUES
('pl', 'PlanetLab Central', 'PLC', 100);
+
+
+