#!/bin/bash
#
- # sfa Wraps PLCAPI into the SFA compliant API
+ # sfa
+ # Provides a generic SFA wrapper based on the initial PlanetLab Implementation
#
# hopefully right after plc
# chkconfig: 2345 61 39
#
- # description: Wraps PLCAPI into the SFA compliant API
- #
+ ### BEGIN INIT INFO
+ # Provides: sfa
+ # Required-Start: postgresql
+ # Required-Stop: postgresql
+ # Default-Start: 2 3 4 5
+ # Default-Stop: 0 1 6
+ # Short-Description: An implementation of the SFA Architecture
+ ### END INIT INFO
+
+ ####################
+ # borrowed from postgresql
+ function debian_get_postgresql_versions () {
+ versions=()
+ for v in `ls /usr/lib/postgresql/ 2>/dev/null`; do
+ if [ -x /usr/lib/postgresql/$v/bin/pg_ctl ] && [ ! -x /etc/init.d/postgresql-$v ]; then
+ versions+=($v)
+ fi
+ done
+ if [[ ${#versions[*]} == "0" ]]; then
+ echo "E: Missing postgresql installation. Aborting."
+ exit
+ fi
+ if [[ ${#versions[*]} != "1" ]]; then
+ echo "E: Too many postgresql versions installed. Aborting."
+ exit
+ fi
+ pgver=${versions[0]}
+ }
+
+ ####################
+ if [ -f /etc/redhat-release ] ; then
+ # source function library
+ . /etc/init.d/functions
+ PGDATA=/var/lib/pgsql/data/
+ PGWATCH=postmaster
+ PGLOCK=/var/lock/subsys/postgresql
+ SFALOCK=/var/lock/subsys/sfa-start.pid
+ elif [ -f /etc/debian_version ] ; then
+ . /etc/init.d/functions.sfa
+ debian_get_postgresql_versions
+ PGDATA=/etc/postgresql/$pgver/main/
+ PGWATCH=postgres
+ PGLOCK=/var/run/postgresql/$pgver-main.pid
+ SFALOCK=/var/run/sfa-start.pid
+ else
+ echo "initscript can only handle redhat/fedora or debian/ubuntu systems"
+ exit 1
+ fi
+
- # source function library
- . /etc/init.d/functions
- # Default locations
- PGDATA=/var/lib/pgsql/data
postgresql_conf=$PGDATA/postgresql.conf
- pghba_conf=$PGDATA/pg_hba.conf
+ pg_hba_conf=$PGDATA/pg_hba.conf
postgresql_sysconfig=/etc/sysconfig/pgsql
# SFA consolidated (merged) config file
function postgresql_check () {
# wait until postmaster is up and running - or 10s max
- if status postmaster >& /dev/null && [ -f /var/lock/subsys/postgresql ] ; then
+ if status $PGWATCH >& /dev/null && [ -f $PGLOCK ] ; then
# The only way we can be sure is if we can access it
for i in $(seq 1 10) ; do
# Must do this as the postgres user initially (before we
# Regenerate the main configuration file from default values
# overlaid with site-specific and current values.
- # Thierry -- 2007-07-05 : values in plc_config.xml are *not* taken into account here
files=( $sfa_default_config $sfa_local_config )
for file in "${files[@]}" ; do
if [ -n "$force" -o $file -nt $sfa_whole_config ] ; then
if [ -n "$force" -o $sfa_whole_config -nt /etc/sfa/sfa_config.sh ] ; then
sfa-config --shell $sfa_default_config $sfa_local_config > /etc/sfa/sfa_config.sh
fi
- # if [ -n "$force" -o $sfa_whole_config -nt /etc/sfa/php/sfa_config.php ] ; then
- # mkdir -p /etc/sfa/php
- # plc-config --php $sfa_whole_config >/etc/sfa/php/sfa_config.php
- # fi
# [re]generate the sfa_component_config
# this is a server-side thing but produces a file that somehow needs to be pushed
# only if enabled
[ "$SFA_DB_ENABLED" == 1 -o "$SFA_DB_ENABLED" == True ] || return
- if [ ! -f /etc/myplc-release ] ; then
-
- ######## standalone deployment - no colocated myplc
-
- ######## sysconfig
- # Set data directory and redirect startup output to /var/log/pgsql
- mkdir -p $(dirname $postgresql_sysconfig)
- # remove previous definitions
- touch $postgresql_sysconfig
- tmp=${postgresql_sysconfig}.new
- ( egrep -v '^(PGDATA=|PGLOG=|PGPORT=)' $postgresql_sysconfig
- echo "PGDATA=$PGDATA"
- echo "PGLOG=/var/log/pgsql"
- echo "PGPORT=$SFA_DB_PORT"
- ) >> $tmp ; mv -f $tmp $postgresql_sysconfig
-
- ######## /var/lib/pgsql/data
- # Fix ownership (rpm installation may have changed it)
- chown -R -H postgres:postgres $(dirname $PGDATA)
-
- # PostgreSQL must be started at least once to bootstrap
- # /var/lib/pgsql/data
- if [ ! -f $postgresql_conf ] ; then
- service postgresql initdb &> /dev/null || :
- check
- fi
-
- ######## /var/lib/pgsql/data/postgresql.conf
- registry_ip=""
- foo=$(python -c "import socket; print socket.gethostbyname(\"$SFA_REGISTRY_HOST\")") && registry_ip="$foo"
- # Enable DB server. drop Postgresql<=7.x
- # PostgreSQL >=8.0 defines listen_addresses
- # listen on a specific IP + localhost, more robust when run within a vserver
- sed -i -e '/^listen_addresses/d' $postgresql_conf
- if [ -z "$registry_ip" ] ; then
- echo "listen_addresses = 'localhost'" >> $postgresql_conf
- else
- echo "listen_addresses = '${registry_ip},localhost'" >> $postgresql_conf
- fi
- # tweak timezone to be 'UTC'
- sed -i -e '/^timezone=/d' $postgresql_conf
- echo "timezone='UTC'" >> $postgresql_conf
-
- ######## /var/lib/pgsql/data/pg_hba.conf
- # Disable access to all DBs from all hosts
- sed -i -e '/^\(host\|local\)/d' $pg_hba_conf
-
- # Enable passwordless localhost access
- echo "local all all trust" >>$pg_hba_conf
- # grant access
- (
- echo "host $SFA_DB_NAME $SFA_DB_USER 127.0.0.1/32 password"
- [ -n "$registry_ip" ] && echo "host $SFA_DB_NAME $SFA_DB_USER ${registry_ip}/32 password"
- ) >>$pg_hba_conf
-
- if [ "$SFA_GENERIC_FLAVOUR" == "openstack" ] ; then
- [ -n "$registry_ip" ] && echo "host nova nova ${registry_ip}/32 password" >> $pg_hba_conf
- fi
-
- # Fix ownership (sed -i changes it)
- chown postgres:postgres $postgresql_conf $pg_hba_conf
-
- ######## compute a password if needed
- if [ -z "$SFA_DB_PASSWORD" ] ; then
- SFA_DB_PASSWORD=$(uuidgen)
- sfa-config --category=sfa_db --variable=password --value="$SFA_DB_PASSWORD" --save=$sfa_local_config $sfa_local_config >& /dev/null
- reload force
- fi
-
- else
+ #if ! rpm -q myplc >& /dev/null; then
+
+ ######## standalone deployment - no colocated myplc
+
+ ######## sysconfig
+ # Set data directory and redirect startup output to /var/log/pgsql
+ mkdir -p $(dirname $postgresql_sysconfig)
+ # remove previous definitions
+ touch $postgresql_sysconfig
+ tmp=${postgresql_sysconfig}.new
+ ( egrep -v '^(PGDATA=|PGLOG=|PGPORT=)' $postgresql_sysconfig
+ echo "PGDATA=$PGDATA"
+ echo "PGLOG=/var/log/pgsql"
+ echo "PGPORT=$SFA_DB_PORT"
+ ) >> $tmp ; mv -f $tmp $postgresql_sysconfig
+
+ ######## /var/lib/pgsql/data
+ # Fix ownership (rpm installation may have changed it)
+ chown -R -H postgres:postgres $(dirname $PGDATA)
+
+ # PostgreSQL must be started at least once to bootstrap
+ # /var/lib/pgsql/data
+ if [ ! -f $postgresql_conf ] ; then
+ service postgresql initdb &> /dev/null || :
+ check
+ fi
+
+ ######## /var/lib/pgsql/data/postgresql.conf
+ registry_ip=""
+ foo=$(python -c "import socket; print socket.gethostbyname(\"$SFA_REGISTRY_HOST\")") && registry_ip="$foo"
+ # Enable DB server. drop Postgresql<=7.x
+ # PostgreSQL >=8.0 defines listen_addresses
+ # listen on a specific IP + localhost, more robust when run within a vserver
+ sed -i -e '/^listen_addresses/d' $postgresql_conf
+ if [ -z "$registry_ip" ] ; then
+ echo "listen_addresses = 'localhost'" >> $postgresql_conf
+ else
+ echo "listen_addresses = '${registry_ip},localhost'" >> $postgresql_conf
+ fi
+ # tweak timezone to be 'UTC'
+ sed -i -e '/^timezone=/d' $postgresql_conf
+ echo "timezone='UTC'" >> $postgresql_conf
+
+ ######## /var/lib/pgsql/data/pg_hba.conf
+ # Disable access to all DBs from all hosts
+ sed -i -e '/^\(host\|local\)/d' $pg_hba_conf
+ 
+ # Enable passwordless localhost access
+ echo "local all all trust" >>$pg_hba_conf
+ # grant access
+ (
+ echo "host $SFA_DB_NAME $SFA_DB_USER 127.0.0.1/32 password"
+ [ -n "$registry_ip" ] && echo "host $SFA_DB_NAME $SFA_DB_USER ${registry_ip}/32 password"
+ ) >>$pg_hba_conf
+ 
+ if [ "$SFA_GENERIC_FLAVOUR" == "openstack" ] ; then
+ [ -n "$registry_ip" ] && echo "host nova nova ${registry_ip}/32 password" >> $pg_hba_conf
+ fi
+ 
+ # Fix ownership (sed -i changes it)
+ chown postgres:postgres $postgresql_conf $pg_hba_conf
+
+ ######## compute a password if needed
+ if [ -z "$SFA_DB_PASSWORD" ] ; then
+ SFA_DB_PASSWORD=$(uuidgen)
+ sfa-config --category=sfa_db --variable=password --value="$SFA_DB_PASSWORD" --save=$sfa_local_config $sfa_local_config >& /dev/null
+ reload force
+ fi
+
+ #else
######## we are colocated with a myplc
- # no need to worry about the pgsql setup (see /etc/plc.d/postgresql)
- # myplc enforces the password for its user
- PLC_DB_USER=$(plc-config --category=plc_db --variable=user)
- PLC_DB_PASSWORD=$(plc-config --category=plc_db --variable=password)
- # store this as the SFA user/password
- sfa-config --category=sfa_db --variable=user --value=$PLC_DB_USER --save=$sfa_local_config $sfa_local_config >& /dev/null
- sfa-config --category=sfa_db --variable=password --value=$PLC_DB_PASSWORD --save=$sfa_local_config $sfa_local_config >& /dev/null
- reload force
- fi
+ # no need to worry about the pgsql setup (see /etc/plc.d/postgresql)
+ # myplc enforces the password for its user
+
+ # The code below overwrites the site specific sfa db info with myplc db info.
+ # This is most likely unnecessary and wrong so I'm commenting it out for now.
+ # PLC_DB_USER=$(plc-config --category=plc_db --variable=user)
+ # PLC_DB_PASSWORD=$(plc-config --category=plc_db --variable=password)
+ # store this as the SFA user/password
+ # sfa-config --category=sfa_db --variable=user --value=$PLC_DB_USER --save=$sfa_local_config $sfa_local_config >& /dev/null
+ # sfa-config --category=sfa_db --variable=password --value=$PLC_DB_PASSWORD --save=$sfa_local_config $sfa_local_config >& /dev/null
+ # reload force
+ #fi
######## Start up the server
# not too nice, but.. when co-located with myplc we'll let it start/stop postgresql
- if ! rpm -q myplc >& /dev/null ; then
+ if [ ! -f /etc/myplc-release ] ; then
echo STARTING...
service postgresql start >& /dev/null
fi
check
fi
check
- sfaadmin reg sync_db
+ # mention sfaadmin.py instead of just sfaadmin for safety
+ sfaadmin.py reg sync_db
MESSAGE=$"SFA: Checking for PostgreSQL server"
echo -n "$MESSAGE"
[ "$SFA_DB_ENABLED" == 1 -o "$SFA_DB_ENABLED" == True ] || return
# not too nice, but.. when co-located with myplc we'll let it start/stop postgresql
- if ! rpm -q myplc >& /dev/null ; then
+ if [ ! -f /etc/myplc-release ] ; then
service postgresql stop >& /dev/null
check
MESSAGE=$"Stopping PostgreSQL server"
[ "$SFA_FLASHPOLICY_ENABLED" == 1 ] && \
action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
- touch /var/lock/subsys/sfa-start.py
+ touch $SFALOCK
}
function stop() {
action $"Shutting down SFA" killproc sfa-start.py
+ # a possible alternative reads; esp. as we remove lock manually below
+ # echo $"Shutting down SFA" ; pkill '^sfa-start'
db_stop
- rm -f /var/lock/subsys/sfa-start.py
+ rm -f $SFALOCK
}
reload) reload force ;;
restart) stop; start ;;
condrestart)
- if [ -f /var/lock/subsys/sfa-start.py ]; then
+ if [ -f $SFALOCK ]; then
stop
start
fi
;;
status)
status sfa-start.py
+ # possible alternative for debian
+ # pids=$(pgrep '^sfa-start'); [ -n "$pids" ] && ps $pids
+
RETVAL=$?
;;
dbdump)
import shutil
from distutils.core import setup
+ from sfa.util.version import version_tag
+
scripts = glob("clientbin/*.py") + \
[
'config/sfa-config-tty',
'config/sfa-config',
- 'config/gen-sfa-cm-config.py',
+ # 'config/gen-sfa-cm-config.py',
'sfa/server/sfa-start.py',
- 'sfa/server/sfa_component_setup.py',
+ # 'sfa/server/sfa_component_setup.py',
'sfatables/sfatables',
'keyconvert/keyconvert.py',
'flashpolicy/sfa_flashpolicy.py',
'sfa/rspecs',
'sfa/rspecs/elements',
'sfa/rspecs/elements/versions',
+ 'sfa/rspecs/elements/v3',
'sfa/rspecs/versions',
'sfa/client',
'sfa/planetlab',
+ 'sfa/nitos',
+ 'sfa/dummy',
'sfa/openstack',
'sfa/federica',
+ 'sfa/senslab',
'sfatables',
'sfatables/commands',
'sfatables/processors',
]
- initscripts = [ 'sfa', 'sfa-cm' ]
+ initscripts = [ 'sfa' ]
+ if not os.path.isfile('/etc/redhat-release'): initscripts.append('functions.sfa')
data_files = [ ('/etc/sfa/', [ 'config/aggregates.xml',
'config/registries.xml',
setup(name='sfa',
packages = packages,
data_files = data_files,
- scripts = scripts)
+ scripts = scripts,
+ url="http://svn.planet-lab.org/wiki/SFATutorial",
+ author="Thierry Parmentelat, Tony Mack, Scott Baker",
+ author_email="thierry.parmentelat@inria.fr, tmack@princeton.cs.edu, smbaker@gmail.com",
+ version=version_tag)
-
+ ###
+ #
+ # Thierry - 2012 sept 21
+ #
+ # it seems terribly wrong that the client should decide to use PG- or PL- related code
+ # esp. in a context where we're trying to have more and more kinds of testbeds involved
+ #
+ # also, the 'users' field that CreateSliver is expecting (the key point here is to get this right)
+ # is specified to have at least a urn and a list of keys, both of these being supported natively
+ # in the sfa db
+ # So long story short, it seems to me that we should have a common code that fills in 'urn' and 'keys'
+ # and then code that tentatively tries to add as much extra info that we can get on these users
+ #
+ # the fact e.g. that PlanetLab insists on getting a first_name and last_name is not
+ # exactly consistent with the GENI spec. of CreateSliver
+ #
def pg_users_arg(records):
users = []
for record in records:
if record['type'] != 'user':
continue
- user = {'urn': record['reg-urn'],
- 'keys': record['reg-keys'],
- }
+ user = {'urn': record['geni_urn'],
+ 'keys': record['keys'],
+ 'email': record['email']}
users.append(user)
return users
- def sfa_users_arg(records, slice_record):
+ def sfa_users_arg (records, slice_record):
users = []
for record in records:
if record['type'] != 'user':
continue
- user = {'urn': record['geni_urn'], #
- 'keys': record['keys'],
- 'email': record['email'], # needed for MyPLC
- 'person_id': record['person_id'], # needed for MyPLC
- 'first_name': record['first_name'], # needed for MyPLC
- 'last_name': record['last_name'], # needed for MyPLC
- 'slice_record': slice_record, # needed for legacy refresh peer
- 'key_ids': record['key_ids'] # needed for legacy refresh peer
- }
+ user = {'urn': record['reg-urn'],
+ 'keys': record['reg-keys'],
+ 'slice_record': slice_record,
+ }
+ # fill as much stuff as possible from planetlab or similar
+ # note that reg-email is not yet available
+ pl_fields = ['email', 'person_id', 'first_name', 'last_name', 'key_ids']
+ nitos_fields = [ 'email', 'user_id' ]
+ extra_fields = list ( set(pl_fields).union(set(nitos_fields)))
+ # try to fill all these in
+ for field in extra_fields:
+ if record.has_key(field): user[field]=record[field]
users.append(user)
- return users
+
+ return users
def sfa_to_pg_users_arg(users):
from sfa.client.sfi import save_records_to_file
from sfa.trust.hierarchy import Hierarchy
from sfa.trust.gid import GID
+ from sfa.trust.certificate import convert_public_key
from sfa.client.candidates import Candidates
+ from sfa.client.common import optparse_listvalue_callback, terminal_render, filter_records
+
pprinter = PrettyPrinter(indent=4)
try:
except:
help_basedir='*unable to locate Hierarchy().basedir'
- def optparse_listvalue_callback(option, opt, value, parser):
- setattr(parser.values, option.dest, value.split(','))
-
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('options', []).insert(0, (args, kwargs))
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='authority to list (hrn/urn - mandatory)')
@args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
@args('-r', '--recursive', dest='recursive', metavar='<recursive>', help='list all child records',
- action='store_true', default=False)
- def list(self, xrn, type=None, recursive=False):
+ action='store_true', default=False)
+ @args('-v', '--verbose', dest='verbose', action='store_true', default=False)
+ def list(self, xrn, type=None, recursive=False, verbose=False):
"""List names registered at a given authority - possibly filtered by type"""
xrn = Xrn(xrn, type)
- options = {'recursive': recursive}
- records = self.api.manager.List(self.api, xrn.get_hrn(), options=options)
- for record in records:
- if not type or record['type'] == type:
- print "%s (%s)" % (record['hrn'], record['type'])
+ options_dict = {'recursive': recursive}
+ records = self.api.manager.List(self.api, xrn.get_hrn(), options=options_dict)
+ list = filter_records(type, records)
+ # terminal_render expects an options object
+ class Options: pass
+ options=Options()
+ options.verbose=verbose
+ terminal_render (list, options)
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
choices=('text', 'xml', 'simple'), help='display record in different formats')
def show(self, xrn, type=None, format=None, outfile=None):
"""Display details for a registered object"""
- records = self.api.manager.Resolve(self.api, xrn, type, True)
+ records = self.api.manager.Resolve(self.api, xrn, type, details=True)
for record in records:
sfa_record = Record(dict=record)
sfa_record.dump(format)
record_dict['pi'] = pis
return record_dict
+
+ @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
+ @args('-t', '--type', dest='type', metavar='<type>', help='object type (mandatory)',)
+ @args('-a', '--all', dest='all', metavar='<all>', action='store_true', default=False, help='check all users GID')
+ @args('-v', '--verbose', dest='verbose', metavar='<verbose>', action='store_true', default=False, help='verbose mode: display user\'s hrn ')
+ def check_gid(self, xrn=None, type=None, all=None, verbose=None):
+ """Check the correspondance between the GID and the PubKey"""
+
+ # db records
+ from sfa.storage.alchemy import dbsession
+ from sfa.storage.model import RegRecord
+ db_query = dbsession.query(RegRecord).filter_by(type=type)
+ if xrn and not all:
+ hrn = Xrn(xrn).get_hrn()
+ db_query = db_query.filter_by(hrn=hrn)
+ elif all and xrn:
+ print "Use either -a or -x <xrn>, not both !!!"
+ sys.exit(1)
+ elif not all and not xrn:
+ print "Use either -a or -x <xrn>, one of them is mandatory !!!"
+ sys.exit(1)
+
+ records = db_query.all()
+ if not records:
+ print "No Record found"
+ sys.exit(1)
+
+ OK = []
+ NOK = []
+ ERROR = []
+ NOKEY = []
+ for record in records:
+ # get the pubkey stored in SFA DB
+ if record.reg_keys:
+ db_pubkey_str = record.reg_keys[0].key
+ try:
+ db_pubkey_obj = convert_public_key(db_pubkey_str)
+ except:
+ ERROR.append(record.hrn)
+ continue
+ else:
+ NOKEY.append(record.hrn)
+ continue
+
+ # get the pubkey from the gid
+ gid_str = record.gid
+ gid_obj = GID(string = gid_str)
+ gid_pubkey_obj = gid_obj.get_pubkey()
+
+ # Check if gid_pubkey_obj and db_pubkey_obj are the same
+ check = gid_pubkey_obj.is_same(db_pubkey_obj)
+ if check :
+ OK.append(record.hrn)
+ else:
+ NOK.append(record.hrn)
+
+ if not verbose:
+ print "Users NOT having a PubKey: %s\n\
+ Users having a non RSA PubKey: %s\n\
+ Users having a GID/PubKey correpondence OK: %s\n\
+ Users having a GID/PubKey correpondence Not OK: %s\n"%(len(NOKEY), len(ERROR), len(OK), len(NOK))
+ else:
+ print "Users NOT having a PubKey: %s and are: \n%s\n\n\
+ Users having a non RSA PubKey: %s and are: \n%s\n\n\
+ Users having a GID/PubKey correpondence OK: %s and are: \n%s\n\n\
+ Users having a GID/PubKey correpondence NOT OK: %s and are: \n%s\n\n"%(len(NOKEY),NOKEY, len(ERROR), ERROR, len(OK), OK, len(NOK), NOK)
+
+
+
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
@args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
@args('-e', '--email', dest='email', default="",
help='Description, useful for slices', default=None)
@args('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
default=None)
- @args('-s', '--slices', dest='slices', metavar='<slices>', help='slice xrns',
+ @args('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
- @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='slice researchers',
+ @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@args('-p', '--pis', dest='pis', metavar='<PIs>',
- help='Principal Investigators/Project Managers ',
+ help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
def register(self, xrn, type=None, url=None, description=None, key=None, slices='',
pis='', researchers='',email=''):
help='Description', default=None)
@args('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
default=None)
- @args('-s', '--slices', dest='slices', metavar='<slices>', help='slice xrns',
+ @args('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
- @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='slice researchers',
+ @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@args('-p', '--pis', dest='pis', metavar='<PIs>',
- help='Principal Investigators/Project Managers ',
+ help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
def update(self, xrn, type=None, url=None, description=None, key=None, slices='',
pis='', researchers=''):
"""Update an existing Registry record"""
+ print 'incoming PIS',pis
record_dict = self._record_dict(xrn=xrn, type=type, url=url, description=description,
key=key, slices=slices, researchers=researchers, pis=pis)
self.api.manager.Update(self.api, record_dict)
importer.run()
def sync_db(self):
- """Initiailize or upgrade the db"""
+ """Initialize or upgrade the db"""
from sfa.storage.dbschema import DBSchema
dbschema=DBSchema()
dbschema.init_or_upgrade
version = self.api.manager.GetVersion(self.api, {})
pprinter.pprint(version)
- def slices(self):
- """List the running slices at this Aggregate"""
- print self.api.manager.ListSlices(self.api, [], {})
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
def status(self, xrn):
"""Display the status of a slice or slivers"""
urn = Xrn(xrn, 'slice').get_urn()
- status = self.api.manager.SliverStatus(self.api, urn, [], {})
+ status = self.api.manager.SliverStatus(self.api, [urn], {}, {})
pprinter.pprint(status)
- @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
@args('-r', '--rspec-version', dest='rspec_version', metavar='<rspec_version>',
default='GENI', help='version/format of the resulting rspec response')
- def resources(self, xrn=None, rspec_version='GENI'):
- """Display the available resources at an aggregate
-or the resources allocated by a slice"""
+ def resources(self, rspec_version='GENI'):
+ """Display the available resources at an aggregate"""
options = {'geni_rspec_version': rspec_version}
if xrn:
options['geni_slice_urn'] = Xrn(xrn, 'slice').get_urn()
- print options
- resources = self.api.manager.ListResources(self.api, [], options)
+ resources = self.api.manager.ListResources(self.api, {}, options)
print resources
-
+
+ @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
+ @args('-r', '--rspec-version', dest='rspec_version', metavar='<rspec_version>',
+ default='GENI', help='version/format of the resulting rspec response')
+ def describe(self, xrn, rspec_version='GENI'):
+ """Display the resources allocated by a slice or slivers"""
+ urn = Xrn(xrn, 'slice').get_urn()
+ options = {'geni_rspec_version': rspec_version}
+ status = self.api.manager.Describe(self.api, {}, [urn], options)
+ print status
+
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
@args('-r', '--rspec', dest='rspec', metavar='<rspec>', help='rspec file (mandatory)')
@args('-u', '--user', dest='user', metavar='<user>', help='hrn/urn of slice user (mandatory)')
@args('-k', '--key', dest='key', metavar='<key>', help="path to user's public key file (mandatory)")
- def create(self, xrn, rspec, user, key):
+ def allocate(self, xrn, rspec, user, key):
"""Allocate slivers"""
xrn = Xrn(xrn, 'slice')
- slice_urn=xrn.get_urn()
+ urn=xrn.get_urn()
rspec_string = open(rspec).read()
user_xrn = Xrn(user, 'user')
user_urn = user_xrn.get_urn()
user_key_string = open(key).read()
users = [{'urn': user_urn, 'keys': [user_key_string]}]
- options={}
- self.api.manager.CreateSliver(self, slice_urn, [], rspec_string, users, options)
+ options={'geni_users': users}
+ status = self.api.manager.Allocate(self.api, urn, {}, rspec_string, options)
+ print status
+
+ @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
+ def provision(self, xrns):
+ status = self.api.manager.Provision(self.api, [xrns], {}, {})
+ print status
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
def delete(self, xrn):
"""Delete slivers"""
- self.api.manager.DeleteSliver(self.api, xrn, [], {})
-
+ result = self.api.manager.DeleteSliver(self.api, [xrn], {}, {})
+ print result
+
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
- def start(self, xrn):
+ @args('-e', '--expiration', dest='expiration', metavar='<expiration>', help='Expiration date (mandatory)')
+ def renew(self, xrn, expiration):
"""Start slivers"""
- self.api.manager.start_slice(self.api, xrn, [])
+ result = self.api.manager.start_slice(self.api, xrn, {}, expiration, {})
+ print result
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
- def stop(self, xrn):
+ def shutdown(self, xrn):
"""Stop slivers"""
- self.api.manager.stop_slice(self.api, xrn, [])
+ result = self.api.manager.Shutdown(self.api, xrn, {}, {})
+ print result
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
- def reset(self, xrn):
+ @args('-a', '--action', dest='action', metavar='<action>', help='Action name (mandatory)')
+ def operation(self, xrn, action):
"""Reset sliver"""
- self.api.manager.reset_slice(self.api, xrn)
-
+ result = self.api.manager.PerformOperationalAction(self.api, [xrn], {}, action, {})
+ print result
# @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
# @args('-r', '--rspec', dest='rspec', metavar='<rspec>', help='request rspec', default=None)
CM_PORT=12346
- # utility methods here
- def optparse_listvalue_callback(option, option_string, value, parser):
- setattr(parser.values, option.dest, value.split(','))
-
- # a code fragment that could be helpful for argparse which unfortunately is
- # available with 2.7 only, so this feels like too strong a requirement for the client side
- #class ExtraArgAction (argparse.Action):
- # def __call__ (self, parser, namespace, values, option_string=None):
- # would need a try/except of course
- # (k,v)=values.split('=')
- # d=getattr(namespace,self.dest)
- # d[k]=v
- #####
- #parser.add_argument ("-X","--extra",dest='extras', default={}, action=ExtraArgAction,
- # help="set extra flags, testbed dependent, e.g. --extra enabled=true")
-
- def optparse_dictvalue_callback (option, option_string, value, parser):
- try:
- (k,v)=value.split('=',1)
- d=getattr(parser.values, option.dest)
- d[k]=v
- except:
- parser.print_help()
- sys.exit(1)
+ from sfa.client.common import optparse_listvalue_callback, optparse_dictvalue_callback, \
+ terminal_render, filter_records
# display methods
def display_rspec(rspec, format='rspec'):
return
-def credential_printable (credential_string):
- credential=Credential(string=credential_string)
+def filter_records(type, records):
+ filtered_records = []
+ for record in records:
+ if (record['type'] == type) or (type == "all"):
+ filtered_records.append(record)
+ return filtered_records
+
+
+def credential_printable (cred):
+ credential=Credential(cred=cred)
result=""
result += credential.get_summary_tostring()
result += "\n"
rights = credential.get_privileges()
- result += "rights=%s"%rights
- result += "\n"
+ result += "type=%s\n" % credential.type
+ result += "version=%s\n" % credential.version
+ result += "rights=%s\n"%rights
return result
def show_credentials (cred_s):
("version", ""),
("list", "authority"),
("show", "name"),
- ("add", "record"),
- ("update", "record"),
+ ("add", "[record]"),
+ ("update", "[record]"),
("remove", "name"),
- ("slices", ""),
- ("resources", "[slice_hrn]"),
+ ("resources", ""),
+ ("describe", "slice_hrn"),
("create", "slice_hrn rspec"),
+ ("allocate", "slice_hrn rspec"),
+ ("provision", "slice_hrn"),
+ ("action", "slice_hrn action"),
("delete", "slice_hrn"),
("status", "slice_hrn"),
- ("start", "slice_hrn"),
- ("stop", "slice_hrn"),
- ("reset", "slice_hrn"),
("renew", "slice_hrn time"),
("shutdown", "slice_hrn"),
("get_ticket", "slice_hrn rspec"),
("redeem_ticket", "ticket"),
- ("delegate", "name"),
+ ("delegate", "to_hrn"),
("gid", "[name]"),
("trusted", "cred"),
("config", ""),
parser.add_option('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
parser.add_option('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
parser.add_option('-e', '--email', dest='email', default="", help="email (mandatory for users)")
- # use --extra instead
- # parser.add_option('-u', '--url', dest='url', metavar='<url>', default=None, help="URL, useful for slices")
- # parser.add_option('-d', '--description', dest='description', metavar='<description>',
- # help='Description, useful for slices', default=None)
parser.add_option('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
default=None)
- parser.add_option('-s', '--slices', dest='slices', metavar='<slices>', help='slice xrns',
+ parser.add_option('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
parser.add_option('-r', '--researchers', dest='researchers', metavar='<researchers>',
- help='slice researchers', default='', type="str", action='callback',
+ help='Set/replace slice researchers', default='', type="str", action='callback',
callback=optparse_listvalue_callback)
- parser.add_option('-p', '--pis', dest='pis', metavar='<PIs>', help='Principal Investigators/Project Managers',
+ parser.add_option('-p', '--pis', dest='pis', metavar='<PIs>', help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
- # use --extra instead
- # parser.add_option('-f', '--firstname', dest='firstname', metavar='<firstname>', help='user first name')
- # parser.add_option('-l', '--lastname', dest='lastname', metavar='<lastname>', help='user last name')
parser.add_option ('-X','--extra',dest='extras',default={},type='str',metavar="<EXTRA_ASSIGNS>",
action="callback", callback=optparse_dictvalue_callback, nargs=1,
help="set extra/testbed-dependent flags, e.g. --extra enabled=true")
+ # user specifies remote aggregate/sm/component
+ if command in ("resources", "describe", "allocate", "provision", "create", "delete", "allocate", "provision",
+ "action", "shutdown", "get_ticket", "renew", "status"):
+ parser.add_option("-d", "--delegate", dest="delegate", default=None,
+ action="store_true",
+ help="Include a credential delegated to the user's root"+\
+ "authority in set of credentials for this call")
+
# show_credential option
- if command in ("list","resources","create","add","update","remove","slices","delete","status","renew"):
+ if command in ("list","resources", "describe", "provision", "allocate", "create","add","update","remove","slices","delete","status","renew"):
parser.add_option("-C","--credential",dest='show_credential',action='store_true',default=False,
help="show credential(s) used in human-readable form")
# registy filter option
if command in ("show"):
parser.add_option("-k","--key",dest="keys",action="append",default=[],
help="specify specific keys to be displayed from record")
- if command in ("resources"):
+ if command in ("resources", "describe"):
# rspec version
parser.add_option("-r", "--rspec-version", dest="rspec_version", default="SFA 1",
help="schema type and version of resulting RSpec")
# 'create' does return the new rspec, makes sense to save that too
- if command in ("resources", "show", "list", "gid", 'create'):
+ if command in ("resources", "describe", "allocate", "provision", "show", "list", "gid", 'create'):
parser.add_option("-o", "--output", dest="file",
help="output XML to file", metavar="FILE", default=None)
if command == 'list':
parser.add_option("-r", "--recursive", dest="recursive", action='store_true',
help="list all child records", default=False)
+ parser.add_option("-v", "--verbose", dest="verbose", action='store_true',
+ help="gives details, like user keys", default=False)
if command in ("delegate"):
parser.add_option("-u", "--user",
- action="store_true", dest="delegate_user", default=False,
- help="delegate user credential")
- parser.add_option("-s", "--slice", dest="delegate_slice",
- help="delegate slice credential", metavar="HRN", default=None)
+ action="store_true", dest="delegate_user", default=False,
+ help="delegate your own credentials; default if no other option is provided")
+ parser.add_option("-s", "--slice", dest="delegate_slices",action='append',default=[],
+ metavar="slice_hrn", help="delegate cred. for slice HRN")
+ parser.add_option("-a", "--auths", dest='delegate_auths',action='append',default=[],
+ metavar='auth_hrn', help="delegate cred for auth HRN")
+ # this primarily is a shorthand for -a my_hrn^
+ parser.add_option("-p", "--pi", dest='delegate_pi', default=None, action='store_true',
+ help="delegate your PI credentials, so s.t. like -a your_hrn^")
+ parser.add_option("-A","--to-authority",dest='delegate_to_authority',action='store_true',default=False,
+ help="""by default the mandatory argument is expected to be a user,
+ use this if you mean an authority instead""")
if command in ("version"):
parser.add_option("-R","--registry-version",
# Main: parse arguments and dispatch to command
#
def dispatch(self, command, command_options, command_args):
- return getattr(self, command)(command_options, command_args)
+ method=getattr(self, command,None)
+ if not method:
+ print "Unknown command %s"%command
+ return
+ return method(command_options, command_args)
def main(self):
self.sfi_parser = self.create_parser()
try:
self.dispatch(command, command_options, command_args)
- except KeyError:
- self.logger.critical ("Unknown command %s"%command)
+ except:
+ self.logger.log_exc ("sfi command %s failed"%command)
sys.exit(1)
return
# extract what's needed
self.private_key = client_bootstrap.private_key()
self.my_credential_string = client_bootstrap.my_credential_string ()
+ self.my_credential = {'geni_type': 'geni_sfa',
+ 'geni_version': '3.0',
+ 'geni_value': self.my_credential_string}
self.my_gid = client_bootstrap.my_gid ()
self.client_bootstrap = client_bootstrap
sys.exit(-1)
return self.client_bootstrap.authority_credential_string (self.authority)
+ def authority_credential_string(self, auth_hrn):
+ return self.client_bootstrap.authority_credential_string (auth_hrn)
+
def slice_credential_string(self, name):
return self.client_bootstrap.slice_credential_string (name)
+ def slice_credential(self, name):
+ return {'geni_type': 'geni_sfa',
+ 'geni_version': '3.0',
+ 'geni_value': self.slice_credential_string(name)}
+
+ # xxx should be supported by sfaclientbootstrap as well
+ def delegate_cred(self, object_cred, hrn, type='authority'):
+ # the gid and hrn of the object we are delegating
+ if isinstance(object_cred, str):
+ object_cred = Credential(string=object_cred)
+ object_gid = object_cred.get_gid_object()
+ object_hrn = object_gid.get_hrn()
+
+ if not object_cred.get_privileges().get_all_delegate():
+ self.logger.error("Object credential %s does not have delegate bit set"%object_hrn)
+ return
+
+ # the delegating user's gid
+ caller_gidfile = self.my_gid()
+
+ # the gid of the user who will be delegated to
+ delegee_gid = self.client_bootstrap.gid(hrn,type)
+ delegee_hrn = delegee_gid.get_hrn()
+ dcred = object_cred.delegate(delegee_gid, self.private_key, caller_gidfile)
+ return dcred.save_to_string(save_parents=True)
+
#
# Management of the servers
#
raise Exception, "Not enough parameters for the 'list' command"
# filter on person, slice, site, node, etc.
- # THis really should be in the self.filter_records funct def comment...
+ # This really should be in the self.filter_records funct def comment...
list = filter_records(options.type, list)
- for record in list:
- print "%s (%s)" % (record['hrn'], record['type'])
+ terminal_render (list, options)
if options.file:
save_records_to_file(options.file, list, options.fileformat)
return
self.print_help()
sys.exit(1)
hrn = args[0]
- record_dicts = self.registry().Resolve(hrn, self.my_credential_string)
+ # explicitly require Resolve to run in details mode
+ record_dicts = self.registry().Resolve(hrn, self.my_credential_string, {'details':True})
record_dicts = filter_records(options.type, record_dicts)
if not record_dicts:
self.logger.error("No record of type %s"% options.type)
return
def add(self, options, args):
- "add record into registry from xml file (Register)"
+ "add record into registry by using the command options (Recommended) or from xml file (Register)"
auth_cred = self.my_authority_credential_string()
if options.show_credential:
show_credentials(auth_cred)
record_dict = {}
- if len(args) > 0:
- record_filepath = args[0]
- rec_file = self.get_record_file(record_filepath)
- record_dict.update(load_record_from_file(rec_file).todict())
+ if len(args) > 1:
+ self.print_help()
+ sys.exit(1)
+ if len(args)==1:
+ try:
+ record_filepath = args[0]
+ rec_file = self.get_record_file(record_filepath)
+ record_dict.update(load_record_from_file(rec_file).todict())
+ except:
+ print "Cannot load record file %s"%record_filepath
+ sys.exit(1)
if options:
record_dict.update(load_record_from_opts(options).todict())
# we should have a type by now
return self.registry().Register(record_dict, auth_cred)
def update(self, options, args):
- "update record into registry from xml file (Update)"
+ "update record into registry by using the command options (Recommended) or from xml file (Update)"
record_dict = {}
if len(args) > 0:
record_filepath = args[0]
server = self.sliceapi()
# creds
creds = [self.my_credential_string]
- if options.delegate:
- delegated_cred = self.delegate_cred(self.my_credential_string, get_authority(self.authority))
- creds.append(delegated_cred)
# options and call_id when supported
api_options = {}
- api_options['call_id']=unique_call_id()
+ api_options['call_id']=unique_call_id()
if options.show_credential:
show_credentials(creds)
result = server.ListSlices(creds, *self.ois(server,api_options))
# show rspec for named slice
def resources(self, options, args):
"""
- with no arg, discover available resources, (ListResources)
+ discover available resources
or with an slice hrn, shows currently provisioned resources
"""
server = self.sliceapi()
# set creds
- creds = []
- if args:
- the_credential=self.slice_credential_string(args[0])
- creds.append(the_credential)
- else:
- the_credential=self.my_credential_string
- creds.append(the_credential)
+ creds = [self.my_credential]
+ if options.delegate:
+ creds.append(self.delegate_cred(cred, get_authority(self.authority)))
if options.show_credential:
show_credentials(creds)
api_options ['call_id'] = unique_call_id()
# ask for cached value if available
api_options ['cached'] = True
- if args:
- hrn = args[0]
- api_options['geni_slice_urn'] = hrn_to_urn(hrn, 'slice')
if options.info:
api_options['info'] = options.info
if options.list_leases:
return
+ def describe(self, options, args):
+ """
+ Shows currently provisioned resources.
+ """
+ server = self.sliceapi()
+
+ # set creds
+ creds = [self.slice_credential(args[0])]
+ if options.delegate:
+ creds.append(self.delegate_cred(cred, get_authority(self.authority)))
+ if options.show_credential:
+ show_credentials(creds)
+
+ api_options = {'call_id': unique_call_id(),
+ 'cached': True,
+ 'info': options.info,
+ 'list_leases': options.list_leases,
+ 'geni_rspec_version': {'type': 'geni', 'version': '3.0'},
+ }
+ if options.rspec_version:
+ version_manager = VersionManager()
+ server_version = self.get_cached_server_version(server)
+ if 'sfa' in server_version:
+ # just request the version the client wants
+ api_options['geni_rspec_version'] = version_manager.get_version(options.rspec_version).to_dict()
+ else:
+ api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}
+ urn = Xrn(args[0], type='slice').get_urn()
+ result = server.Describe([urn], creds, api_options)
+ value = ReturnValue.get_value(result)
+ if self.options.raw:
+ save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+ if options.file is not None:
+ save_rspec_to_file(value, options.file)
+ if (self.options.raw is None) and (options.file is None):
+ display_rspec(value, options.format)
+
+ return
+
def create(self, options, args):
"""
create or update named slice with given rspec
# keys: [<ssh key A>, <ssh key B>]
# }]
users = []
+ # xxx Thierry 2012 sept. 21
+ # contrary to what I was first thinking, calling Resolve with details=False does not yet work properly here
+ # I am turning details=True on again on a - hopefully - temporary basis, just to get this whole thing to work again
slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string])
- if slice_records and 'researcher' in slice_records[0] and slice_records[0]['researcher']!=[]:
+ # slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string], {'details':True})
+ if slice_records and 'reg-researchers' in slice_records[0] and slice_records[0]['reg-researchers']:
slice_record = slice_records[0]
- user_hrns = slice_record['researcher']
+ user_hrns = slice_record['reg-researchers']
user_urns = [hrn_to_urn(hrn, 'user') for hrn in user_hrns]
user_records = self.registry().Resolve(user_urns, [self.my_credential_string])
# do not append users, keys, or slice tags. Anything
# not contained in this request will be removed from the slice
- # CreateSliver has supported the options argument for a while now so it should
- # be safe to assume this server support it
api_options = {}
api_options ['append'] = False
api_options ['call_id'] = unique_call_id()
slice_urn = hrn_to_urn(slice_hrn, 'slice')
# creds
- slice_cred = self.slice_credential_string(slice_hrn)
+ slice_cred = self.slice_credential(slice_hrn)
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# options and call_id when supported
api_options = {}
api_options ['call_id'] = unique_call_id()
if options.show_credential:
show_credentials(creds)
- result = server.DeleteSliver(slice_urn, creds, *self.ois(server, api_options ) )
+ result = server.Delete([slice_urn], creds, *self.ois(server, api_options ) )
value = ReturnValue.get_value(result)
if self.options.raw:
save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
else:
print value
- return value
-
+ return value
+
+ def allocate(self, options, args):
+ server = self.sliceapi()
+ server_version = self.get_cached_server_version(server)
+ slice_hrn = args[0]
+ slice_urn = Xrn(slice_hrn, type='slice').get_urn()
+
+ # credentials
+ creds = [self.slice_credential(slice_hrn)]
+
+ delegated_cred = None
+ if server_version.get('interface') == 'slicemgr':
+ # delegate our cred to the slice manager
+ # do not delegate cred to slicemgr...not working at the moment
+ pass
+ #if server_version.get('hrn'):
+ # delegated_cred = self.delegate_cred(slice_cred, server_version['hrn'])
+ #elif server_version.get('urn'):
+ # delegated_cred = self.delegate_cred(slice_cred, urn_to_hrn(server_version['urn']))
+
+ if options.show_credential:
+ show_credentials(creds)
+
+ # rspec
+ rspec_file = self.get_rspec_file(args[1])
+ rspec = open(rspec_file).read()
+ api_options = {}
+ api_options ['call_id'] = unique_call_id()
+ result = server.Allocate(slice_urn, creds, rspec, api_options)
+ value = ReturnValue.get_value(result)
+ if self.options.raw:
+ save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+ if options.file is not None:
+ save_rspec_to_file (value, options.file)
+ if (self.options.raw is None) and (options.file is None):
+ print value
+
+ return value
+
+
+ def provision(self, options, args):
+ server = self.sliceapi()
+ server_version = self.get_cached_server_version(server)
+ slice_hrn = args[0]
+ slice_urn = Xrn(slice_hrn, type='slice').get_urn()
+
+ # credentials
+ creds = [self.slice_credential(slice_hrn)]
+ delegated_cred = None
+ if server_version.get('interface') == 'slicemgr':
+ # delegate our cred to the slice manager
+ # do not delegate cred to slicemgr...not working at the moment
+ pass
+ #if server_version.get('hrn'):
+ # delegated_cred = self.delegate_cred(slice_cred, server_version['hrn'])
+ #elif server_version.get('urn'):
+ # delegated_cred = self.delegate_cred(slice_cred, urn_to_hrn(server_version['urn']))
+
+ if options.show_credential:
+ show_credentials(creds)
+
+ api_options = {}
+ api_options ['call_id'] = unique_call_id()
+
+    # set the requested rspec version
+ version_manager = VersionManager()
+ rspec_version = version_manager._get_version('geni', '3.0').to_dict()
+ api_options['geni_rspec_version'] = rspec_version
+
+ # users
+ # need to pass along user keys to the aggregate.
+ # users = [
+ # { urn: urn:publicid:IDN+emulab.net+user+alice
+ # keys: [<ssh key A>, <ssh key B>]
+ # }]
+ users = []
+ slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string])
+ if slice_records and 'researcher' in slice_records[0] and slice_records[0]['researcher']!=[]:
+ slice_record = slice_records[0]
+ user_hrns = slice_record['researcher']
+ user_urns = [hrn_to_urn(hrn, 'user') for hrn in user_hrns]
+ user_records = self.registry().Resolve(user_urns, [self.my_credential_string])
+ users = pg_users_arg(user_records)
+
+ api_options['geni_users'] = users
+ result = server.Provision([slice_urn], creds, api_options)
+ value = ReturnValue.get_value(result)
+ if self.options.raw:
+ save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
+ if options.file is not None:
+ save_rspec_to_file (value, options.file)
+ if (self.options.raw is None) and (options.file is None):
+ print value
+ return value
+
def status(self, options, args):
"""
retrieve slice status (SliverStatus)
slice_urn = hrn_to_urn(slice_hrn, 'slice')
# creds
- slice_cred = self.slice_credential_string(slice_hrn)
+ slice_cred = self.slice_credential(slice_hrn)
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# options and call_id when supported
api_options = {}
api_options['call_id']=unique_call_id()
if options.show_credential:
show_credentials(creds)
- result = server.SliverStatus(slice_urn, creds, *self.ois(server,api_options))
+ result = server.Status([slice_urn], creds, *self.ois(server,api_options))
value = ReturnValue.get_value(result)
if self.options.raw:
save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
# cred
slice_cred = self.slice_credential_string(args[0])
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# xxx Thierry - does this not need an api_options as well ?
result = server.Start(slice_urn, creds)
value = ReturnValue.get_value(result)
# cred
slice_cred = self.slice_credential_string(args[0])
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
result = server.Stop(slice_urn, creds)
value = ReturnValue.get_value(result)
if self.options.raw:
return value
# reset named slice
- def reset(self, options, args):
+ def action(self, options, args):
"""
- reset named slice (reset_slice)
+ Perform the named operational action on the named slivers
"""
server = self.sliceapi()
+ api_options = {}
# slice urn
slice_hrn = args[0]
- slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ action = args[1]
+ slice_urn = Xrn(slice_hrn, type='slice').get_urn()
# cred
- slice_cred = self.slice_credential_string(args[0])
+ slice_cred = self.slice_credential(args[0])
creds = [slice_cred]
- result = server.reset_slice(creds, slice_urn)
+ if options.delegate:
+ delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
+ creds.append(delegated_cred)
+
+ result = server.PerformOperationalAction([slice_urn], creds, action , api_options)
value = ReturnValue.get_value(result)
if self.options.raw:
save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
slice_urn = hrn_to_urn(slice_hrn, 'slice')
# time: don't try to be smart on the time format, server-side will
# creds
- slice_cred = self.slice_credential_string(args[0])
+ slice_cred = self.slice_credential(args[0])
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# options and call_id when supported
api_options = {}
- api_options['call_id']=unique_call_id()
+ api_options['call_id']=unique_call_id()
if options.show_credential:
show_credentials(creds)
- result = server.RenewSliver(slice_urn, creds, input_time, *self.ois(server,api_options))
+ result = server.Renew([slice_urn], creds, input_time, *self.ois(server,api_options))
value = ReturnValue.get_value(result)
if self.options.raw:
save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
slice_hrn = args[0]
slice_urn = hrn_to_urn(slice_hrn, 'slice')
# creds
- slice_cred = self.slice_credential_string(slice_hrn)
+ slice_cred = self.slice_credential(slice_hrn)
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
result = server.Shutdown(slice_urn, creds)
value = ReturnValue.get_value(result)
if self.options.raw:
# creds
slice_cred = self.slice_credential_string(slice_hrn)
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# rspec
rspec_file = self.get_rspec_file(rspec_path)
rspec = open(rspec_file).read()
# options and call_id when supported
api_options = {}
- api_options['call_id']=unique_call_id()
+ api_options['call_id']=unique_call_id()
# get ticket at the server
ticket_string = server.GetTicket(slice_urn, creds, rspec, *self.ois(server,api_options))
# save
self.print_help()
sys.exit(1)
target_hrn = args[0]
- gid = self.registry().CreateGid(self.my_credential_string, target_hrn, self.client_bootstrap.my_gid_string())
+ my_gid_string = open(self.client_bootstrap.my_gid()).read()
+ gid = self.registry().CreateGid(self.my_credential_string, target_hrn, my_gid_string)
if options.file:
filename = options.file
else:
GID(string=gid).save_to_file(filename)
- def delegate(self, options, args):
+ def delegate (self, options, args):
"""
(locally) create delegate credential for use by given hrn
"""
- delegee_hrn = args[0]
- if options.delegate_user:
- cred = self.delegate_cred(self.my_credential_string, delegee_hrn, 'user')
- elif options.delegate_slice:
- slice_cred = self.slice_credential_string(options.delegate_slice)
- cred = self.delegate_cred(slice_cred, delegee_hrn, 'slice')
- else:
- self.logger.warning("Must specify either --user or --slice <hrn>")
- return
- delegated_cred = Credential(string=cred)
- object_hrn = delegated_cred.get_gid_object().get_hrn()
+ if len(args) != 1:
+ self.print_help()
+ sys.exit(1)
+ to_hrn = args[0]
+ # support for several delegations in the same call
+ # so first we gather the things to do
+ tuples=[]
+ for slice_hrn in options.delegate_slices:
+ message="%s.slice"%slice_hrn
+ original = self.slice_credential_string(slice_hrn)
+ tuples.append ( (message, original,) )
+ if options.delegate_pi:
+ my_authority=self.authority
+ message="%s.pi"%my_authority
+ original = self.my_authority_credential_string()
+ tuples.append ( (message, original,) )
+ for auth_hrn in options.delegate_auths:
+ message="%s.auth"%auth_hrn
+ original=self.authority_credential_string(auth_hrn)
+ tuples.append ( (message, original, ) )
+ # if nothing was specified at all at this point, let's assume -u
+ if not tuples: options.delegate_user=True
+ # this user cred
if options.delegate_user:
- dest_fn = os.path.join(self.options.sfi_dir, get_leaf(delegee_hrn) + "_"
- + get_leaf(object_hrn) + ".cred")
- elif options.delegate_slice:
- dest_fn = os.path.join(self.options.sfi_dir, get_leaf(delegee_hrn) + "_slice_"
- + get_leaf(object_hrn) + ".cred")
-
- delegated_cred.save_to_file(dest_fn, save_parents=True)
-
- self.logger.info("delegated credential for %s to %s and wrote to %s"%(object_hrn, delegee_hrn,dest_fn))
+ message="%s.user"%self.user
+ original = self.my_credential_string
+ tuples.append ( (message, original, ) )
+
+        # default type for beneficiary is user unless -A
+ if options.delegate_to_authority: to_type='authority'
+ else: to_type='user'
+
+ # let's now handle all this
+ # it's all in the filenaming scheme
+ for (message,original) in tuples:
+ delegated_string = self.client_bootstrap.delegate_credential_string(original, to_hrn, to_type)
+ delegated_credential = Credential (string=delegated_string)
+ filename = os.path.join ( self.options.sfi_dir,
+ "%s_for_%s.%s.cred"%(message,to_hrn,to_type))
+ delegated_credential.save_to_file(filename, save_parents=True)
+ self.logger.info("delegated credential for %s to %s and wrote to %s"%(message,to_hrn,filename))
def trusted(self, options, args):
"""
['person_id', 'email', 'key_ids', 'site_ids', 'role_ids'])
# create a hash of persons by person_id
persons_by_id = dict ( [ ( person['person_id'], person) for person in persons ] )
+ # also gather non-enabled user accounts so as to issue relevant warnings
+ disabled_persons = shell.GetPersons({'peer_id': None, 'enabled': False}, ['person_id'])
+ disabled_person_ids = [ person['person_id'] for person in disabled_persons ]
# Get all plc public keys
# accumulate key ids for keys retrieval
key_ids = []
except:
# if the site import fails then there is no point in trying to import the
# site's child records (node, slices, persons), so skip them.
- self.logger.log_exc("PlImporter: failed to import site. Skipping child records")
+ self.logger.log_exc("PlImporter: failed to import site %s. Skipping child records"%site_hrn)
continue
else:
# xxx update the record ...
self.logger.info("PlImporter: imported node: %s" % node_record)
self.remember_record (node_record)
except:
- self.logger.log_exc("PlImporter: failed to import node")
+ self.logger.log_exc("PlImporter: failed to import node %s"%node_hrn)
+ continue
else:
# xxx update the record ...
pass
node_record.stale=False
- site_pis=[]
+ site_pis=set()
# import persons
for person_id in site['person_ids']:
- try:
- person = persons_by_id[person_id]
- except:
- self.logger.warning ("PlImporter: cannot locate person_id %s - ignored"%person_id)
+ proceed=False
+ if person_id in persons_by_id:
+ person=persons_by_id[person_id]
+ proceed=True
+ elif person_id in disabled_person_ids:
+ pass
+ else:
+ self.logger.warning ("PlImporter: cannot locate person_id %s in site %s - ignored"%(person_id,site_hrn))
+ # make sure to NOT run this if anything is wrong
+ if not proceed: continue
+
person_hrn = email_to_hrn(site_hrn, person['email'])
# xxx suspicious again
if len(person_hrn) > 64: person_hrn = person_hrn[:64]
(pubkey,pkey) = init_person_key (person, plc_keys )
person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey, email=person['email'])
user_record = RegUser (hrn=person_hrn, gid=person_gid,
- pointer=person['person_id'],
- authority=get_authority(person_hrn),
- email=person['email'])
+ pointer=person['person_id'],
+ authority=get_authority(person_hrn),
+ email=person['email'])
if pubkey:
user_record.reg_keys=[RegKey (pubkey['key'], pubkey['key_id'])]
else:
self.remember_record ( user_record )
else:
# update the record ?
- # if user's primary key has changed then we need to update the
+ #
+ # if a user key has changed then we need to update the
# users gid by forcing an update here
+ #
+ # right now, SFA only has *one* key attached to a user, and this is
+ # the key that the GID was made with
+ # so the logic here is, we consider that things are OK (unchanged) if
+ # all the SFA keys are present as PLC keys
+ # otherwise we trigger the creation of a new gid from *some* plc key
+ # and record this on the SFA side
+ # it would make sense to add a feature in PLC so that one could pick a 'primary'
+ # key but this is not available on the myplc side for now
+ # = or = it would be much better to support several keys in SFA but that
+ # does not seem doable without a major overhaul in the data model as
+ # a GID is attached to a hrn, but it's also linked to a key, so...
+ # NOTE: with this logic, the first key entered in PLC remains the one
+ # current in SFA until it is removed from PLC
sfa_keys = user_record.reg_keys
- def key_in_list (key,sfa_keys):
- for reg_key in sfa_keys:
- if reg_key.key==key['key']: return True
+ def sfa_key_in_list (sfa_key,plc_keys):
+ for plc_key in plc_keys:
+ if plc_key['key']==sfa_key.key:
+ return True
return False
- # is there a new key in myplc ?
+ # are all the SFA keys known to PLC ?
new_keys=False
- for key in plc_keys:
- if not key_in_list (key,sfa_keys):
- new_keys = True
+ if not sfa_keys and plc_keys:
+ new_keys=True
+ else:
+ for sfa_key in sfa_keys:
+ if not sfa_key_in_list (sfa_key,plc_keys):
+ new_keys = True
if new_keys:
(pubkey,pkey) = init_person_key (person, plc_keys)
person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+ person_gid.set_email(person['email'])
if not pubkey:
user_record.reg_keys=[]
else:
user_record.reg_keys=[ RegKey (pubkey['key'], pubkey['key_id'])]
+ user_record.gid = person_gid
+ user_record.just_updated()
self.logger.info("PlImporter: updated person: %s" % user_record)
user_record.email = person['email']
dbsession.commit()
# this is valid for all sites she is in..
# PI is coded with role_id==20
if 20 in person['role_ids']:
- site_pis.append (user_record)
+ site_pis.add (user_record)
except:
self.logger.log_exc("PlImporter: failed to import person %d %s"%(person['person_id'],person['email']))
# maintain the list of PIs for a given site
- # for the record, Jordan had proposed the following addition as a welcome hotfix to a previous version:
- # site_pis = list(set(site_pis))
- # this was likely due to a bug in the above logic, that had to do with disabled persons
- # being improperly handled, and where the whole loop on persons
- # could be performed twice with the same person...
- # so hopefully we do not need to eliminate duplicates explicitly here anymore
+        site_record.reg_pis = list(site_pis)
dbsession.commit()
# import slices
self.logger.info("PlImporter: imported slice: %s" % slice_record)
self.remember_record ( slice_record )
except:
- self.logger.log_exc("PlImporter: failed to import slice")
+ self.logger.log_exc("PlImporter: failed to import slice %s (%s)"%(slice_hrn,slice['name']))
else:
+ # update the pointer if it has changed
+ if slice_id != slice_record.pointer:
+ self.logger.info("updating record (slice) pointer")
+ slice_record.pointer = slice_id
+ dbsession.commit()
# xxx update the record ...
- # given that we record the current set of users anyways, there does not seem to be much left to do here
- # self.logger.warning ("Slice update not yet implemented on slice %s (%s)"%(slice_hrn,slice['name']))
- pass
+ #self.logger.warning ("Slice update not yet implemented")
# record current users affiliated with the slice
slice_record.reg_researchers = \
[ self.locate_by_type_pointer ('user',user_id) for user_id in slice['person_ids'] ]
from sfa.trust.certificate import Certificate, Keypair, convert_public_key
from sfa.trust.gid import create_uuid
- from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey
+ from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey, \
+ augment_with_sfa_builtins
from sfa.storage.alchemy import dbsession
+ ### the types that we need to exclude from sqlobjects before being able to dump
+ # them on the xmlrpc wire
+ from sqlalchemy.orm.collections import InstrumentedList
class RegistryManager:
hrn = urn_to_hrn(xrn)[0]
else:
hrn, type = urn_to_hrn(xrn)
-
+
+ # Slivers don't have credentials but users should be able to
+ # specify a sliver xrn and receive the slice's credential
+ if type == 'sliver' or '-' in Xrn(hrn).leaf:
+ slice_xrn = self.driver.sliver_to_slice_xrn(hrn)
+ hrn = slice_xrn.hrn
+
# Is this a root or sub authority
auth_hrn = api.auth.get_authority(hrn)
if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
if not record:
raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
- # xxx for the record only
- # used to call this, which was wrong, now all needed data is natively is our DB
- # self.driver.augment_records_with_testbed_info (record.__dict__)
- # likewise, we deprecate is_enabled which was not really useful
- # if not self.driver.is_enabled (record.__dict__): ...
- # xxx for the record only
-
# get the callers gid
# if caller_xrn is not specified assume the caller is the record
# object itself.
return new_cred.save_to_string(save_parents=True)
- def Resolve(self, api, xrns, type=None, full=True):
+ # the default for full, which means 'dig into the testbed as well', should be false
+ def Resolve(self, api, xrns, type=None, details=False):
if not isinstance(xrns, types.ListType):
# try to infer type if not set and we get a single input
credential = api.getCredential()
interface = api.registries[registry_hrn]
server_proxy = api.server_proxy(interface, credential)
- peer_records = server_proxy.Resolve(xrns, credential,type)
+ # should propagate the details flag but that's not supported in the xmlrpc interface yet
+ #peer_records = server_proxy.Resolve(xrns, credential,type, details=details)
+ peer_records = server_proxy.Resolve(xrns, credential)
# pass foreign records as-is
# previous code used to read
# records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
if type:
local_records = local_records.filter_by(type=type)
local_records=local_records.all()
- logger.info("Resolve: local_records=%s (type=%s)"%(local_records,type))
+
+ for local_record in local_records:
+ augment_with_sfa_builtins (local_record)
+
+ logger.info("Resolve, (details=%s,type=%s) local_records=%s "%(details,type,local_records))
local_dicts = [ record.__dict__ for record in local_records ]
- if full:
- # in full mode we get as much info as we can, which involves contacting the
+ if details:
+ # in details mode we get as much info as we can, which involves contacting the
# testbed for getting implementation details about the record
self.driver.augment_records_with_testbed_info(local_dicts)
# also we fill the 'url' field for known authorities
# xxx somehow here calling dict(record) issues a weird error
# however record.todict() seems to work fine
# records.extend( [ dict(record) for record in local_records ] )
- records.extend( [ record.todict() for record in local_records ] )
+ records.extend( [ record.todict(exclude_types=[InstrumentedList]) for record in local_records ] )
+
if not records:
raise RecordNotFound(str(hrns))
record_dicts = record_list
# if we still have not found the record yet, try the local registry
+ # logger.debug("before trying local records, %d foreign records"% len(record_dicts))
if not record_dicts:
recursive = False
if ('recursive' in options and options['recursive']):
if not api.auth.hierarchy.auth_exists(hrn):
raise MissingAuthority(hrn)
if recursive:
- records = dbsession.query(RegRecord).filter(RegRecord.hrn.startswith(hrn))
+ records = dbsession.query(RegRecord).filter(RegRecord.hrn.startswith(hrn)).all()
+ # logger.debug("recursive mode, found %d local records"%(len(records)))
else:
- records = dbsession.query(RegRecord).filter_by(authority=hrn)
- record_dicts=[ record.todict() for record in records ]
+ records = dbsession.query(RegRecord).filter_by(authority=hrn).all()
+ # logger.debug("non recursive mode, found %d local records"%(len(records)))
+ # so that sfi list can show more than plain names...
+ for record in records: augment_with_sfa_builtins (record)
+ record_dicts=[ record.todict(exclude_types=[InstrumentedList]) for record in records ]
return record_dicts
####################
# utility for handling relationships among the SFA objects
- # given that the SFA db does not handle this sort of relationsships
- # it will rely on side-effects in the testbed to keep this persistent
# subject_record describes the subject of the relationships
# ref_record contains the target values for the various relationships we need to manage
- # (to begin with, this is just the slice x person relationship)
+ # (to begin with, this is just the slice x person (researcher) and authority x person (pi) relationships)
def update_driver_relations (self, subject_obj, ref_obj):
type=subject_obj.type
#for (k,v) in subject_obj.__dict__.items(): print k,'=',v
# is there a change in keys ?
new_key=None
if type=='user':
- if getattr(new_key,'keys',None):
+ if getattr(new_record,'keys',None):
new_key=new_record.keys
if isinstance (new_key,types.ListType):
new_key=new_key[0]
urn = hrn_to_urn(hrn,type)
gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
gid = gid_object.save_to_string(save_parents=True)
- record.gid = gid
- dsession.commit()
# xxx should do side effects from new_record to record
# not too sure how to do that
if isinstance (record, RegSlice):
researcher_hrns = getattr(new_record,'researcher',None)
if researcher_hrns is not None: record.update_researchers (researcher_hrns)
- dbsession.commit()
elif isinstance (record, RegAuthority):
pi_hrns = getattr(new_record,'pi',None)
if pi_hrns is not None: record.update_pis (pi_hrns)
- dbsession.commit()
# update the PLC information that was specified with the record
# xxx oddly enough, without this useless statement,
# anyway the driver should receive an object
# (and then extract __dict__ itself if needed)
print "DO NOT REMOVE ME before driver.update, record=%s"%record
- if not self.driver.update (record.__dict__, new_record.__dict__, hrn, new_key):
- logger.warning("driver.update failed")
-
+ new_key_pointer = -1
+ try:
+ (pointer, new_key_pointer) = self.driver.update (record.__dict__, new_record.__dict__, hrn, new_key)
+ except:
+ pass
+ if new_key and new_key_pointer:
+ record.reg_keys=[ RegKey (new_key, new_key_pointer)]
+ record.gid = gid
+
+ dbsession.commit()
# update membership for researchers, pis, owners, operators
self.update_driver_relations (record, new_record)
import base64
import string
import random
-import time
+import time
from collections import defaultdict
from nova.exception import ImageNotFound
from nova.api.ec2.cloud import CloudController
-from sfa.util.faults import SfaAPIError, InvalidRSpec
+from sfa.util.faults import SfaAPIError, SliverDoesNotExist
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.elements.hardware_type import HardwareType
from sfa.rspecs.elements.node import Node
from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
from sfa.rspecs.version_manager import VersionManager
from sfa.openstack.security_group import SecurityGroup
+from sfa.server.threadmanager import ThreadManager
from sfa.util.sfalogging import logger
def pubkeys_to_user_data(pubkeys):
user_data += "\n"
return user_data
-def instance_to_sliver(instance, slice_xrn=None):
- sliver_id = None
- sliver = Sliver({'name': instance.name,
- 'type': instance.name,
- 'cpus': str(instance.vcpus),
- 'memory': str(instance.ram),
- 'storage': str(instance.disk)})
- return sliver
-
def image_to_rspec_disk_image(image):
img = DiskImage()
img['name'] = image['name']
def __init__(self, driver):
self.driver = driver
- def get_rspec(self, slice_xrn=None, version=None, options={}):
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- if not slice_xrn:
- rspec_version = version_manager._get_version(version.type, version.version, 'ad')
- nodes = self.get_aggregate_nodes()
- else:
- rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
- nodes = self.get_slice_nodes(slice_xrn)
- rspec = RSpec(version=rspec_version, user_options=options)
- rspec.version.add_nodes(nodes)
- return rspec.toxml()
-
def get_availability_zones(self):
- # essex release
zones = self.driver.shell.nova_manager.dns_domains.domains()
-
if not zones:
zones = ['cloud']
else:
zones = [zone.name for zone in zones]
return zones
- def instance_to_rspec_node(self, slice_xrn, instance):
+ def list_resources(self, version=None, options={}):
+ version_manager = VersionManager()
+ version = version_manager.get_version(version)
+ rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+ rspec = RSpec(version=rspec_version, user_options=options)
+ nodes = self.get_aggregate_nodes()
+ rspec.version.add_nodes(nodes)
+ return rspec.toxml()
+
+ def describe(self, urns, version=None, options={}):
+ # update nova connection
+ tenant_name = OSXrn(xrn=urns[0], type='slice').get_tenant_name()
+ self.driver.shell.nova_manager.connect(tenant=tenant_name)
+ instances = self.get_instances(urns)
+ # lookup the sliver allocations
+ sliver_ids = [Xrn('%s.%s' % (self.driver.hrn, instance.id), type='sliver').urn for instance in instances]
+ constraint = SliverAllocation.sliver_id.in_(sliver_ids)
+ sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
+ sliver_allocation_dict = {}
+ for sliver_allocation in sliver_allocations:
+ sliver_allocation_dict[sliver_allocation.sliver_id] = sliver_allocation
+
+ geni_slivers = []
+ rspec_nodes = []
+ for instance in instances:
+ rspec_nodes.append(self.instance_to_rspec_node(instance))
+ geni_sliver = self.instance_to_geni_sliver(instance, sliver_allocation_dict)
+ geni_slivers.append(geni_sliver)
+ version_manager = VersionManager()
+ version = version_manager.get_version(version)
+ rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+ rspec = RSpec(version=rspec_version, user_options=options)
+ rspec.xml.set('expires', datetime_to_string(utcparse(time.time())))
+ rspec.version.add_nodes(rspec_nodes)
+ result = {'geni_urn': Xrn(urns[0]).get_urn(),
+ 'geni_rspec': rspec.toxml(),
+ 'geni_slivers': geni_slivers}
+
+ return result
+
+ def get_instances(self, urns):
+ # parse slice names and sliver ids
+ names = set()
+ ids = set()
+ for urn in urns:
+ xrn = OSXrn(xrn=urn)
+ if xrn.type == 'slice':
+ names.add(xrn.get_slice_name())
+ elif xrn.type == 'sliver':
+ ids.add(xrn.leaf)
+
+ # look up instances
+ instances = []
+ filter = {}
+ if names:
+ filter['name'] = names
+ if ids:
+ filter['id'] = ids
+ servers = self.driver.shell.nova_manager.servers.findall(**filter)
+ instances.extend(servers)
+
+ return instances
+
+ def instance_to_rspec_node(self, instance):
# determine node urn
node_xrn = instance.metadata.get('component_id')
if not node_xrn:
rspec_node['component_id'] = node_xrn.urn
rspec_node['component_name'] = node_xrn.name
rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+ rspec_node['sliver_id'] = OSXrn(name=instance.name, type='slice', id=instance.id).get_urn()
if instance.metadata.get('client_id'):
rspec_node['client_id'] = instance.metadata.get('client_id')
# get sliver details
- sliver_xrn = OSXrn(xrn=slice_xrn, type='slice', id=instance.id)
- rspec_node['sliver_id'] = sliver_xrn.get_urn()
flavor = self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id'])
- sliver = instance_to_sliver(flavor)
+ sliver = self.instance_to_sliver(flavor)
# get firewall rules
fw_rules = []
group_name = instance.metadata.get('security_groups')
'port_range': port_range,
'cidr_ip': rule['ip_range']['cidr']})
fw_rules.append(fw_rule)
- sliver['fw_rules'] = fw_rules
- rspec_node['slivers']= [sliver]
+ sliver['fw_rules'] = fw_rules
+ rspec_node['slivers'] = [sliver]
+
# get disk image
image = self.driver.shell.image_manager.get_images(id=instance.image['id'])
if isinstance(image, list) and len(image) > 0:
disk_image = image_to_rspec_disk_image(image)
sliver['disk_image'] = [disk_image]
- # get interfaces
+ # get interfaces
rspec_node['services'] = []
rspec_node['interfaces'] = []
addresses = instance.addresses
- # HACK: public ips are stored in the list of private, but
- # this seems wrong. Assume pub ip is the last in the list of
- # private ips until openstack bug is fixed.
+ # HACK: public ips are stored in the list of private, but
+ # this seems wrong. Assume pub ip is the last in the list of
+ # private ips until openstack bug is fixed.
if addresses.get('private'):
login = Login({'authentication': 'ssh-keys',
'hostname': addresses.get('private')[-1]['addr'],
'port':'22', 'username': 'root'})
service = Services({'login': login})
- rspec_node['services'].append(service)
-
- if_index = 0
+ rspec_node['services'].append(service)
+
+ if_index = 0
for private_ip in addresses.get('private', []):
- if_xrn = PlXrn(auth=self.driver.hrn,
- interface='node%s' % (instance.hostId))
+ if_xrn = PlXrn(auth=self.driver.hrn,
+ interface='node%s' % (instance.hostId))
if_client_id = Xrn(if_xrn.urn, type='interface', id="eth%s" %if_index).urn
if_sliver_id = Xrn(rspec_node['sliver_id'], type='slice', id="eth%s" %if_index).urn
interface = Interface({'component_id': if_xrn.urn,
'sliver_id': if_sliver_id})
interface['ips'] = [{'address': private_ip['addr'],
#'netmask': private_ip['network'],
- 'type': 'ipv%s' % str(private_ip['version'])}]
- rspec_node['interfaces'].append(interface)
- if_index += 1
-
+ 'type': private_ip['version']}]
+ rspec_node['interfaces'].append(interface)
+ if_index += 1
+
# slivers always provide the ssh service
for public_ip in addresses.get('public', []):
- login = Login({'authentication': 'ssh-keys',
- 'hostname': public_ip['addr'],
+ login = Login({'authentication': 'ssh-keys',
+ 'hostname': public_ip['addr'],
'port':'22', 'username': 'root'})
service = Services({'login': login})
rspec_node['services'].append(service)
- return rspec_node
+ return rspec_node
- def get_slice_nodes(self, slice_xrn):
- # update nova connection
- tenant_name = OSXrn(xrn=slice_xrn, type='slice').get_tenant_name()
- self.driver.shell.nova_manager.connect(tenant=tenant_name)
-
- zones = self.get_availability_zones()
- name = hrn_to_os_slicename(slice_xrn)
- instances = self.driver.shell.nova_manager.servers.findall(name=name)
- rspec_nodes = []
- for instance in instances:
- rspec_nodes.append(self.instance_to_rspec_node(slice_xrn, instance))
- return rspec_nodes
-
+ def instance_to_sliver(self, instance, xrn=None):
+ if xrn:
+ sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
+ sliver_id = Xrn(sliver_hrn, type='sliver').urn
+
+ sliver = Sliver({'sliver_id': sliver_id,
+ 'name': instance.name,
+ 'type': instance.name,
+ 'cpus': str(instance.vcpus),
+ 'memory': str(instance.ram),
+ 'storage': str(instance.disk)})
+ return sliver
+
+ def instance_to_geni_sliver(self, instance, sliver_allocations = {}):
+ sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
+ sliver_id = Xrn(sliver_hrn, type='sliver').urn
+
+ # set sliver allocation and operational status
+ sliver_allocation = sliver_allocations[sliver_id]
+ if sliver_allocation:
+ allocation_status = sliver_allocation.allocation_state
+ if allocation_status == 'geni_allocated':
+ op_status = 'geni_pending_allocation'
+ elif allocation_status == 'geni_provisioned':
+ state = instance.state.lower()
+ if state == 'active':
+ op_status = 'geni_ready'
+ elif state == 'building':
+ op_status = 'geni_notready'
+ elif state == 'failed':
+ op_status = 'geni_failed'
+ else:
+ op_status = 'geni_unknown'
+ else:
+ allocation_status = 'geni_unallocated'
+ # required fields
+ geni_sliver = {'geni_sliver_urn': sliver_id,
+ 'geni_expires': None,
+ 'geni_allocation_status': allocation_status,
+ 'geni_operational_status': op_status,
+ 'geni_error': None,
+ 'plos_created_at': datetime_to_string(utcparse(instance.created)),
+ 'plos_sliver_type': self.driver.shell.nova_manager.flavors.find(id=instance.flavor['id']).name,
+ }
+
+ return geni_sliver
+
def get_aggregate_nodes(self):
zones = self.get_availability_zones()
# available sliver/instance/vm types
HardwareType({'name': 'pc'})]
slivers = []
for instance in instances:
- sliver = instance_to_sliver(instance)
+ sliver = self.instance_to_sliver(instance)
sliver['disk_image'] = disk_images
slivers.append(sliver)
-
+ rspec_node['available'] = 'true'
rspec_node['slivers'] = slivers
rspec_nodes.append(rspec_node)
return rspec_nodes
-
def create_tenant(self, tenant_name):
tenants = self.driver.shell.auth_manager.tenants.findall(name=tenant_name)
if not tenants:
tenant = tenants[0]
return tenant
-
def create_instance_key(self, slice_hrn, user):
slice_name = Xrn(slice_hrn).leaf
user_name = Xrn(user['urn']).leaf
cidr_ip = rule.get('cidr_ip'),
port_range = rule.get('port_range'),
icmp_type_code = rule.get('icmp_type_code'))
+ # Open ICMP by default
+ security_group.add_rule_to_group(group_name,
+ protocol = "icmp",
+ cidr_ip = "0.0.0.0/0",
+ icmp_type_code = "-1:-1")
return group_name
def add_rule_to_security_group(self, group_name, **kwds):
files = {'/root/.ssh/authorized_keys': authorized_keys}
rspec = RSpec(rspec)
requested_instances = defaultdict(list)
+
# iterate over clouds/zones/nodes
- created_instances = []
+ slivers = []
for node in rspec.version.get_nodes_with_slivers():
instances = node.get('slivers', [])
if not instances:
image = instance.get('disk_image')
if image and isinstance(image, list):
image = image[0]
+ else:
+ raise InvalidRSpec("Must specify a disk_image for each VM")
image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
fw_rules = instance.get('fw_rules', [])
group_name = self.create_security_group(instance_name, fw_rules)
if node.get('component_id'):
metadata['component_id'] = node['component_id']
if node.get('client_id'):
- metadata['client_id'] = node['client_id']
- server = self.driver.shell.nova_manager.servers.create(flavor=flavor_id,
+ metadata['client_id'] = node['client_id']
+ server = self.driver.shell.nova_manager.servers.create(
+ flavor=flavor_id,
image=image_id,
key_name = key_name,
security_groups = [group_name],
files=files,
meta=metadata,
name=instance_name)
- created_instances.append(server)
-
+ slivers.append(server)
except Exception, err:
logger.log_exc(err)
- return created_instances
-
-
- def delete_instances(self, instance_name, tenant_name):
+ return slivers
- def _delete_security_group(instance):
- security_group = instance.metadata.get('security_groups', '')
+ def delete_instance(self, instance):
+
+ def _delete_security_group(inst):
+ security_group = inst.metadata.get('security_groups', '')
if security_group:
manager = SecurityGroup(self.driver)
timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
start_time = time.time()
instance_deleted = False
while instance_deleted == False and (time.time() - start_time) < timeout:
- inst = self.driver.shell.nova_manager.servers.findall(id=instance.id)
- if not inst:
+ tmp_inst = self.driver.shell.nova_manager.servers.findall(id=inst.id)
+ if not tmp_inst:
instance_deleted = True
- time.sleep(1)
+ time.sleep(.5)
manager.delete_security_group(security_group)
thread_manager = ThreadManager()
- self.driver.shell.nova_manager.connect(tenant=tenant_name)
- instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
+ tenant = self.driver.shell.auth_manager.tenants.find(id=instance.tenant_id)
+ self.driver.shell.nova_manager.connect(tenant=tenant.name)
+ args = {'name': instance.name,
+ 'id': instance.id}
+ instances = self.driver.shell.nova_manager.servers.findall(**args)
+ security_group_manager = SecurityGroup(self.driver)
for instance in instances:
# destroy instance
self.driver.shell.nova_manager.servers.delete(instance)
# deleate this instance's security groups
thread_manager.run(_delete_security_group, instance)
- return True
-
+ return 1
- def stop_instances(self, instance_name, tenant_name):
+ def stop_instances(self, instance_name, tenant_name, id=None):
self.driver.shell.nova_manager.connect(tenant=tenant_name)
- instances = self.driver.shell.nova_manager.servers.findall(name=instance_name)
+ args = {'name': instance_name}
+ if id:
+ args['id'] = id
+ instances = self.driver.shell.nova_manager.servers.findall(**args)
for instance in instances:
self.driver.shell.nova_manager.servers.pause(instance)
return 1
+ def start_instances(self, instance_name, tenant_name, id=None):
+ self.driver.shell.nova_manager.connect(tenant=tenant_name)
+ args = {'name': instance_name}
+ if id:
+ args['id'] = id
+ instances = self.driver.shell.nova_manager.servers.findall(**args)
+ for instance in instances:
+ self.driver.shell.nova_manager.servers.resume(instance)
+ return 1
+
+ def restart_instances(self, instance_name, tenant_name, id=None):
+ self.stop_instances(instance_name, tenant_name, id)
+ self.start_instances(instance_name, tenant_name, id)
+ return 1
+
def update_instances(self, project_name):
pass
#!/usr/bin/python
+from collections import defaultdict
from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn
from sfa.util.sfatime import utcparse, datetime_to_string
from sfa.util.sfalogging import logger
-
+from sfa.util.faults import SliverDoesNotExist
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.node import NodeElement
from sfa.rspecs.elements.link import Link
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.login import Login
from sfa.rspecs.elements.location import Location
from sfa.rspecs.elements.interface import Interface
-from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.services import ServicesElement
from sfa.rspecs.elements.pltag import PLTag
from sfa.rspecs.elements.lease import Lease
from sfa.rspecs.elements.granularity import Granularity
from sfa.planetlab.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename, slicename_to_hrn
from sfa.planetlab.vlink import get_tc_rate
from sfa.planetlab.topology import Topology
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import SliverAllocation
+
import time
def __init__(self, driver):
self.driver = driver
+
+ def get_nodes(self, options={}):
+ filter = {'peer_id': None}
+ geni_available = options.get('geni_available')
+ if geni_available == True:
+ filter['boot_state'] = 'boot'
+ nodes = self.driver.shell.GetNodes(filter)
+
+ return nodes
def get_sites(self, filter={}):
sites = {}
pl_initscripts[initscript['initscript_id']] = initscript
return pl_initscripts
+ def get_slivers(self, urns, options={}):
+ names = set()
+ slice_ids = set()
+ node_ids = []
+ for urn in urns:
+ xrn = PlXrn(xrn=urn)
+ if xrn.type == 'sliver':
+ # id: slice_id-node_id
+ try:
+ sliver_id_parts = xrn.get_sliver_id_parts()
+ slice_id = int(sliver_id_parts[0])
+ node_id = int(sliver_id_parts[1])
+ slice_ids.add(slice_id)
+ node_ids.append(node_id)
+ except ValueError:
+ pass
+ else:
+ names.add(xrn.pl_slicename())
- def get_slice_and_slivers(self, slice_xrn):
- """
- Returns a dict of slivers keyed on the sliver's node_id
- """
- slivers = {}
- slice = None
- if not slice_xrn:
- return (slice, slivers)
- slice_urn = hrn_to_urn(slice_xrn, 'slice')
- slice_hrn, _ = urn_to_hrn(slice_xrn)
- slice_name = hrn_to_pl_slicename(slice_hrn)
- slices = self.driver.shell.GetSlices(slice_name)
+ filter = {}
+ if names:
+ filter['name'] = list(names)
+ if slice_ids:
+ filter['slice_id'] = list(slice_ids)
+ # get slices
+ slices = self.driver.shell.GetSlices(filter)
if not slices:
- return (slice, slivers)
- slice = slices[0]
-
- # sort slivers by node id
- for node_id in slice['node_ids']:
- sliver_xrn = Xrn(slice_urn, type='sliver', id=node_id)
- sliver_xrn.set_authority(self.driver.hrn)
- sliver = Sliver({'sliver_id': sliver_xrn.urn,
- 'name': slice['name'],
- 'type': 'plab-vserver',
- 'tags': []})
- slivers[node_id]= sliver
-
- # sort sliver attributes by node id
- tags = self.driver.shell.GetSliceTags({'slice_tag_id': slice['slice_tag_ids']})
- for tag in tags:
- # most likely a default/global sliver attribute (node_id == None)
- if tag['node_id'] not in slivers:
- sliver_xrn = Xrn(slice_urn, type='sliver', id=tag['node_id'])
- sliver_xrn.set_authority(self.driver.hrn)
- sliver = Sliver({'sliver_id': sliver_xrn.urn,
- 'name': slice['name'],
- 'type': 'plab-vserver',
- 'tags': []})
- slivers[tag['node_id']] = sliver
- slivers[tag['node_id']]['tags'].append(tag)
+ return []
+ slice = slices[0]
+ slice['hrn'] = PlXrn(auth=self.driver.hrn, slicename=slice['name']).hrn
+
+ # get sliver users
+ persons = []
+ person_ids = []
+ for slice in slices:
+ person_ids.extend(slice['person_ids'])
+ if person_ids:
+ persons = self.driver.shell.GetPersons(person_ids)
+
+ # get user keys
+ keys = {}
+ key_ids = []
+ for person in persons:
+ key_ids.extend(person['key_ids'])
- return (slice, slivers)
-
- def get_nodes_and_links(self, slice_xrn, slice=None,slivers=[], options={}):
- # if we are dealing with a slice that has no node just return
- # and empty list
- if slice_xrn:
- if not slice or not slice['node_ids']:
- return ([],[])
+ if key_ids:
+ key_list = self.driver.shell.GetKeys(key_ids)
+ for key in key_list:
+ keys[key['key_id']] = key
+
+ # construct user key info
+ users = []
+ for person in persons:
+ name = person['email'][0:person['email'].index('@')]
+ user = {
+ 'login': slice['name'],
+ 'user_urn': Xrn('%s.%s' % (self.driver.hrn, name), type='user').urn,
+ 'keys': [keys[k_id]['key'] for k_id in person['key_ids'] if k_id in keys]
+ }
+ users.append(user)
+
+ if node_ids:
+ node_ids = [node_id for node_id in node_ids if node_id in slice['node_ids']]
+ slice['node_ids'] = node_ids
+ tags_dict = self.get_slice_tags(slice)
+ nodes_dict = self.get_slice_nodes(slice, options)
+ slivers = []
+ for node in nodes_dict.values():
+ node.update(slice)
+ node['tags'] = tags_dict[node['node_id']]
+ sliver_hrn = '%s.%s-%s' % (self.driver.hrn, slice['slice_id'], node['node_id'])
+ node['sliver_id'] = Xrn(sliver_hrn, type='sliver').urn
+ node['urn'] = node['sliver_id']
+ node['services_user'] = users
+ slivers.append(node)
+ return slivers
+
+ def node_to_rspec_node(self, node, sites, interfaces, node_tags, pl_initscripts=[], grain=None, options={}):
+ rspec_node = NodeElement()
+ # xxx how to retrieve site['login_base']
+ site=sites[node['site_id']]
+ rspec_node['component_id'] = PlXrn(self.driver.hrn, hostname=node['hostname']).get_urn()
+ rspec_node['component_name'] = node['hostname']
+ rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+ rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.driver.hrn, site['login_base']), 'authority+sa')
+ # do not include boot state (<available> element) in the manifest rspec
+ rspec_node['boot_state'] = node['boot_state']
+ if node['boot_state'] == 'boot':
+ rspec_node['available'] = 'true'
+ else:
+ rspec_node['available'] = 'false'
+ rspec_node['exclusive'] = 'false'
+ rspec_node['hardware_types'] = [HardwareType({'name': 'plab-pc'}),
+ HardwareType({'name': 'pc'})]
+ # only doing this because protogeni rspec needs
+ # to advertise available initscripts
+ rspec_node['pl_initscripts'] = pl_initscripts.values()
+ # add site/interface info to nodes.
+ # assumes that sites, interfaces and tags have already been prepared.
+ if site['longitude'] and site['latitude']:
+ location = Location({'longitude': site['longitude'], 'latitude': site['latitude'], 'country': 'unknown'})
+ rspec_node['location'] = location
+ # Granularity
+ granularity = Granularity({'grain': grain})
+ rspec_node['granularity'] = granularity
+ rspec_node['interfaces'] = []
+ if_count=0
+ for if_id in node['interface_ids']:
+ interface = Interface(interfaces[if_id])
+ interface['ipv4'] = interface['ip']
+ interface['component_id'] = PlXrn(auth=self.driver.hrn,
+ interface='node%s:eth%s' % (node['node_id'], if_count)).get_urn()
+ # interfaces in the manifest need a client id
+ if slice:
+ interface['client_id'] = "%s:%s" % (node['node_id'], if_id)
+ rspec_node['interfaces'].append(interface)
+ if_count+=1
+ tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids'] if tag_id in node_tags]
+ rspec_node['tags'] = tags
+ return rspec_node
+
+ def sliver_to_rspec_node(self, sliver, sites, interfaces, node_tags, \
+ pl_initscripts, sliver_allocations):
+ # get the granularity in second for the reservation system
+ grain = self.driver.shell.GetLeaseGranularity()
+ rspec_node = self.node_to_rspec_node(sliver, sites, interfaces, node_tags, pl_initscripts, grain)
+ # xxx how to retrieve site['login_base']
+ rspec_node['expires'] = datetime_to_string(utcparse(sliver['expires']))
+ # remove interfaces from manifest
+ rspec_node['interfaces'] = []
+ # add sliver info
+ rspec_sliver = Sliver({'sliver_id': sliver['urn'],
+ 'name': sliver['name'],
+ 'type': 'plab-vserver',
+ 'tags': []})
+ rspec_node['sliver_id'] = rspec_sliver['sliver_id']
+ rspec_node['client_id'] = sliver_allocations[sliver['urn']].client_id
+ if sliver_allocations[sliver['urn']].component_id:
+ rspec_node['component_id'] = sliver_allocations[sliver['urn']].component_id
+ rspec_node['slivers'] = [rspec_sliver]
+
+ # slivers always provide the ssh service
+ login = Login({'authentication': 'ssh-keys',
+ 'hostname': sliver['hostname'],
+ 'port':'22',
+ 'username': sliver['name'],
+ 'login': sliver['name']
+ })
+ service = ServicesElement({'login': login,
+ 'services_user': sliver['services_user']})
+ rspec_node['services'] = [service]
+ return rspec_node
+
+ def get_slice_tags(self, slice):
+ slice_tag_ids = []
+ slice_tag_ids.extend(slice['slice_tag_ids'])
+ tags = self.driver.shell.GetSliceTags({'slice_tag_id': slice_tag_ids})
+ # sorted by node_id
+ tags_dict = defaultdict(list)
+ for tag in tags:
+ tags_dict[tag['node_id']].append(tag)
+ return tags_dict
- filter = {}
+ def get_slice_nodes(self, slice, options={}):
+ nodes_dict = {}
+ filter = {'peer_id': None}
tags_filter = {}
- if slice and 'node_ids' in slice and slice['node_ids']:
+ if slice and slice.get('node_ids'):
filter['node_id'] = slice['node_ids']
- tags_filter=filter.copy()
-
- geni_available = options.get('geni_available')
+ else:
+ # there are no nodes to look up
+ return nodes_dict
+ tags_filter=filter.copy()
+ geni_available = options.get('geni_available')
if geni_available == True:
- filter['boot_state'] = 'boot'
-
- filter.update({'peer_id': None})
+ filter['boot_state'] = 'boot'
nodes = self.driver.shell.GetNodes(filter)
-
- # get the granularity in second for the reservation system
- grain = self.driver.shell.GetLeaseGranularity()
-
- site_ids = []
- interface_ids = []
- tag_ids = []
- nodes_dict = {}
for node in nodes:
- site_ids.append(node['site_id'])
- interface_ids.extend(node['interface_ids'])
- tag_ids.extend(node['node_tag_ids'])
nodes_dict[node['node_id']] = node
-
- # get sites
- sites_dict = self.get_sites({'site_id': site_ids})
- # get interfaces
- interfaces = self.get_interfaces({'interface_id':interface_ids})
- # get tags
- node_tags = self.get_node_tags(tags_filter)
- # get initscripts
- pl_initscripts = self.get_pl_initscripts()
-
- links = self.get_links(sites_dict, nodes_dict, interfaces)
-
- rspec_nodes = []
- for node in nodes:
- # skip whitelisted nodes
- if node['slice_ids_whitelist']:
- if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
- continue
- rspec_node = Node()
- # xxx how to retrieve site['login_base']
- site_id=node['site_id']
- site=sites_dict[site_id]
- rspec_node['component_id'] = hostname_to_urn(self.driver.hrn, site['login_base'], node['hostname'])
- rspec_node['component_name'] = node['hostname']
- rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
- rspec_node['authority_id'] = hrn_to_urn(PlXrn.site_hrn(self.driver.hrn, site['login_base']), 'authority+sa')
- # do not include boot state (<available> element) in the manifest rspec
- if not slice:
- rspec_node['boot_state'] = node['boot_state']
-
- #add the exclusive tag to distinguish between Shared and Reservable nodes
- if node['node_type'] == 'reservable':
- rspec_node['exclusive'] = 'true'
+ return nodes_dict
+
+ def rspec_node_to_geni_sliver(self, rspec_node, sliver_allocations = {}):
+ if rspec_node['sliver_id'] in sliver_allocations:
+ # set sliver allocation and operational status
+ sliver_allocation = sliver_allocations[rspec_node['sliver_id']]
+ if sliver_allocation:
+ allocation_status = sliver_allocation.allocation_state
+ if allocation_status == 'geni_allocated':
+ op_status = 'geni_pending_allocation'
+ elif allocation_status == 'geni_provisioned':
+ if rspec_node['boot_state'] == 'boot':
+ op_status = 'geni_ready'
+ else:
+ op_status = 'geni_failed'
+ else:
+ op_status = 'geni_unknown'
else:
- rspec_node['exclusive'] = 'false'
-
- rspec_node['hardware_types'] = [HardwareType({'name': 'plab-pc'}),
- HardwareType({'name': 'pc'})]
- # only doing this because protogeni rspec needs
- # to advertise available initscripts
- rspec_node['pl_initscripts'] = pl_initscripts.values()
- # add site/interface info to nodes.
- # assumes that sites, interfaces and tags have already been prepared.
- site = sites_dict[node['site_id']]
- if site['longitude'] and site['latitude']:
- location = Location({'longitude': site['longitude'], 'latitude': site['latitude'], 'country': 'unknown'})
- rspec_node['location'] = location
- # Granularity
- granularity = Granularity({'grain': grain})
- rspec_node['granularity'] = granularity
-
- rspec_node['interfaces'] = []
- if_count=0
- for if_id in node['interface_ids']:
- interface = Interface(interfaces[if_id])
- interface['ipv4'] = interface['ip']
- interface['component_id'] = PlXrn(auth=self.driver.hrn,
- interface='node%s:eth%s' % (node['node_id'], if_count)).get_urn()
- # interfaces in the manifest need a client id
- if slice:
- interface['client_id'] = "%s:%s" % (node['node_id'], if_id)
- rspec_node['interfaces'].append(interface)
- if_count+=1
-
- tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids']\
- if tag_id in node_tags]
- rspec_node['tags'] = tags
- if node['node_id'] in slivers:
- # add sliver info
- sliver = slivers[node['node_id']]
- rspec_node['sliver_id'] = sliver['sliver_id']
- rspec_node['slivers'] = [sliver]
- for tag in sliver['tags']:
- if tag['tagname'] == 'client_id':
- rspec_node['client_id'] = tag['value']
-
- # slivers always provide the ssh service
- login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
- service = Services({'login': login})
- rspec_node['services'] = [service]
- rspec_nodes.append(rspec_node)
- return (rspec_nodes, links)
-
+ allocation_status = 'geni_unallocated'
+ # required fields
+ geni_sliver = {'geni_sliver_urn': rspec_node['sliver_id'],
+ 'geni_expires': rspec_node['expires'],
+ 'geni_allocation_status' : allocation_status,
+ 'geni_operational_status': op_status,
+ 'geni_error': '',
+ }
+ return geni_sliver
- def get_leases(self, slice=None, options={}):
+ def get_leases(self, slice_xrn=None, slice=None, options={}):
+ if slice_xrn and not slice:
+ return []
+
now = int(time.time())
filter={}
filter.update({'clip':now})
site_id=lease['site_id']
site=sites_dict[site_id]
- rspec_lease['lease_id'] = lease['lease_id']
- rspec_lease['component_id'] = PlXrn(self.driver.hrn, hostname=lease['hostname']).urn
- slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
- slice_urn = hrn_to_urn(slice_hrn, 'slice')
- #rspec_lease['lease_id'] = lease['lease_id']
+ rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, site['login_base'], lease['hostname'])
+ if slice_xrn:
+ slice_urn = slice_xrn
+ slice_hrn, _ = urn_to_hrn(slice_urn)
+ else:
+ slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
+ slice_urn = hrn_to_urn(slice_hrn, 'slice')
rspec_lease['slice_id'] = slice_urn
rspec_lease['start_time'] = lease['t_from']
rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) / grain
return rspec_leases
- def get_rspec(self, slice_xrn=None, version = None, options={}):
+ def list_resources(self, version = None, options={}):
version_manager = VersionManager()
version = version_manager.get_version(version)
- if not slice_xrn:
- rspec_version = version_manager._get_version(version.type, version.version, 'ad')
- else:
- rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
-
- slice, slivers = self.get_slice_and_slivers(slice_xrn)
+ rspec_version = version_manager._get_version(version.type, version.version, 'ad')
rspec = RSpec(version=rspec_version, user_options=options)
- if slice and 'expires' in slice:
- rspec.xml.set('expires', datetime_to_string(utcparse(slice['expires'])))
-
- if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
- if slice_xrn and not slivers:
- nodes, links = [], []
- else:
- nodes, links = self.get_nodes_and_links(slice_xrn, slice, slivers, options)
- rspec.version.add_nodes(nodes)
+
+ if not options.get('list_leases') or options['list_leases'] != 'leases':
+ # get nodes
+ nodes = self.get_nodes(options)
+ site_ids = []
+ interface_ids = []
+ tag_ids = []
+ nodes_dict = {}
+ for node in nodes:
+ site_ids.append(node['site_id'])
+ interface_ids.extend(node['interface_ids'])
+ tag_ids.extend(node['node_tag_ids'])
+ nodes_dict[node['node_id']] = node
+ sites = self.get_sites({'site_id': site_ids})
+ interfaces = self.get_interfaces({'interface_id':interface_ids})
+ node_tags = self.get_node_tags({'node_tag_id': tag_ids})
+ pl_initscripts = self.get_pl_initscripts()
+ # convert nodes to rspec nodes
+ rspec_nodes = []
+ for node in nodes:
+ rspec_node = self.node_to_rspec_node(node, sites, interfaces, node_tags, pl_initscripts)
+ rspec_nodes.append(rspec_node)
+ rspec.version.add_nodes(rspec_nodes)
+
+ # add links
+ links = self.get_links(sites, nodes_dict, interfaces)
rspec.version.add_links(links)
- # add sliver defaults
- default_sliver = slivers.get(None, [])
- if default_sliver:
- default_sliver_attribs = default_sliver.get('tags', [])
- for attrib in default_sliver_attribs:
- logger.info(attrib)
- rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
-
+
+ if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
+ leases = self.get_leases(options=options)
+ rspec.version.add_leases(leases)
+
return rspec.toxml()
+ def describe(self, urns, version=None, options={}):
+ version_manager = VersionManager()
+ version = version_manager.get_version(version)
+ rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+ rspec = RSpec(version=rspec_version, user_options=options)
+
+ # get slivers
+ geni_slivers = []
+ slivers = self.get_slivers(urns, options)
+ if slivers:
+ rspec_expires = datetime_to_string(utcparse(slivers[0]['expires']))
+ else:
+ rspec_expires = datetime_to_string(utcparse(time.time()))
+ rspec.xml.set('expires', rspec_expires)
+
+ # lookup the sliver allocations
+ geni_urn = urns[0]
+ sliver_ids = [sliver['sliver_id'] for sliver in slivers]
+ constraint = SliverAllocation.sliver_id.in_(sliver_ids)
+ sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
+ sliver_allocation_dict = {}
+ for sliver_allocation in sliver_allocations:
+ geni_urn = sliver_allocation.slice_urn
+ sliver_allocation_dict[sliver_allocation.sliver_id] = sliver_allocation
+
+ if not options.get('list_leases') or options['list_leases'] != 'leases':
+ # add slivers
+ site_ids = []
+ interface_ids = []
+ tag_ids = []
+ nodes_dict = {}
+ for sliver in slivers:
+ site_ids.append(sliver['site_id'])
+ interface_ids.extend(sliver['interface_ids'])
+ tag_ids.extend(sliver['node_tag_ids'])
+ nodes_dict[sliver['node_id']] = sliver
+ sites = self.get_sites({'site_id': site_ids})
+ interfaces = self.get_interfaces({'interface_id':interface_ids})
+ node_tags = self.get_node_tags({'node_tag_id': tag_ids})
+ pl_initscripts = self.get_pl_initscripts()
+ rspec_nodes = []
+ for sliver in slivers:
+ if sliver['slice_ids_whitelist'] and sliver['slice_id'] not in sliver['slice_ids_whitelist']:
+ continue
+ rspec_node = self.sliver_to_rspec_node(sliver, sites, interfaces, node_tags,
+ pl_initscripts, sliver_allocation_dict)
+ # manifest node element shouldn't contain available attribute
+ rspec_node.pop('available')
+ rspec_nodes.append(rspec_node)
+ geni_sliver = self.rspec_node_to_geni_sliver(rspec_node, sliver_allocation_dict)
+ geni_slivers.append(geni_sliver)
+ rspec.version.add_nodes(rspec_nodes)
+
+ # add sliver defaults
+ #default_sliver = slivers.get(None, [])
+ #if default_sliver:
+ # default_sliver_attribs = default_sliver.get('tags', [])
+ # for attrib in default_sliver_attribs:
+ # rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
+
+ # add links
+ links = self.get_links(sites, nodes_dict, interfaces)
+ rspec.version.add_links(links)
+
+ if not options.get('list_leases') or options['list_leases'] != 'resources':
+ if slivers:
+ leases = self.get_leases(slice=slivers[0])
+ rspec.version.add_leases(leases)
+
+ return {'geni_urn': geni_urn,
+ 'geni_rspec': rspec.toxml(),
+ 'geni_slivers': geni_slivers}
import time
import datetime
#
from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
- RecordNotFound, SfaNotImplemented, SliverDoesNotExist
-
+ RecordNotFound, SfaNotImplemented, SliverDoesNotExist, SearchFailed, \
+ UnsupportedOperation, Forbidden
from sfa.util.sfalogging import logger
from sfa.util.defaultdict import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
# one would think the driver should not need to mess with the SFA db, but..
from sfa.storage.alchemy import dbsession
-from sfa.storage.model import RegRecord
+from sfa.storage.model import RegRecord, SliverAllocation
+from sfa.trust.credential import Credential
# used to be used in get_ticket
#from sfa.trust.sfaticket import SfaTicket
-
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec
# the driver interface, mostly provides default behaviours
from sfa.managers.driver import Driver
-
from sfa.planetlab.plshell import PlShell
import sfa.planetlab.peers as peers
from sfa.planetlab.plaggregate import PlAggregate
if PlDriver.cache is None:
PlDriver.cache = Cache()
self.cache = PlDriver.cache
+
+ def sliver_to_slice_xrn(self, xrn):
+ sliver_id_parts = Xrn(xrn).get_sliver_id_parts()
+ filter = {}
+ try:
+ filter['slice_id'] = int(sliver_id_parts[0])
+ except ValueError:
+ filter['name'] = sliver_id_parts[0]
+ slices = self.shell.GetSlices(filter)
+ if not slices:
+ raise Forbidden("Unable to locate slice record for sliver: %s" % xrn)
+ slice = slices[0]
+ slice_xrn = PlXrn(auth=self.hrn, slicename=slice['name'])
+ return slice_xrn
+ def check_sliver_credentials(self, creds, urns):
+ # build list of cred object hrns
+ slice_cred_names = []
+ for cred in creds:
+ slice_cred_hrn = Credential(cred=cred).get_gid_object().get_hrn()
+ slice_cred_names.append(PlXrn(xrn=slice_cred_hrn).pl_slicename())
+
+ # look up slice name of slivers listed in urns arg
+ slice_ids = []
+ for urn in urns:
+ sliver_id_parts = Xrn(xrn=urn).get_sliver_id_parts()
+ try:
+ slice_ids.append(int(sliver_id_parts[0]))
+ except ValueError:
+ pass
+
+ if not slice_ids:
+ raise Forbidden("sliver urn not provided")
+
+ slices = self.shell.GetSlices(slice_ids)
+ sliver_names = [slice['name'] for slice in slices]
+
+ # make sure we have a credential for every specified sliver ierd
+ for sliver_name in sliver_names:
+ if sliver_name not in slice_cred_names:
+ msg = "Valid credential not found for target: %s" % sliver_name
+ raise Forbidden(msg)
+
########################################
########## registry oriented
########################################
self.shell.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
elif type == 'node':
- login_base = PlXrn(xrn=sfa_record['authority'],type='node').pl_login_base()
+ login_base = PlXrn(xrn=sfa_record['authority'],type='authority').pl_login_base()
nodes = self.shell.GetNodes([pl_record['hostname']])
if not nodes:
pointer = self.shell.AddNode(login_base, pl_record)
def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
pointer = old_sfa_record['pointer']
type = old_sfa_record['type']
+ new_key_pointer = None
# new_key implemented for users only
if new_key and type not in [ 'user' ]:
keys = person['key_ids']
keys = self.shell.GetKeys(person['key_ids'])
- # Delete all stale keys
key_exists = False
for key in keys:
- if new_key != key['key']:
- self.shell.DeleteKey(key['key_id'])
- else:
+ if new_key == key['key']:
key_exists = True
+ new_key_pointer = key['key_id']
+ break
if not key_exists:
- self.shell.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
+ new_key_pointer = self.shell.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
elif type == "node":
self.shell.UpdateNode(pointer, new_sfa_record)
- return True
+ return (pointer, new_key_pointer)
##########
def testbed_name (self): return "myplc"
- # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
def aggregate_version (self):
- version_manager = VersionManager()
- ad_rspec_versions = []
- request_rspec_versions = []
- for rspec_version in version_manager.versions:
- if rspec_version.content_type in ['*', 'ad']:
- ad_rspec_versions.append(rspec_version.to_dict())
- if rspec_version.content_type in ['*', 'request']:
- request_rspec_versions.append(rspec_version.to_dict())
- return {
- 'testbed':self.testbed_name(),
- 'geni_request_rspec_versions': request_rspec_versions,
- 'geni_ad_rspec_versions': ad_rspec_versions,
- }
-
- def list_slices (self, creds, options):
- # look in cache first
- if self.cache:
- slices = self.cache.get('slices')
- if slices:
- logger.debug("PlDriver.list_slices returns from cache")
- return slices
-
- # get data from db
- slices = self.shell.GetSlices({'peer_id': None}, ['name'])
- slice_hrns = [slicename_to_hrn(self.hrn, slice['name']) for slice in slices]
- slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
-
- # cache the result
- if self.cache:
- logger.debug ("PlDriver.list_slices stores value in cache")
- self.cache.add('slices', slice_urns)
-
- return slice_urns
-
- # first 2 args are None in case of resource discovery
- def list_resources (self, slice_urn, slice_hrn, creds, options):
- cached_requested = options.get('cached', True)
-
- version_manager = VersionManager()
- # get the rspec's return format from options
- rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
- version_string = "rspec_%s" % (rspec_version)
-
- #panos adding the info option to the caching key (can be improved)
- if options.get('info'):
- version_string = version_string + "_"+options.get('info', 'default')
+ return {}
- # Adding the list_leases option to the caching key
- if options.get('list_leases'):
- version_string = version_string + "_"+options.get('list_leases', 'default')
-
- # Adding geni_available to caching key
- if options.get('geni_available'):
- version_string = version_string + "_" + str(options.get('geni_available'))
-
- # look in cache first
- if cached_requested and self.cache and not slice_hrn:
- rspec = self.cache.get(version_string)
- if rspec:
- logger.debug("PlDriver.ListResources: returning cached advertisement")
- return rspec
-
- #panos: passing user-defined options
- #print "manager options = ",options
+ # first 2 args are None in case of resource discovery
+ def list_resources (self, version=None, options={}):
aggregate = PlAggregate(self)
- rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
- options=options)
-
- # cache the result
- if self.cache and not slice_hrn:
- logger.debug("PlDriver.ListResources: stores advertisement in cache")
- self.cache.add(version_string, rspec)
-
+ rspec = aggregate.list_resources(version=version, options=options)
return rspec
-
- def sliver_status (self, slice_urn, slice_hrn):
- # find out where this slice is currently running
- slicename = hrn_to_pl_slicename(slice_hrn)
-
- slices = self.shell.GetSlices([slicename], ['slice_id', 'node_ids','person_ids','name','expires'])
- if len(slices) == 0:
- raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
- slice = slices[0]
-
- # report about the local nodes only
- nodes = self.shell.GetNodes({'node_id':slice['node_ids'],'peer_id':None},
- ['node_id', 'hostname', 'site_id', 'boot_state', 'last_contact'])
-
- if len(nodes) == 0:
- raise SliverDoesNotExist("You have not allocated any slivers here")
-
- # get login info
- user = {}
- if slice['person_ids']:
- persons = self.shell.GetPersons(slice['person_ids'], ['key_ids'])
- key_ids = [key_id for person in persons for key_id in person['key_ids']]
- person_keys = self.shell.GetKeys(key_ids)
- keys = [key['key'] for key in person_keys]
-
- user.update({'urn': slice_urn,
- 'login': slice['name'],
- 'protocol': ['ssh'],
- 'port': ['22'],
- 'keys': keys})
-
- site_ids = [node['site_id'] for node in nodes]
-
- result = {}
- top_level_status = 'unknown'
- if nodes:
- top_level_status = 'ready'
- result['geni_urn'] = slice_urn
- result['pl_login'] = slice['name']
- result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
- result['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
-
- resources = []
- for node in nodes:
- res = {}
- res['pl_hostname'] = node['hostname']
- res['pl_boot_state'] = node['boot_state']
- res['pl_last_contact'] = node['last_contact']
- res['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
- if node['last_contact'] is not None:
-
- res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
- sliver_xrn = Xrn(slice_urn, type='sliver', id=node['node_id'])
- sliver_xrn.set_authority(self.hrn)
-
- res['geni_urn'] = sliver_xrn.urn
- if node['boot_state'] == 'boot':
- res['geni_status'] = 'ready'
- else:
- res['geni_status'] = 'failed'
- top_level_status = 'failed'
-
- res['geni_error'] = ''
- res['users'] = [user]
-
- resources.append(res)
-
- result['geni_status'] = top_level_status
- result['geni_resources'] = resources
- return result
- def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+ def describe(self, urns, version, options={}):
+ aggregate = PlAggregate(self)
+ return aggregate.describe(urns, version=version, options=options)
+
+ def status (self, urns, options={}):
+ aggregate = PlAggregate(self)
+ desc = aggregate.describe(urns)
+ status = {'geni_urn': desc['geni_urn'],
+ 'geni_slivers': desc['geni_slivers']}
+ return status
+ def allocate (self, urn, rspec_string, expiration, options={}):
+ xrn = Xrn(urn)
aggregate = PlAggregate(self)
slices = PlSlices(self)
- peer = slices.get_peer(slice_hrn)
- sfa_peer = slices.get_sfa_peer(slice_hrn)
+ peer = slices.get_peer(xrn.get_hrn())
+ sfa_peer = slices.get_sfa_peer(xrn.get_hrn())
slice_record=None
+ users = options.get('geni_users', [])
if users:
slice_record = users[0].get('slice_record', {})
requested_attributes = rspec.version.get_slice_attributes()
# ensure site record exists
- site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer, options=options)
+ site = slices.verify_site(xrn.hrn, slice_record, peer, sfa_peer, options=options)
# ensure slice record exists
- slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer, options=options)
+ slice = slices.verify_slice(xrn.hrn, slice_record, peer, sfa_peer, expiration=expiration, options=options)
# ensure person records exists
- persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer, options=options)
+ #persons = slices.verify_persons(xrn.hrn, slice, users, peer, sfa_peer, options=options)
# ensure slice attributes exists
slices.verify_slice_attributes(slice, requested_attributes, options=options)
-
+
# add/remove slice from nodes
- requested_slivers = {}
- slivers = rspec.version.get_nodes_with_slivers()
- nodes = slices.verify_slice_nodes(slice, slivers, peer)
-
+ request_nodes = rspec.version.get_nodes_with_slivers()
+ nodes = slices.verify_slice_nodes(urn, slice, request_nodes, peer)
+
# add/remove links links
slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
# add/remove leases
- requested_leases = []
- kept_leases = []
- for lease in rspec.version.get_leases():
- requested_lease = {}
- if not lease.get('lease_id'):
- requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
- requested_lease['start_time'] = lease.get('start_time')
- requested_lease['duration'] = lease.get('duration')
- else:
- kept_leases.append(int(lease['lease_id']))
- if requested_lease.get('hostname'):
- requested_leases.append(requested_lease)
+ try:
+ rspec_requested_leases = rspec.version.get_leases()
+ leases = slices.verify_slice_leases(slice, rspec_requested_leases, peer)
+ except:
+ logger.log_exc('Failed to verify slice leases')
- #requested_leases = []
- #kept_leases = []
- #for lease in rspec.version.get_leases():
- # requested_lease = {}
- # if not lease.get('lease_id'):
- # requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
- # requested_lease['start_time'] = lease.get('start_time')
- # requested_lease['duration'] = lease.get('duration')
- # else:
- # kept_leases.append(int(lease['lease_id']))
- # if requested_lease.get('hostname'):
- # requested_leases.append(requested_lease)
-
- #leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
-
+
- leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
# handle MyPLC peer association.
# only used by plc and ple.
- slices.handle_peer(site, slice, persons, peer)
+ slices.handle_peer(site, slice, None, peer)
- return aggregate.get_rspec(slice_xrn=slice_urn,
- version=rspec.version)
+ return aggregate.describe([xrn.get_urn()], version=rspec.version)
- def delete_sliver (self, slice_urn, slice_hrn, creds, options):
- slicename = hrn_to_pl_slicename(slice_hrn)
- slices = self.shell.GetSlices({'name': slicename})
- if not slices:
- return True
- slice = slices[0]
-
- # leases
- leases = self.shell.GetLeases({'name': slicename})
- leases_ids = [lease['lease_id'] for lease in leases ]
- # determine if this is a peer slice
- # xxx I wonder if this would not need to use PlSlices.get_peer instead
- # in which case plc.peers could be deprecated as this here
- # is the only/last call to this last method in plc.peers
- peer = peers.get_peer(self, slice_hrn)
- try:
- if peer:
- self.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
- self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
- if len(leases_ids) > 0:
- self.shell.DeleteLeases(leases_ids)
- finally:
- if peer:
- self.shell.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
- return True
-
- def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
- slicename = hrn_to_pl_slicename(slice_hrn)
- slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
- if not slices:
- raise RecordNotFound(slice_hrn)
- slice = slices[0]
+ def provision(self, urns, options={}):
+ # update users
+ slices = PlSlices(self)
+ aggregate = PlAggregate(self)
+ slivers = aggregate.get_slivers(urns)
+ slice = slivers[0]
+ peer = slices.get_peer(slice['hrn'])
+ sfa_peer = slices.get_sfa_peer(slice['hrn'])
+ users = options.get('geni_users', [])
+ persons = slices.verify_persons(None, slice, users, peer, sfa_peer, options=options)
+ slices.handle_peer(None, None, persons, peer)
+ # update sliver allocation states and set them to geni_provisioned
+ sliver_ids = [sliver['sliver_id'] for sliver in slivers]
+ SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned')
+ version_manager = VersionManager()
+ rspec_version = version_manager.get_version(options['geni_rspec_version'])
+ return self.describe(urns, rspec_version, options=options)
+
+ def delete(self, urns, options={}):
+ # collect sliver ids so we can update sliver allocation states after
+ # we remove the slivers.
+ aggregate = PlAggregate(self)
+ slivers = aggregate.get_slivers(urns)
+ if slivers:
+ slice_id = slivers[0]['slice_id']
+ node_ids = []
+ sliver_ids = []
+ for sliver in slivers:
+ node_ids.append(sliver['node_id'])
+ sliver_ids.append(sliver['sliver_id'])
+
+ # determine if this is a peer slice
+ # xxx I wonder if this would not need to use PlSlices.get_peer instead
+ # in which case plc.peers could be deprecated as this here
+ # is the only/last call to this last method in plc.peers
+ slice_hrn = PlXrn(auth=self.hrn, slicename=slivers[0]['name']).get_hrn()
+ peer = peers.get_peer(self, slice_hrn)
+ try:
+ if peer:
+ self.shell.UnBindObjectFromPeer('slice', slice_id, peer)
+
+ self.shell.DeleteSliceFromNodes(slice_id, node_ids)
+
+ # delete sliver allocation states
+ SliverAllocation.delete_allocations(sliver_ids)
+ finally:
+ if peer:
+ self.shell.BindObjectToPeer('slice', slice_id, peer, slivers[0]['peer_slice_id'])
+
+ # prepare return struct
+ geni_slivers = []
+ for sliver in slivers:
+ geni_slivers.append(
+ {'geni_sliver_urn': sliver['sliver_id'],
+ 'geni_allocation_status': 'geni_unallocated',
+ 'geni_expires': datetime_to_string(utcparse(sliver['expires']))})
+ return geni_slivers
-
+
+ def renew (self, urns, expiration_time, options={}):
+ aggregate = PlAggregate(self)
+ slivers = aggregate.get_slivers(urns)
+ if not slivers:
+ raise SearchFailed(urns)
+ slice = slivers[0]
requested_time = utcparse(expiration_time)
record = {'expires': int(datetime_to_epoch(requested_time))}
- try:
- self.shell.UpdateSlice(slice['slice_id'], record)
- return True
- except:
- return False
+ self.shell.UpdateSlice(slice['slice_id'], record)
+ description = self.describe(urns, None, options)
+ return description['geni_slivers']
+
- # remove the 'enabled' tag
- def start_slice (self, slice_urn, slice_hrn, creds):
- slicename = hrn_to_pl_slicename(slice_hrn)
- slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
- if not slices:
- raise RecordNotFound(slice_hrn)
- slice_id = slices[0]['slice_id']
- slice_tags = self.shell.GetSliceTags({'slice_id': slice_id, 'tagname': 'enabled'}, ['slice_tag_id'])
- # just remove the tag if it exists
- if slice_tags:
- self.shell.DeleteSliceTag(slice_tags[0]['slice_tag_id'])
- return 1
+ def perform_operational_action (self, urns, action, options={}):
+ # MyPLC doesn't support operational actions. Lets pretend like it
+ # supports start, but reject everything else.
+ action = action.lower()
+ if action not in ['geni_start']:
+ raise UnsupportedOperation(action)
+
+ # fault if sliver is not full allocated (operational status is geni_pending_allocation)
+ description = self.describe(urns, None, options)
+ for sliver in description['geni_slivers']:
+ if sliver['geni_operational_status'] == 'geni_pending_allocation':
+ raise UnsupportedOperation(action, "Sliver must be fully allocated (operational status is not geni_pending_allocation)")
+ #
+ # Perform Operational Action Here
+ #
+
+ geni_slivers = self.describe(urns, None, options)['geni_slivers']
+ return geni_slivers
# set the 'enabled' tag to 0
- def stop_slice (self, slice_urn, slice_hrn, creds):
- slicename = hrn_to_pl_slicename(slice_hrn)
+ def shutdown (self, xrn, options={}):
+ xrn = PlXrn(xrn=xrn, type='slice')
+ slicename = xrn.pl_slicename()
slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
if not slices:
- raise RecordNotFound(slice_hrn)
+ raise RecordNotFound(slicename)
tag_id = slice_tags[0]['slice_tag_id']
self.shell.UpdateSliceTag(tag_id, '0')
return 1
-
- def reset_slice (self, slice_urn, slice_hrn, creds):
- raise SfaNotImplemented ("reset_slice not available at this interface")
-
- # xxx this code is quite old and has not run for ages
- # it is obviously totally broken and needs a rewrite
- def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
- raise SfaNotImplemented,"PlDriver.get_ticket needs a rewrite"
-# please keep this code for future reference
-# slices = PlSlices(self)
-# peer = slices.get_peer(slice_hrn)
-# sfa_peer = slices.get_sfa_peer(slice_hrn)
-#
-# # get the slice record
-# credential = api.getCredential()
-# interface = api.registries[api.hrn]
-# registry = api.server_proxy(interface, credential)
-# records = registry.Resolve(xrn, credential)
-#
-# # make sure we get a local slice record
-# record = None
-# for tmp_record in records:
-# if tmp_record['type'] == 'slice' and \
-# not tmp_record['peer_authority']:
-# #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
-# slice_record = SliceRecord(dict=tmp_record)
-# if not record:
-# raise RecordNotFound(slice_hrn)
-#
-# # similar to CreateSliver, we must verify that the required records exist
-# # at this aggregate before we can issue a ticket
-# # parse rspec
-# rspec = RSpec(rspec_string)
-# requested_attributes = rspec.version.get_slice_attributes()
-#
-# # ensure site record exists
-# site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
-# # ensure slice record exists
-# slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
-# # ensure person records exists
-# # xxx users is undefined in this context
-# persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
-# # ensure slice attributes exists
-# slices.verify_slice_attributes(slice, requested_attributes)
-#
-# # get sliver info
-# slivers = slices.get_slivers(slice_hrn)
-#
-# if not slivers:
-# raise SliverDoesNotExist(slice_hrn)
-#
-# # get initscripts
-# initscripts = []
-# data = {
-# 'timestamp': int(time.time()),
-# 'initscripts': initscripts,
-# 'slivers': slivers
-# }
-#
-# # create the ticket
-# object_gid = record.get_gid_object()
-# new_ticket = SfaTicket(subject = object_gid.get_subject())
-# new_ticket.set_gid_caller(api.auth.client_gid)
-# new_ticket.set_gid_object(object_gid)
-# new_ticket.set_issuer(key=api.key, subject=self.hrn)
-# new_ticket.set_pubkey(object_gid.get_pubkey())
-# new_ticket.set_attributes(data)
-# new_ticket.set_rspec(rspec)
-# #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
-# new_ticket.encode()
-# new_ticket.sign()
-#
-# return new_ticket.save_to_string(save_parents=True)
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.sfalogging import logger
from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
-
from sfa.rspecs.rspec import RSpec
-
from sfa.planetlab.vlink import VLink
-from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname
-
import time
+from sfa.planetlab.topology import Topology
+from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname
+from sfa.storage.model import SliverAllocation
+from sfa.storage.alchemy import dbsession
MAXINT = 2L**31-1
return sfa_peer
- def verify_slice_leases(self, slice, requested_leases, kept_leases, peer):
-
- leases = self.driver.shell.GetLeases({'name':slice['name']}, ['lease_id'])
+ def verify_slice_leases(self, slice, rspec_requested_leases, peer):
+
+ leases = self.driver.shell.GetLeases({'name':slice['name'], 'clip':int(time.time())}, ['lease_id','name', 'hostname', 't_from', 't_until'])
grain = self.driver.shell.GetLeaseGranularity()
- current_leases = [lease['lease_id'] for lease in leases]
- deleted_leases = list(set(current_leases).difference(kept_leases))
+
+ requested_leases = []
+ for lease in rspec_requested_leases:
+ requested_lease = {}
+ slice_name = hrn_to_pl_slicename(lease['slice_id'])
+ if slice_name != slice['name']:
+ continue
+ elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
+ continue
+
+ hostname = xrn_to_hostname(lease['component_id'])
+ # fill the requested node with nitos ids
+ requested_lease['name'] = slice['name']
+ requested_lease['hostname'] = hostname
+ requested_lease['t_from'] = int(lease['start_time'])
+ requested_lease['t_until'] = int(lease['duration']) * grain + int(lease['start_time'])
+ requested_leases.append(requested_lease)
+
+
+
+ # prepare actual slice leases by lease_id
+ leases_by_id = {}
+ for lease in leases:
+ leases_by_id[lease['lease_id']] = {'name': lease['name'], 'hostname': lease['hostname'], \
+ 't_from': lease['t_from'], 't_until': lease['t_until']}
+
+ added_leases = []
+ kept_leases_id = []
+ deleted_leases_id = []
+ for lease_id in leases_by_id:
+ if leases_by_id[lease_id] not in requested_leases:
+ deleted_leases_id.append(lease_id)
+ else:
+ kept_leases_id.append(lease_id)
+ requested_leases.remove(leases_by_id[lease_id])
+ added_leases = requested_leases
+
try:
if peer:
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
- deleted=self.driver.shell.DeleteLeases(deleted_leases)
- for lease in requested_leases:
- added=self.driver.shell.AddLeases(lease['hostname'], slice['name'], int(lease['start_time']), int(lease['duration']) * grain + int(lease['start_time']))
+ self.driver.shell.DeleteLeases(deleted_leases_id)
+ for lease in added_leases:
+ self.driver.shell.AddLeases(lease['hostname'], slice['name'], lease['t_from'], lease['t_until'])
except:
logger.log_exc('Failed to add/remove slice leases')
return leases
- def verify_slice_nodes(self, slice, slivers, peer):
+ def verify_slice_nodes(self, slice_urn, slice, rspec_nodes, peer):
+
+ slivers = {}
+ for node in rspec_nodes:
+ hostname = node.get('component_name')
+ client_id = node.get('client_id')
+ component_id = node.get('component_id').strip()
+ if hostname:
+ hostname = hostname.strip()
+ elif component_id:
+ hostname = xrn_to_hostname(component_id)
+ if hostname:
+ slivers[hostname] = {'client_id': client_id, 'component_id': component_id}
nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
current_slivers = [node['hostname'] for node in nodes]
+
+
# remove nodes not in rspec
- deleted_nodes = list(set(current_slivers).difference(requested_slivers))
+ deleted_nodes = list(set(current_slivers).difference(slivers.keys()))
# add nodes from rspec
- added_nodes = list(set(requested_slivers).difference(current_slivers))
+ added_nodes = list(set(slivers.keys()).difference(current_slivers))
try:
if peer:
except:
logger.log_exc('Failed to add/remove slice from nodes')
- # add tags
- for tag in tags:
- try:
- self.driver.shell.AddSliceTag(tag['slicename'], tag['tagname'], tag['value'], tag['node'])
- except:
- logger.log_exc('Failed to add slice tag')
- return nodes
+ slices = self.driver.shell.GetSlices(slice['name'], ['node_ids'])
+ resulting_nodes = self.driver.shell.GetNodes(slices[0]['node_ids'])
+
+ # update sliver allocations
+ for node in resulting_nodes:
+ client_id = slivers[node['hostname']]['client_id']
+ component_id = slivers[node['hostname']]['component_id']
+ sliver_hrn = '%s.%s-%s' % (self.driver.hrn, slice['slice_id'], node['node_id'])
+ sliver_id = Xrn(sliver_hrn, type='sliver').urn
+ record = SliverAllocation(sliver_id=sliver_id, client_id=client_id,
+ component_id=component_id,
+ slice_urn = slice_urn,
+ allocation_state='geni_allocated')
+ record.sync()
+ return resulting_nodes
def free_egre_key(self):
used = set()
return str(key)
def verify_slice_links(self, slice, requested_links, nodes):
- # nodes is undefined here
+
if not requested_links:
return
-
+
+ # exit if links are not supported here
+ topology = Topology()
+ if not topology:
+ return
+
# build dict of nodes
nodes_dict = {}
interface_ids = []
for link in requested_links:
# get the ip address of the first node in the link
ifname1 = Xrn(link['interface1']['component_id']).get_leaf()
- (node_raw, device) = ifname1.split(':')
+ ifname_parts = ifname1.split(':')
+ node_raw = ifname_parts[0]
+ device = None
+ if len(ifname_parts) > 1:
+ device = ifname_parts[1]
node_id = int(node_raw.replace('node', ''))
node = nodes_dict[node_id]
if1 = interfaces_dict[node['interface_ids'][0]]
return site
- def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, options={}):
+ def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, expiration, options={}):
slicename = hrn_to_pl_slicename(slice_hrn)
parts = slicename.split("_")
login_base = parts[0]
slices = self.driver.shell.GetSlices([slicename])
+ expires = int(datetime_to_epoch(utcparse(expiration)))
if not slices:
slice = {'name': slicename,
- 'url': slice_record.get('url', slice_hrn),
- 'description': slice_record.get('description', slice_hrn)}
+ 'url': 'No Url',
+ 'description': 'No Description'}
# add the slice
slice['slice_id'] = self.driver.shell.AddSlice(slice)
slice['node_ids'] = []
slice['person_ids'] = []
- if peer:
- slice['peer_slice_id'] = slice_record.get('slice_id', None)
- # mark this slice as an sfa peer record
-# if sfa_peer:
-# peer_dict = {'type': 'slice', 'hrn': slice_hrn,
-# 'peer_authority': sfa_peer, 'pointer': slice['slice_id']}
-# self.registry.register_peer_object(self.credential, peer_dict)
+ if peer and slice_record:
+ slice['peer_slice_id'] = slice_record.get('slice_id', None)
+ # set the expiration
+ self.driver.shell.UpdateSlice(slice['slice_id'], {'expires': expires})
else:
slice = slices[0]
- if peer:
+ if peer and slice_record:
slice['peer_slice_id'] = slice_record.get('slice_id', None)
# unbind from peer so we can modify if necessary. Will bind back later
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
- #Update existing record (e.g. expires field) it with the latest info.
- if slice_record.get('expires'):
- requested_expires = int(datetime_to_epoch(utcparse(slice_record['expires'])))
- if requested_expires and slice['expires'] != requested_expires:
- self.driver.shell.UpdateSlice( slice['slice_id'], {'expires' : requested_expires})
+
+ #Update expiration if necessary
+ if slice['expires'] != expires:
+ self.driver.shell.UpdateSlice( slice['slice_id'], {'expires' : expires})
return slice
from sfa.rspecs.elements.element import Element
-class Node(Element):
+class NodeElement(Element):
fields = [
+ 'client_id',
'component_id',
'component_name',
'component_manager_id',
from sfa.util.xrn import Xrn
from sfa.util.xml import XpathFilter
-from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.node import NodeElement
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.location import Location
from sfa.rspecs.elements.hardware_type import HardwareType
from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
from sfa.rspecs.elements.versions.pgv2Interface import PGv2Interface
+ from sfa.rspecs.elements.granularity import Granularity
from sfa.planetlab.plxrn import xrn_to_hostname
# set location
if node.get('location'):
node_elem.add_instance('location', node['location'], Location.fields)
+
+ # set granularity (only meaningful for reservable, i.e. exclusive, nodes)
+ # use .get() so nodes lacking an 'exclusive' key don't raise KeyError,
+ # and guard against a missing granularity before touching .fields
+ # (mirrors the guarded form used by the SFAv1 serializer below)
+ if node.get('exclusive') == "true":
+ granularity = node.get('granularity')
+ if granularity:
+ node_elem.add_instance('granularity', granularity, granularity.fields)
# set interfaces
PGv2Interface.add_interfaces(node_elem, node.get('interfaces'))
#if node.get('interfaces'):
# for interface in node.get('interfaces', []):
# node_elem.add_instance('interface', interface, ['component_id', 'client_id'])
# set available element
- if node.get('boot_state'):
- if node.get('boot_state').lower() == 'boot':
- available_elem = node_elem.add_element('available', now='true')
- else:
- available_elem = node_elem.add_element('available', now='false')
+ if node.get('available'):
+ available_elem = node_elem.add_element('available', now=node['available'])
# add services
PGv2Services.add_services(node_elem, node.get('services', []))
# add slivers
for initscript in node.get('pl_initscripts', []):
slivers['tags'].append({'name': 'initscript', 'value': initscript['name']})
PGv2SliverType.add_slivers(node_elem, slivers)
-
return node_elems
@staticmethod
def get_node_objs(node_elems):
nodes = []
for node_elem in node_elems:
- node = Node(node_elem.attrib, node_elem)
+ node = NodeElement(node_elem.attrib, node_elem)
nodes.append(node)
if 'component_id' in node_elem.attrib:
node['authority_id'] = Xrn(node_elem.attrib['component_id']).get_authority_urn()
if len(locations) > 0:
node['location'] = locations[0]
+ # get granularity
+ granularity_elems = node_elem.xpath('./default:granularity | ./granularity')
+ if len(granularity_elems) > 0:
+ node['granularity'] = granularity_elems[0].get_instance(Granularity)
+
# get interfaces
iface_elems = node_elem.xpath('./default:interface | ./interface')
node['interfaces'] = [iface_elem.get_instance(Interface) for iface_elem in iface_elems]
from sfa.util.xrn import Xrn
from sfa.rspecs.elements.element import Element
-from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.node import NodeElement
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.location import Location
from sfa.rspecs.elements.hardware_type import HardwareType
else:
network_elem = xml
- lease_elems = []
- for lease in leases:
- lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
- lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+ # group the leases by slice and timeslots
+ grouped_leases = []
+
+ while leases:
+ slice_id = leases[0]['slice_id']
+ start_time = leases[0]['start_time']
+ duration = leases[0]['duration']
+ group = []
+
+ for lease in leases:
+ if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
+ group.append(lease)
+
+ grouped_leases.append(group)
+
+ for lease1 in group:
+ leases.remove(lease1)
+
+ lease_elems = []
+ for lease in grouped_leases:
+ #lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+ lease_fields = ['slice_id', 'start_time', 'duration']
+ lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
lease_elems.append(lease_elem)
+ # add nodes of this lease
+ for node in lease:
+ lease_elem.add_instance('node', node, ['component_id'])
+
+
+
+ # lease_elems = []
+ # for lease in leases:
+ # lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+ # lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+ # lease_elems.append(lease_elem)
+
@staticmethod
def get_leases(xml, filter={}):
 @staticmethod
 def get_lease_objs(lease_elems):
- leases = []
+ # Flatten grouped <lease> elements back into one Lease per (lease, node)
+ # pair: each child <node> contributes its component_id, while slice_id,
+ # start_time and duration come from the enclosing <lease> attributes.
+ # NOTE(review): a <lease> with no <node> children yields no Lease at all
+ # (the old per-lease behavior is kept commented out below) -- confirm intended
+ leases = []
 for lease_elem in lease_elems:
- lease = Lease(lease_elem.attrib, lease_elem)
- if lease.get('lease_id'):
- lease['lease_id'] = lease_elem.attrib['lease_id']
- lease['component_id'] = lease_elem.attrib['component_id']
- lease['slice_id'] = lease_elem.attrib['slice_id']
- lease['start_time'] = lease_elem.attrib['start_time']
- lease['duration'] = lease_elem.attrib['duration']
-
- leases.append(lease)
- return leases
+ #get nodes
+ node_elems = lease_elem.xpath('./default:node | ./node')
+ for node_elem in node_elems:
+ lease = Lease(lease_elem.attrib, lease_elem)
+ lease['slice_id'] = lease_elem.attrib['slice_id']
+ lease['start_time'] = lease_elem.attrib['start_time']
+ lease['duration'] = lease_elem.attrib['duration']
+ lease['component_id'] = node_elem.attrib['component_id']
+ leases.append(lease)
+
+ return leases
+
+
+
+
+
+ # leases = []
+ # for lease_elem in lease_elems:
+ # lease = Lease(lease_elem.attrib, lease_elem)
+ # if lease.get('lease_id'):
+ # lease['lease_id'] = lease_elem.attrib['lease_id']
+ # lease['component_id'] = lease_elem.attrib['component_id']
+ # lease['slice_id'] = lease_elem.attrib['slice_id']
+ # lease['start_time'] = lease_elem.attrib['start_time']
+ # lease['duration'] = lease_elem.attrib['duration']
+
+ # leases.append(lease)
+ # return leases
from sfa.util.xrn import Xrn
from sfa.rspecs.elements.element import Element
-from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.node import NodeElement
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.location import Location
from sfa.rspecs.elements.hardware_type import HardwareType
if location:
node_elem.add_instance('location', location, Location.fields)
- # add granularity of the reservation system
- granularity = node.get('granularity')
- if granularity:
- node_elem.add_instance('granularity', granularity, granularity.fields)
+ # add exclusive tag to distinguish between Reservable and Shared nodes
+ exclusive_elem = node_elem.add_element('exclusive')
+ if node.get('exclusive') and node.get('exclusive') == 'true':
+ exclusive_elem.set_text('TRUE')
+ # add granularity of the reservation system
+ granularity = node.get('granularity')
+ if granularity:
+ node_elem.add_instance('granularity', granularity, granularity.fields)
+ else:
+ exclusive_elem.set_text('FALSE')
if isinstance(node.get('interfaces'), list):
tags = node.get('tags', [])
if tags:
for tag in tags:
- tag_elem = node_elem.add_element(tag['tagname'])
- tag_elem.set_text(tag['value'])
+ # backdoor for FITeagle
+ # Alexander Willner <alexander.willner@tu-berlin.de>
+ if tag['tagname']=="fiteagle_settings":
+ tag_elem = node_elem.add_element(tag['tagname'])
+ for subtag in tag['value']:
+ subtag_elem = tag_elem.add_element('setting')
+ subtag_elem.set('name', str(subtag['tagname']))
+ subtag_elem.set('description', str(subtag['description']))
+ subtag_elem.set_text(subtag['value'])
+ else:
+ tag_elem = node_elem.add_element(tag['tagname'])
+ tag_elem.set_text(tag['value'])
SFAv1Sliver.add_slivers(node_elem, node.get('slivers', []))
@staticmethod
def get_node_objs(node_elems):
nodes = []
for node_elem in node_elems:
- node = Node(node_elem.attrib, node_elem)
+ node = NodeElement(node_elem.attrib, node_elem)
if 'site_id' in node_elem.attrib:
node['authority_id'] = node_elem.attrib['site_id']
# get location
# get slivers
node['slivers'] = SFAv1Sliver.get_slivers(node_elem)
# get tags
- node['tags'] = SFAv1PLTag.get_pl_tags(node_elem, ignore=Node.fields+["hardware_type"])
+ node['tags'] = SFAv1PLTag.get_pl_tags(node_elem, ignore=NodeElement.fields+["hardware_type"])
# get hardware types
hardware_type_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
node['hardware_types'] = [hw_type.get_instance(HardwareType) for hw_type in hardware_type_elems]
self.user_options = user_options
self.elements = {}
if rspec:
- self.parse_xml(rspec)
+ if version:
+ self.version = self.version_manager.get_version(version)
+ self.parse_xml(rspec, version)
+ else:
+ self.parse_xml(rspec)
elif version:
self.create(version)
else:
"""
self.version = self.version_manager.get_version(version)
self.namespaces = self.version.namespaces
- self.parse_xml(self.version.template)
+ self.parse_xml(self.version.template, self.version)
# eg. 2011-03-23T19:53:28Z
date_format = '%Y-%m-%dT%H:%M:%SZ'
now = datetime.utcnow()
self.xml.set('generated', generated_ts)
- def parse_xml(self, xml):
+ def parse_xml(self, xml, version=None):
self.xml.parse_xml(xml)
- self.version = None
- if self.xml.schema:
- self.version = self.version_manager.get_version_by_schema(self.xml.schema)
- else:
- #raise InvalidRSpec('unknown rspec schema: %s' % schema)
- # TODO: Should start raising an exception once SFA defines a schema.
- # for now we just default to sfa
- self.version = self.version_manager.get_version({'type':'sfa','version': '1'})
+ if not version:
+ if self.xml.schema:
+ self.version = self.version_manager.get_version_by_schema(self.xml.schema)
+ else:
+ #raise InvalidRSpec('unknown rspec schema: %s' % schema)
+ # TODO: Should start raising an exception once SFA defines a schema.
+ # for now we just default to sfa
+ self.version = self.version_manager.get_version({'type':'sfa','version': '1'})
self.version.xml = self.xml
self.namespaces = self.xml.namespaces
def filter(self, filter):
if 'component_manager_id' in filter:
- nodes = self.version.get_node_elements()
+ nodes = self.version.get_nodes()
for node in nodes:
if 'component_manager_id' not in node.attrib or \
node.attrib['component_manager_id'] != filter['component_manager_id']:
parent = node.getparent()
- parent.remove(node)
+ parent.remove(node.element)
def toxml(self, header=True):
from sfa.trust.certificate import Keypair, Certificate
from sfa.trust.credential import Credential
from sfa.trust.rights import determine_rights
-
+from sfa.util.version import version_core
from sfa.server.xmlrpcapi import XmlrpcApi
-
from sfa.client.return_value import ReturnValue
delegated_cred = None
for cred in creds:
- if hierarchy.auth_exists(Credential(string=cred).get_gid_caller().get_hrn()):
+ if hierarchy.auth_exists(Credential(cred=cred).get_gid_caller().get_hrn()):
delegated_cred = cred
break
return delegated_cred
code = {
'geni_code': GENICODE.SUCCESS,
'am_type': 'sfa',
- 'am_code': None,
}
if isinstance(result, SfaFault):
code['geni_code'] = result.faultCode
output = result.faultString
return output
- def prepare_response_v2_am(self, result):
+ def prepare_response_am(self, result):
+ version = version_core()
response = {
- 'geni_api': 2,
+ 'geni_api': 3,
'code': self.get_geni_code(result),
'value': self.get_geni_value(result),
'output': self.get_geni_output(result),
"""
- # as of dec 13 2011 we only support API v2
+ # aggregate and slice manager responses now follow the GENI AM API v3 format (geni_api: 3)
if self.interface.lower() in ['aggregate', 'slicemgr']:
- result = self.prepare_response_v2_am(result)
+ result = self.prepare_response_am(result)
return XmlrpcApi.prepare_response(self, result, method)
from types import StringTypes
from datetime import datetime
-from sqlalchemy import Integer, String, DateTime
+from sqlalchemy import or_, and_
+from sqlalchemy import Column, Integer, String, DateTime
 from sqlalchemy import Table, Column, MetaData, join, ForeignKey
 from sqlalchemy.orm import relationship, backref
 from sqlalchemy.orm import column_property
+from sqlalchemy.orm import validates
result += ">"
return result
+class SliverAllocation(Base,AlchemyObj):
+ # Tracks the GENI AM API v3 allocation state of a single sliver,
+ # keyed by its sliver_id URN.
+ __tablename__ = 'sliver_allocation'
+ sliver_id = Column(String, primary_key=True)
+ client_id = Column(String)
+ component_id = Column(String)
+ slice_urn = Column(String)
+ allocation_state = Column(String)
+
+ def __init__(self, **kwds):
+ # accept only the known columns; anything else is silently ignored
+ if 'sliver_id' in kwds:
+ self.sliver_id = kwds['sliver_id']
+ if 'client_id' in kwds:
+ self.client_id = kwds['client_id']
+ if 'component_id' in kwds:
+ self.component_id = kwds['component_id']
+ if 'slice_urn' in kwds:
+ self.slice_urn = kwds['slice_urn']
+ if 'allocation_state' in kwds:
+ self.allocation_state = kwds['allocation_state']
+
+ def __repr__(self):
+ # close the angle bracket, for consistency with the other __repr__ above
+ result = "<sliver_allocation sliver_id=%s allocation_state=%s>" % \
+ (self.sliver_id, self.allocation_state)
+ return result
+
+ @validates('allocation_state')
+ def validate_allocation_state(self, key, state):
+ # only the three GENI AM API v3 allocation states are legal
+ allocation_states = ['geni_unallocated', 'geni_allocated', 'geni_provisioned']
+ assert state in allocation_states
+ return state
+
+ @staticmethod
+ def set_allocations(sliver_ids, state):
+ # Set the allocation state of every sliver in sliver_ids to 'state',
+ # inserting a new record for any sliver not yet tracked.
+ from sfa.storage.alchemy import dbsession
+ if not isinstance(sliver_ids, list):
+ sliver_ids = [sliver_ids]
+ constraint = SliverAllocation.sliver_id.in_(sliver_ids)
+ sliver_allocations = dbsession.query (SliverAllocation).filter(constraint)
+ sliver_ids_found = []
+ for sliver_allocation in sliver_allocations:
+ sliver_allocation.allocation_state = state
+ sliver_ids_found.append(sliver_allocation.sliver_id)
+
+ # Some states may not have been updated because no sliver allocation record
+ # exists for the sliver. Insert new allocation records for these slivers and
+ # set them to the requested state.
+ sliver_ids_not_found = set(sliver_ids).difference(sliver_ids_found)
+ for sliver_id in sliver_ids_not_found:
+ record = SliverAllocation(sliver_id=sliver_id, allocation_state=state)
+ dbsession.add(record)
+ dbsession.commit()
+
+ @staticmethod
+ def delete_allocations(sliver_ids):
+ # Remove the allocation records of every sliver in sliver_ids.
+ from sfa.storage.alchemy import dbsession
+ if not isinstance(sliver_ids, list):
+ sliver_ids = [sliver_ids]
+ constraint = SliverAllocation.sliver_id.in_(sliver_ids)
+ sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
+ for sliver_allocation in sliver_allocations:
+ dbsession.delete(sliver_allocation)
+ dbsession.commit()
+
+ def sync(self):
+ # Upsert: copy self onto the existing record for this sliver_id,
+ # or insert self if no such record exists yet.
+ from sfa.storage.alchemy import dbsession
+
+ constraints = [SliverAllocation.sliver_id==self.sliver_id]
+ results = dbsession.query(SliverAllocation).filter(and_(*constraints))
+ records = []
+ for result in results:
+ records.append(result)
+
+ if not records:
+ dbsession.add(self)
+ else:
+ record = records[0]
+ record.sliver_id = self.sliver_id
+ record.client_id = self.client_id
+ record.component_id = self.component_id
+ record.slice_urn = self.slice_urn
+ record.allocation_state = self.allocation_state
+ dbsession.commit()
+
+
+
##############################
# although the db needs of course to be reachable for the following functions
# the schema management functions are here and not in alchemy
logger.info("load from xml, keys=%s"%xml_dict.keys())
return make_record_dict (xml_dict)
+ ####################
+ # augment local records with data from builtin relationships
+ # expose related objects as a list of hrns
+ # we pick names that clearly won't conflict with the ones used in the old approach,
+ # where the relationships data came from the testbed side
+ # for each type, a dict of the form {<field-name-exposed-in-record>:<alchemy_accessor_name>}
+ # so after that, an 'authority' record will e.g. have a 'reg-pis' field with the hrns of its pi-users
+ augment_map={'authority': {'reg-pis':'reg_pis',},
+ 'slice': {'reg-researchers':'reg_researchers',},
+ 'user': {'reg-pi-authorities':'reg_authorities_as_pi',
+ 'reg-slices':'reg_slices_as_researcher',},
+ }
+
+ def augment_with_sfa_builtins (local_record):
+ # don't ruin the import of that file in a client world
+ from sfa.util.xrn import Xrn
+ # add a 'urn' field
+ setattr(local_record,'reg-urn',Xrn(xrn=local_record.hrn,type=local_record.type).urn)
+ # users have keys and this is needed to synthesize 'users' sent over to CreateSliver
+ if local_record.type=='user':
+ user_keys = [ key.key for key in local_record.reg_keys ]
+ setattr(local_record, 'reg-keys', user_keys)
+ # search in map according to record type
+ type_map=augment_map.get(local_record.type,{})
+ # use type-dep. map to do the job
+ for (field_name,attribute) in type_map.items():
+ # get related objects
+ related_records = getattr(local_record,attribute,[])
+ hrns = [ r.hrn for r in related_records ]
+ setattr (local_record, field_name, hrns)
+
+
# @param string If string!=None, load the credential from the string
# @param filename If filename!=None, load the credential from the file
# FIXME: create and subject are ignored!
- def __init__(self, create=False, subject=None, string=None, filename=None):
+ def __init__(self, create=False, subject=None, string=None, filename=None, cred=None):
self.gidCaller = None
self.gidObject = None
self.expiration = None
self.xml = None
self.refid = None
self.legacy = None
+ self.type = None
+ self.version = None
+
+ if cred:
+ if isinstance(cred, StringTypes):
+ string = cred
+ self.type = 'geni_sfa'
+ self.version = '1.0'
+ elif isinstance(cred, dict):
+ string = cred['geni_value']
+ self.type = cred['geni_type']
+ self.version = cred['geni_version']
+
# Check if this is a legacy credential, translate it if so
if string or filename:
if os.path.isfile(path + '/' + 'xmlsec1'):
self.xmlsec_path = path + '/' + 'xmlsec1'
break
- if not self.xmlsec_path:
- logger.warn("Could not locate binary for xmlsec1 - SFA will be unable to sign stuff !!")
 def get_subject(self):
+ # Return the printable subject of the credential's object GID, or ""
+ # when no gidObject is available even after decode() -- avoids the
+ # AttributeError the old unconditional dereference could raise.
+ subject = ""
 if not self.gidObject:
 self.decode()
- return self.gidObject.get_subject()
+ if self.gidObject:
+ subject = self.gidObject.get_printable_subject()
+ return subject
# sounds like this should be __repr__ instead ??
def get_summary_tostring(self):
# you have loaded an existing signed credential, do not call encode() or sign() on it.
def sign(self):
- if not self.issuer_privkey:
- logger.warn("Cannot sign credential (no private key)")
- return
- if not self.issuer_gid:
- logger.warn("Cannot sign credential (no issuer gid)")
+ if not self.issuer_privkey or not self.issuer_gid:
return
doc = parseString(self.get_xml())
sigs = doc.getElementsByTagName("signatures")[0]
# Call out to xmlsec1 to sign it
ref = 'Sig_%s' % self.get_refid()
filename = self.save_to_random_tmp_file()
- command='%s --sign --node-id "%s" --privkey-pem %s,%s %s' \
- % (self.xmlsec_path, ref, self.issuer_privkey, ",".join(gid_files), filename)
-# print 'command',command
- signed = os.popen(command).read()
+ signed = os.popen('%s --sign --node-id "%s" --privkey-pem %s,%s %s' \
+ % (self.xmlsec_path, ref, self.issuer_privkey, ",".join(gid_files), filename)).read()
os.remove(filename)
for gid_file in gid_files:
def decode(self):
if not self.xml:
return
+
+ doc = None
+ try:
+ doc = parseString(self.xml)
+ except ExpatError,e:
+ raise CredentialNotVerifiable("Malformed credential")
doc = parseString(self.xml)
sigs = []
signed_cred = doc.getElementsByTagName("signed-credential")
print self.dump_string(*args, **kwargs)
- def dump_string(self, dump_parents=False, show_xml=False):
+ def dump_string(self, dump_parents=False):
result=""
result += "CREDENTIAL %s\n" % self.get_subject()
filename=self.get_filename()
if filename: result += "Filename %s\n"%filename
- result += " privs: %s\n" % self.get_privileges().save_to_string()
+ privileges = self.get_privileges()
+ if privileges:
+ result += " privs: %s\n" % privileges.save_to_string()
+ else:
+ result += " privs: \n"
gidCaller = self.get_gid_caller()
if gidCaller:
result += " gidCaller:\n"
print " gidIssuer:"
self.get_signature().get_issuer_gid().dump(8, dump_parents)
+ if self.expiration:
+ print " expiration:", self.expiration.isoformat()
+
gidObject = self.get_gid_object()
if gidObject:
result += " gidObject:\n"
result += "\nPARENT"
result += self.parent.dump_string(True)
- if show_xml:
- try:
- tree = etree.parse(StringIO(self.xml))
- aside = etree.tostring(tree, pretty_print=True)
- result += "\nXML\n"
- result += aside
- result += "\nEnd XML\n"
- except:
- import traceback
- print "exc. Credential.dump_string / XML"
- traceback.print_exc()
-
return result