VERSIONTAG=$(rpmversion)-$(rpmtaglevel)
# this used to be 'should-be-redefined-by-specfile' and it indeed should be
SCMURL=git://git.onelab.eu/sfa.git
-TARBALL_HOST=root@build.onelab.eu
-TARBALL_TOPDIR=/build/sfa
-# I have an alternate pypitest entry defined in my .pypirc
-PYPI_TARGET=pypi
python: version
.PHONY: signatures
########## for uploading onto pypi
+# use pypitest instead for tests (both entries need to be defined in your .pypirc)
+PYPI_TARGET=pypi
+PYPI_TARBALL_HOST=root@build.onelab.eu
+PYPI_TARBALL_TOPDIR=/build/sfa
+
# a quick attempt on pypitest did not quite work as expected
# I was hoping to register the project using "setup.py register"
# but somehow most of my meta data did not make it up there
# run this only once the sources are in on the right tag
pypi: index.html
setup.py sdist upload -r $(PYPI_TARGET)
- ssh $(TARBALL_HOST) mkdir -p $(TARBALL_TOPDIR)/$(VERSIONTAG)
- rsync -av dist/sfa-$(VERSIONTAG).tar.gz $(TARBALL_HOST):$(TARBALL_TOPDIR)/$(VERSIONTAG)
+ ssh $(PYPI_TARBALL_HOST) mkdir -p $(PYPI_TARBALL_TOPDIR)/$(VERSIONTAG)
+ rsync -av dist/sfa-$(VERSIONTAG).tar.gz $(PYPI_TARBALL_HOST):$(PYPI_TARBALL_TOPDIR)/$(VERSIONTAG)
# cleanup
clean: readme-clean
else
ifdef PLCHOSTLXC
SSHURL:=root@$(PLCHOSTLXC):/vservers/$(GUESTNAME)
-SSHCOMMAND:=ssh root@$(PLCHOSTLXC) virsh -c lxc:/// lxc-enter-namespace $(GUESTNAME) -- /usr/bin/env
+SSHCOMMAND:=ssh root@$(PLCHOSTLXC) ssh -o StrictHostKeyChecking=no $(GUESTHOSTNAME)
else
ifdef PLCHOSTVS
SSHURL:=root@$(PLCHOSTVS):/vservers/$(GUESTNAME)
+$(RSYNC) --relative ./sfa/ --exclude migrations $(SSHURL)/usr/lib\*/python2.\*/site-packages/
synclibdeb: synccheck
+$(RSYNC) --relative ./sfa/ --exclude migrations $(SSHURL)/usr/share/pyshared/
+syncmigrations:
+ +$(RSYNC) ./sfa/storage/migrations/versions/*.py $(SSHURL)/usr/share/sfa/migrations/versions/
syncbin: synccheck
+$(RSYNC) $(BINS) $(SSHURL)/usr/bin/
syncinit: synccheck
if keys: print tab * (counter) + "(children: %s)" % (",".join(keys))
+#
+# this code probably is obsolete
+# RSpec is not imported, it does not have a toDict() method anyway
+# plus, getNodes.py is not exposed in packaging
+#
def main():
parser = create_parser();
(options, args) = parser.parse_args()
if not options.infile:
print "RSpec file not specified"
return
-
+
rspec = RSpec()
try:
rspec.parseFile(options.infile)
Depends: sfa-common, python-passlib, python-ldap
Description: the SFA layer around IotLab
-Package: sfa-cortexlab
-Architecture: any
-Depends: sfa-common, python-passlib, python-ldap
-Description: the SFA layer around CortexLab
-
Package: sfa-dummy
Architecture: any
Depends: sfa-common
+++ /dev/null
-usr/lib*/python*/dist-packages/sfa/cortexlab
}
####################
+POSTGRESQL_STARTED=/etc/sfa/postgresql-started
if [ -f /etc/redhat-release ] ; then
# source function library
. /etc/init.d/functions
# only if enabled
[ "$SFA_DB_ENABLED" == 1 -o "$SFA_DB_ENABLED" == True ] || return
- #if ! rpm -q myplc >& /dev/null; then
-
- ######## standalone deployment - no colocated myplc
-
- ######## sysconfig
- # Set data directory and redirect startup output to /var/log/pgsql
- mkdir -p $(dirname $postgresql_sysconfig)
- # remove previous definitions
- touch $postgresql_sysconfig
- tmp=${postgresql_sysconfig}.new
- ( egrep -v '^(PGDATA=|PGLOG=|PGPORT=)' $postgresql_sysconfig
- echo "PGDATA=$PGDATA"
- echo "PGLOG=/var/log/pgsql"
- echo "PGPORT=$SFA_DB_PORT"
- ) >> $tmp ; mv -f $tmp $postgresql_sysconfig
-
- ######## /var/lib/pgsql/data
- # Fix ownership (rpm installation may have changed it)
- chown -R -H postgres:postgres $(dirname $PGDATA)
-
- # PostgreSQL must be started at least once to bootstrap
- # /var/lib/pgsql/data
- if [ ! -f $postgresql_conf ] ; then
- service postgresql initdb &> /dev/null || :
- check
- fi
-
- ######## /var/lib/pgsql/data/postgresql.conf
- registry_ip=""
- foo=$(python -c "import socket; print socket.gethostbyname(\"$SFA_REGISTRY_HOST\")") && registry_ip="$foo"
- # Enable DB server. drop Postgresql<=7.x
- # PostgreSQL >=8.0 defines listen_addresses
- # listen on a specific IP + localhost, more robust when run within a vserver
- sed -i -e '/^listen_addresses/d' $postgresql_conf
- if [ -z "$registry_ip" ] ; then
- echo "listen_addresses = 'localhost'" >> $postgresql_conf
- else
- echo "listen_addresses = '${registry_ip},localhost'" >> $postgresql_conf
- fi
- # tweak timezone to be 'UTC'
- sed -i -e '/^timezone=/d' $postgresql_conf
- echo "timezone='UTC'" >> $postgresql_conf
-
- ######## /var/lib/pgsql/data/pg_hba.conf
- # Disable access to all DBs from all hosts
- sed -i -e '/^\(host\|local\)/d' $pg_hba_conf
-
- # Enable passwordless localhost access
- echo "local all all trust" >>$pg_hba_conf
- # grant access
- (
- echo "host $SFA_DB_NAME $SFA_DB_USER 127.0.0.1/32 password"
- [ -n "$registry_ip" ] && echo "host $SFA_DB_NAME $SFA_DB_USER ${registry_ip}/32 password"
- ) >>$pg_hba_conf
-
- if [ "$SFA_GENERIC_FLAVOUR" == "openstack" ] ; then
- [ -n "$registry_ip" ] && echo "host nova nova ${registry_ip}/32 password" >> $pg_hba_conf
- fi
-
- # Fix ownership (sed -i changes it)
- chown postgres:postgres $postgresql_conf $pg_hba_conf
-
- ######## compute a password if needed
- if [ -z "$SFA_DB_PASSWORD" ] ; then
- SFA_DB_PASSWORD=$(uuidgen)
- sfa-config --category=sfa_db --variable=password --value="$SFA_DB_PASSWORD" --save=$sfa_local_config $sfa_local_config >& /dev/null
- reload force
- fi
-
- #else
+ ######## sysconfig
+ # Set data directory and redirect startup output to /var/log/pgsql
+ mkdir -p $(dirname $postgresql_sysconfig)
+ # remove previous definitions
+ touch $postgresql_sysconfig
+ tmp=${postgresql_sysconfig}.new
+ ( egrep -v '^(PGDATA=|PGLOG=|PGPORT=)' $postgresql_sysconfig
+ echo "PGDATA=$PGDATA"
+ echo "PGLOG=/var/log/pgsql"
+ echo "PGPORT=$SFA_DB_PORT"
+ ) >> $tmp ; mv -f $tmp $postgresql_sysconfig
+
+ ######## /var/lib/pgsql/data
+ # Fix ownership (rpm installation may have changed it)
+ chown -R -H postgres:postgres $(dirname $PGDATA)
+
+ # PostgreSQL must be started at least once to bootstrap
+ # /var/lib/pgsql/data
+ if [ ! -f $postgresql_conf ] ; then
+ service postgresql initdb &> /dev/null || :
+ check
+ fi
- ######## we are colocated with a myplc
- # no need to worry about the pgsql setup (see /etc/plc.d/postgresql)
- # myplc enforces the password for its user
-
- # The code below overwrites the site specific sfa db info with myplc db info.
- # This is most likely unncecessary and wrong so I'm commenting it out for now.
- # PLC_DB_USER=$(plc-config --category=plc_db --variable=user)
- # PLC_DB_PASSWORD=$(plc-config --category=plc_db --variable=password)
- # store this as the SFA user/password
- # sfa-config --category=sfa_db --variable=user --value=$PLC_DB_USER --save=$sfa_local_config $sfa_local_config >& /dev/null
- # sfa-config --category=sfa_db --variable=password --value=$PLC_DB_PASSWORD --save=$sfa_local_config $sfa_local_config >& /dev/null
- # reload force
- #fi
+ ######## /var/lib/pgsql/data/postgresql.conf
+ registry_ip=""
+ foo=$(python -c "import socket; print socket.gethostbyname(\"$SFA_REGISTRY_HOST\")") && registry_ip="$foo"
+ # Enable DB server. drop Postgresql<=7.x
+ # PostgreSQL >=8.0 defines listen_addresses
+ # listen on a specific IP + localhost, more robust when run within a vserver
+ sed -i -e '/^listen_addresses/d' $postgresql_conf
+ if [ -z "$registry_ip" ] ; then
+ echo "listen_addresses = 'localhost'" >> $postgresql_conf
+ else
+ echo "listen_addresses = '${registry_ip},localhost'" >> $postgresql_conf
+ fi
+ # tweak timezone to be 'UTC'
+ sed -i -e '/^timezone=/d' $postgresql_conf
+ echo "timezone='UTC'" >> $postgresql_conf
+
+ ######## /var/lib/pgsql/data/pg_hba.conf
+ # Disable access to all DBs from all hosts
+ sed -i -e '/^\(host\|local\)/d' $pg_hba_conf
+
+ # Enable passwordless localhost access
+ echo "local all all trust" >>$pg_hba_conf
+ # grant access
+ (
+ echo "host $SFA_DB_NAME $SFA_DB_USER 127.0.0.1/32 password"
+ [ -n "$registry_ip" ] && echo "host $SFA_DB_NAME $SFA_DB_USER ${registry_ip}/32 password"
+ ) >>$pg_hba_conf
+
+ if [ "$SFA_GENERIC_FLAVOUR" == "openstack" ] ; then
+ [ -n "$registry_ip" ] && echo "host nova nova ${registry_ip}/32 password" >> $pg_hba_conf
+ fi
+
+ # Fix ownership (sed -i changes it)
+ chown postgres:postgres $postgresql_conf $pg_hba_conf
+
+ ######## compute a password if needed
+ if [ -z "$SFA_DB_PASSWORD" ] ; then
+ SFA_DB_PASSWORD=$(uuidgen)
+ sfa-config --category=sfa_db --variable=password --value="$SFA_DB_PASSWORD" --save=$sfa_local_config $sfa_local_config >& /dev/null
+ reload force
+ fi
######## Start up the server
# not too nice, but.. when co-located with myplc we'll let it start/stop postgresql
- if [ ! -f /etc/myplc-release ] ; then
- echo STARTING...
+ postgresql_check || {
service postgresql start >& /dev/null
- fi
+ MESSAGE=$"Starting PostgreSQL server"
+ echo -n "$MESSAGE"
+ [ "$ERRORS" == 0 ] && success "$MESSAGE" || failure "$MESSAGE" ; echo
+ # best-effort to make sure we turn it back off when running stop
+ touch $POSTGRESQL_STARTED
+ }
postgresql_check
check
[ "$SFA_DB_ENABLED" == 1 -o "$SFA_DB_ENABLED" == True ] || return
# not too nice, but.. when co-located with myplc we'll let it start/stop postgresql
- if [ ! -f /etc/myplc-release ] ; then
+ if [ -f $POSTGRESQL_STARTED ] ; then
service postgresql stop >& /dev/null
check
MESSAGE=$"Stopping PostgreSQL server"
echo -n "$MESSAGE"
[ "$ERRORS" == 0 ] && success "$MESSAGE" || failure "$MESSAGE" ; echo
+ rm -f $POSTGRESQL_STARTED
fi
}
'sfa/openstack',
'sfa/federica',
'sfa/iotlab',
- 'sfa/cortexlab',
'sfatables',
'sfatables/commands',
'sfatables/processors',
except:
long_description = "Unable to read index.html"
- setup(name='sfa',
- packages = packages,
- data_files = data_files,
- version=version_tag,
- keywords = ['federation','testbeds','SFA','SfaWrap'],
- url="http://svn.planet-lab.org/wiki/SFATutorial",
- author="Thierry Parmentelat, Tony Mack, Scott Baker",
- author_email="thierry.parmentelat@inria.fr, tmack@princeton.cs.edu, smbaker@gmail.com",
- download_url = "http://build.onelab.eu/sfa/{v}/sfa-{v}.tar.gz".format(v=version_tag),
- description="SFA Wrapper with drivers for PlanetLab and IotLab and others",
- license = license,
- long_description = long_description,
- scripts = scripts,
+ setup(
+ name = 'sfa',
+ packages = packages,
+ data_files = data_files,
+ version = version_tag,
+ keywords = ['federation','testbeds','SFA','SfaWrap'],
+ url = "http://svn.planet-lab.org/wiki/SFATutorial",
+ author = "Thierry Parmentelat, Tony Mack, Scott Baker",
+ author_email = "thierry.parmentelat@inria.fr, tmack@princeton.cs.edu, smbaker@gmail.com",
+ download_url = "http://build.onelab.eu/sfa/{v}/sfa-{v}.tar.gz".format(v=version_tag),
+ description = "SFA Wrapper with drivers for PlanetLab and IotLab and others",
+ license = license,
+ long_description = long_description,
+ scripts = scripts,
)
%define name sfa
%define version 3.1
-%define taglevel 13
+%define taglevel 18
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
Group: Applications/System
Requires: sfa
-%package cortexlab
-Summary: the SFA layer around CortexLab
-Group: Applications/System
-Requires: sfa
-
%package dummy
Summary: the SFA layer around a Dummy Testbed
Group: Applications/System
%description iotlab
The SFA driver for IotLab.
-%description cortexlab
-The SFA driver for CortexLab.
-
%description dummy
The SFA driver for a Dummy Testbed.
%files iotlab
%{python_sitelib}/sfa/iotlab
-%files cortexlab
-%{python_sitelib}/sfa/cortexlab
-
%files dummy
%{python_sitelib}/sfa/dummy
#[ "$1" -ge "1" ] && service sfa-cm restart || :
%changelog
+* Mon Jun 08 2015 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-3.1-18
+- incorporated Frederic Saint Marcel's addition of ASAP management tag
+
+* Fri Jun 05 2015 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-3.1-17
+- workaround for 'name' not being exposed properly by List() on authority objects
+- fix a corner case in PL importer
+- trashed module registry_manager_openstack
+
+* Thu Jun 04 2015 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-3.1-16
+- added a new builtin column 'name' for authorities in the sfa registry
+- this is kept in sync with MyPLC's site names when relevant
+- sfa update -t authority thus now has a new -n/--name option
+- sfi register or update can specify record type on only 2 characters (au, us, no, or sl)
+- reviewed Describe and Allocate wrt slice tags for a PL AM:
+- Describe now exposes all slice tags with a 'scope' being 'sliver' or 'slice'
+- Allocate now by default ignores incoming slice tags
+- Allocate's options can mention 'pltags' among 'ignore', 'append', 'sync'
+- default being 'ignore'
+- in 'ignore' mode, slice tags are unchanged in the PL db
+- in 'append' mode, slice tags from the rspec are added to the db unless
+- they are already present
+- in 'sync' mode, the code attempts to leave the PL db in sync with the tags
+- provided in rspec; this can be dangerous and is thus no longer the default
+- behaviour
+
+* Thu Apr 23 2015 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-3.1-15
+- major rework of the iotlab driver, that uses an IoT-lab REST API
+- and so does not need to interact with LDAP and OAR directly
+- deprecated cortexlab driver altogether
+- cosmetic changes in displaying credentials, rights and certificates
+- for hopefully more readable error messages
+- always start postgresql if not running (ignore /etc/myplc-release)
+- does not need lxc-enter-namespace anymore for make sync
+
+* Thu Apr 09 2015 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-3.1-14
+- for SSL & python-2.7.9: ignore server verification
+- assume 2.7: remove compat code - always use HTTPSConnection (not HTTPS anymore)
+- fix: Reset GIDs works even if user has no pub_key
+- tweak for ubuntu (that does not have systemctl)
+- iotlab driver: fix ldap account creation at each lease
+- miscell cosmetic & layout
+
* Mon Dec 01 2014 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-3.1-13
- bugfix - was adding extraneous backslashes in email address when attempting to AddPerson
print "%s (%s)" % (record['hrn'], record['type'])
def terminal_render_user (record, options):
print "%s (User)"%record['hrn'],
- if record.get('reg-pi-authorities',None): print " [PI at %s]"%(" and ".join(record['reg-pi-authorities'])),
- if record.get('reg-slices',None): print " [IN slices %s]"%(" and ".join(record['reg-slices'])),
+ if options.verbose and record.get('email', None):
+ print "email='{}'".format(record['email']),
+ if record.get('reg-pi-authorities', None):
+ print " [PI at %s]"%(" and ".join(record['reg-pi-authorities'])),
+ if record.get('reg-slices', None):
+ print " [IN slices %s]"%(" and ".join(record['reg-slices'])),
user_keys=record.get('reg-keys',[])
if not options.verbose:
print " [has %s]"%(terminal_render_plural(len(user_keys),"key"))
def terminal_render_slice (record, options):
print "%s (Slice)"%record['hrn'],
- if record.get('reg-researchers',None): print " [USERS %s]"%(" and ".join(record['reg-researchers'])),
+ if record.get('reg-researchers', None):
+ print " [USERS %s]"%(" and ".join(record['reg-researchers'])),
# print record.keys()
print ""
def terminal_render_authority (record, options):
print "%s (Authority)"%record['hrn'],
- if record.get('reg-pis',None): print " [PIS %s]"%(" and ".join(record['reg-pis'])),
+ if options.verbose and record.get('name'):
+ print "name='{}'".format(record['name'])
+ if record.get('reg-pis', None):
+ print " [PIS %s]"%(" and ".join(record['reg-pis'])),
print ""
def terminal_render_node (record, options):
print "%s (Node)"%record['hrn']
### used in sfi list
-def terminal_render (records,options):
+def terminal_render (records, options):
# sort records by type
- grouped_by_type={}
+ grouped_by_type = {}
for record in records:
- type=record['type']
- if type not in grouped_by_type: grouped_by_type[type]=[]
+ type = record['type']
+ if type not in grouped_by_type:
+ grouped_by_type[type]=[]
grouped_by_type[type].append(record)
- group_types=grouped_by_type.keys()
+ group_types = grouped_by_type.keys()
group_types.sort()
for type in group_types:
- group=grouped_by_type[type]
+ group = grouped_by_type[type]
# print 20 * '-', type
- try: renderer=eval('terminal_render_'+type)
- except: renderer=terminal_render_default
- for record in group: renderer(record,options)
-
+ try: renderer = eval('terminal_render_' + type)
+ except: renderer = terminal_render_default
+ for record in group:
+ renderer(record, options)
####################
def filter_records(type, records):
DEFAULT_URL = "http://myslice.onelab.eu:7080"
DEFAULT_PLATFORM = 'ple'
+# starting with 2.7.9 we need to turn off server verification
+import ssl
+ssl_needs_unverified_context = hasattr(ssl, '_create_unverified_context')
+
import xmlrpclib
import getpass
# return self._proxy
url=self.url()
self.logger.debug("Connecting manifold url %s"%url)
- return xmlrpclib.ServerProxy(url, allow_none = True)
+ if not ssl_needs_unverified_context:
+ proxy = xmlrpclib.ServerProxy(url, allow_none = True)
+ else:
+ proxy = xmlrpclib.ServerProxy(url, allow_none = True,
+ context=ssl._create_unverified_context())
+ return proxy
# does the job for one credential
# expects the credential (string) and an optional message (e.g. hrn) for reporting
# XMLRPC-specific code for SFA Client
+# starting with 2.7.9 we need to turn off server verification
+import ssl
+ssl_needs_unverified_context = hasattr(ssl, '_create_unverified_context')
+
import xmlrpclib
from httplib import HTTPS, HTTPSConnection
from sfa.util.sfalogging import logger
except:
import logging
- logger=logging.getLogger('sfaserverproxy')
+ logger = logging.getLogger('sfaserverproxy')
##
# ServerException, ExceptionUnmarshaller
#
# A transport for XMLRPC that works on top of HTTPS
-# python 2.7 xmlrpclib has changed its internal code
-# it now calls 'getresponse' on the obj returned by make_connection
-# while it used to call 'getreply'
-# regardless of the version, httplib.HTTPS does implement getreply,
-# while httplib.HTTPSConnection has getresponse
-# so we create a dummy instance to check what's expected
-need_HTTPSConnection=hasattr(xmlrpclib.Transport().make_connection('localhost'),'getresponse')
+# targeting only python-2.7 we can get rid of some older code
class XMLRPCTransport(xmlrpclib.Transport):
- def __init__(self, key_file=None, cert_file=None, timeout=None):
+ def __init__(self, key_file = None, cert_file = None, timeout = None):
xmlrpclib.Transport.__init__(self)
self.timeout=timeout
self.key_file = key_file
# create a HTTPS connection object from a host descriptor
# host may be a string, or a (host, x509-dict) tuple
host, extra_headers, x509 = self.get_host_info(host)
- if need_HTTPSConnection:
- conn = HTTPSConnection(host, None, key_file=self.key_file, cert_file=self.cert_file)
+ if not ssl_needs_unverified_context:
+ conn = HTTPSConnection(host, None, key_file = self.key_file,
+ cert_file = self.cert_file)
else:
- conn = HTTPS(host, None, key_file=self.key_file, cert_file=self.cert_file)
+ conn = HTTPSConnection(host, None, key_file = self.key_file,
+ cert_file = self.cert_file,
+ context = ssl._create_unverified_context())
# Some logic to deal with timeouts. It appears that some (or all) versions
# of python don't set the timeout after the socket is created. We'll do it
# remember url for GetVersion
# xxx not sure this is still needed as SfaServerProxy has this too
self.url=url
- xmlrpclib.ServerProxy.__init__(self, url, transport, allow_none=allow_none, verbose=verbose)
+ if not ssl_needs_unverified_context:
+ xmlrpclib.ServerProxy.__init__(self, url, transport, allow_none=allow_none,
+ verbose=verbose)
+ else:
+ xmlrpclib.ServerProxy.__init__(self, url, transport, allow_none=allow_none,
+ verbose=verbose,
+ context=ssl._create_unverified_context())
def __getattr__(self, attr):
- logger.debug ("xml-rpc %s method:%s"%(self.url,attr))
+ logger.debug ("xml-rpc %s method:%s" % (self.url, attr))
return xmlrpclib.ServerProxy.__getattr__(self, attr)
########## the object on which we can send methods that get sent over xmlrpc
class SfaServerProxy:
def __init__ (self, url, keyfile, certfile, verbose=False, timeout=None):
- self.url=url
- self.keyfile=keyfile
- self.certfile=certfile
- self.verbose=verbose
- self.timeout=timeout
+ self.url = url
+ self.keyfile = keyfile
+ self.certfile = certfile
+ self.verbose = verbose
+ self.timeout = timeout
# an instance of xmlrpclib.ServerProxy
transport = XMLRPCTransport(keyfile, certfile, timeout)
self.serverproxy = XMLRPCServerProxy(url, transport, allow_none=True, verbose=verbose)
# this module is also used in sfascan
#
+from __future__ import print_function
+
import sys
sys.path.append('.')
from sfa.client.candidates import Candidates
from sfa.client.manifolduploader import ManifoldUploader
-CM_PORT=12346
+CM_PORT = 12346
+DEFAULT_RSPEC_VERSION = "GENI 3"
from sfa.client.common import optparse_listvalue_callback, optparse_dictvalue_callback, \
terminal_render, filter_records
else:
result = rspec
- print result
+ print(result)
return
def display_list(results):
for result in results:
- print result
+ print(result)
def display_records(recordList, dump=False):
''' Print all fields in the record'''
record.dump(sort=True)
else:
info = record.getdict()
- print "%s (%s)" % (info['hrn'], info['type'])
+ print("{} ({})".format(info['hrn'], info['type']))
return
def credential_printable (cred):
- credential=Credential(cred=cred)
+ credential = Credential(cred=cred)
result=""
- result += credential.get_summary_tostring()
+ result += credential.pretty_cred()
result += "\n"
rights = credential.get_privileges()
- result += "type=%s\n" % credential.type
- result += "version=%s\n" % credential.version
- result += "rights=%s\n"%rights
+ result += "type={}\n".format(credential.type)
+ result += "version={}\n".format(credential.version)
+ result += "rights={}\n".format(rights)
return result
def show_credentials (cred_s):
if not isinstance (cred_s,list): cred_s = [cred_s]
for cred in cred_s:
- print "Using Credential %s"%credential_printable(cred)
+ print("Using Credential {}".format(credential_printable(cred)))
+
+########## save methods
-# save methods
-def save_raw_to_file(var, filename, format="text", banner=None):
- if filename == "-":
- # if filename is "-", send it to stdout
- f = sys.stdout
+### raw
+def save_raw_to_file(var, filename, format='text', banner=None):
+ if filename == '-':
+ _save_raw_to_file(var, sys.stdout, format, banner)
else:
- f = open(filename, "w")
- if banner:
- f.write(banner+"\n")
+        with open(filename, 'w') as fileobj:
+ _save_raw_to_file(var, fileobj, format, banner)
+ print("(Over)wrote {}".format(filename))
+
+def _save_raw_to_file(var, f, format, banner):
if format == "text":
- f.write(str(var))
+ if banner: f.write(banner+"\n")
+ f.write("{}".format(var))
+ if banner: f.write('\n'+banner+"\n")
elif format == "pickled":
f.write(pickle.dumps(var))
elif format == "json":
- if hasattr(json, "dumps"):
- f.write(json.dumps(var)) # python 2.6
- else:
- f.write(json.write(var)) # python 2.5
+ f.write(json.dumps(var)) # python 2.6
else:
# this should never happen
- print "unknown output format", format
- if banner:
- f.write('\n'+banner+"\n")
+ print("unknown output format", format)
+###
def save_rspec_to_file(rspec, filename):
if not filename.endswith(".rspec"):
filename = filename + ".rspec"
- f = open(filename, 'w')
- f.write("%s"%rspec)
- f.close()
- return
+ with open(filename, 'w') as f:
+ f.write("{}".format(rspec))
+ print("(Over)wrote {}".format(filename))
+
+def save_record_to_file(filename, record_dict):
+ record = Record(dict=record_dict)
+ xml = record.save_as_xml()
+ with codecs.open(filename, encoding='utf-8',mode="w") as f:
+ f.write(xml)
+ print("(Over)wrote {}".format(filename))
def save_records_to_file(filename, record_dicts, format="xml"):
if format == "xml":
- index = 0
- for record_dict in record_dicts:
- if index > 0:
- save_record_to_file(filename + "." + str(index), record_dict)
- else:
- save_record_to_file(filename, record_dict)
- index = index + 1
+ for index, record_dict in enumerate(record_dicts):
+ save_record_to_file(filename + "." + str(index), record_dict)
elif format == "xmllist":
- f = open(filename, "w")
- f.write("<recordlist>\n")
- for record_dict in record_dicts:
- record_obj=Record(dict=record_dict)
- f.write('<record hrn="' + record_obj.hrn + '" type="' + record_obj.type + '" />\n')
- f.write("</recordlist>\n")
- f.close()
+ with open(filename, "w") as f:
+ f.write("<recordlist>\n")
+ for record_dict in record_dicts:
+ record_obj = Record(dict=record_dict)
+ f.write('<record hrn="' + record_obj.hrn + '" type="' + record_obj.type + '" />\n')
+ f.write("</recordlist>\n")
+ print("(Over)wrote {}".format(filename))
+
elif format == "hrnlist":
- f = open(filename, "w")
- for record_dict in record_dicts:
- record_obj=Record(dict=record_dict)
- f.write(record_obj.hrn + "\n")
- f.close()
+ with open(filename, "w") as f:
+ for record_dict in record_dicts:
+ record_obj = Record(dict=record_dict)
+ f.write(record_obj.hrn + "\n")
+ print("(Over)wrote {}".format(filename))
+
else:
# this should never happen
- print "unknown output format", format
-
-def save_record_to_file(filename, record_dict):
- record = Record(dict=record_dict)
- xml = record.save_as_xml()
- f=codecs.open(filename, encoding='utf-8',mode="w")
- f.write(xml)
- f.close()
- return
+ print("unknown output format", format)
# minimally check a key argument
def check_ssh_key (key):
return re.match(good_ssh_key, key, re.IGNORECASE)
# load methods
+def normalize_type (type):
+ if type.startswith('au'):
+ return 'authority'
+ elif type.startswith('us'):
+ return 'user'
+ elif type.startswith('sl'):
+ return 'slice'
+ elif type.startswith('no'):
+ return 'node'
+ elif type.startswith('ag'):
+ return 'aggregate'
+ elif type.startswith('al'):
+ return 'all'
+ else:
+ print('unknown type {} - should start with one of au|us|sl|no|ag|al'.format(type))
+ return None
+
def load_record_from_opts(options):
record_dict = {}
if hasattr(options, 'xrn') and options.xrn:
record_dict['reg-researchers'] = options.reg_researchers
if hasattr(options, 'email') and options.email:
record_dict['email'] = options.email
+ # authorities can have a name for standalone deployment
+ if hasattr(options, 'name') and options.name:
+ record_dict['name'] = options.name
if hasattr(options, 'reg_pis') and options.reg_pis:
record_dict['reg-pis'] = options.reg_pis
return Record(dict=record_dict)
def load_record_from_file(filename):
- f=codecs.open(filename, encoding="utf-8", mode="r")
- xml_string = f.read()
- f.close()
- return Record(xml=xml_string)
-
+ with codecs.open(filename, encoding="utf-8", mode="r") as f:
+ xml_str = f.read()
+ return Record(xml=xml_str)
import uuid
def unique_call_id(): return uuid.uuid4().urn
format3offset=47
line=80*'-'
if not verbose:
- print format3%("command","cmd_args","description")
- print line
+ print(format3%("command", "cmd_args", "description"))
+ print(line)
else:
- print line
+ print(line)
self.create_parser_global().print_help()
# preserve order from the code
for command in commands_list:
try:
(doc, args_string, example, canonical) = commands_dict[command]
except:
- print "Cannot find info on command %s - skipped"%command
+ print("Cannot find info on command %s - skipped"%command)
continue
if verbose:
- print line
+ print(line)
if command==canonical:
- doc=doc.replace("\n","\n"+format3offset*' ')
- print format3%(command,args_string,doc)
+ doc = doc.replace("\n", "\n" + format3offset * ' ')
+ print(format3 % (command,args_string,doc))
if verbose:
self.create_parser_command(command).print_help()
else:
- print format3%(command,"<<alias for %s>>"%canonical,"")
+ print(format3 % (command,"<<alias for %s>>"%canonical,""))
### now if a known command was found we can be more verbose on that one
def print_help (self):
- print "==================== Generic sfi usage"
+ print("==================== Generic sfi usage")
self.sfi_parser.print_help()
- (doc,_,example,canonical)=commands_dict[self.command]
+ (doc, _, example, canonical) = commands_dict[self.command]
if canonical != self.command:
- print "\n==================== NOTE: %s is an alias for genuine %s"%(self.command,canonical)
- self.command=canonical
- print "\n==================== Purpose of %s"%self.command
- print doc
- print "\n==================== Specific usage for %s"%self.command
+ print("\n==================== NOTE: {} is an alias for genuine {}"
+ .format(self.command, canonical))
+ self.command = canonical
+ print("\n==================== Purpose of {}".format(self.command))
+ print(doc)
+ print("\n==================== Specific usage for {}".format(self.command))
self.command_parser.print_help()
if example:
- print "\n==================== %s example(s)"%self.command
- print example
+ print("\n==================== {} example(s)".format(self.command))
+ print(example)
def create_parser_global(self):
# Generate command line parser
parser = OptionParser(add_help_option=False,
usage="sfi [sfi_options] command [cmd_options] [cmd_args]",
- description="Commands: %s"%(" ".join(commands_list)))
+ description="Commands: {}".format(" ".join(commands_list)))
parser.add_option("-r", "--registry", dest="registry",
help="root registry", metavar="URL", default=None)
parser.add_option("-s", "--sliceapi", dest="sm", default=None, metavar="URL",
(_, args_string, __,canonical) = commands_dict[command]
parser = OptionParser(add_help_option=False,
- usage="sfi [sfi_options] %s [cmd_options] %s"
- % (command, args_string))
+ usage="sfi [sfi_options] {} [cmd_options] {}"\
+ .format(command, args_string))
parser.add_option ("-h","--help",dest='help',action='store_true',default=False,
help="Summary of one command usage")
if canonical in ("register", "update"):
parser.add_option('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
- parser.add_option('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
+ parser.add_option('-t', '--type', dest='type', metavar='<type>', help='object type (2 first chars is enough)', default=None)
parser.add_option('-e', '--email', dest='email', default="", help="email (mandatory for users)")
+ parser.add_option('-n', '--name', dest='name', default="", help="name (optional for authorities)")
parser.add_option('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
default=None)
parser.add_option('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
help="renew as long as possible")
# registy filter option
if canonical in ("list", "show", "remove"):
- parser.add_option("-t", "--type", dest="type", type="choice",
- help="type filter ([all]|user|slice|authority|node|aggregate)",
- choices=("all", "user", "slice", "authority", "node", "aggregate"),
- default="all")
+ parser.add_option("-t", "--type", dest="type", metavar="<type>",
+ default="all",
+ help="type filter - 2 first chars is enough ([all]|user|slice|authority|node|aggregate)")
if canonical in ("show"):
parser.add_option("-k","--key",dest="keys",action="append",default=[],
help="specify specific keys to be displayed from record")
help="call Resolve without the 'details' option")
if canonical in ("resources", "describe"):
# rspec version
- parser.add_option("-r", "--rspec-version", dest="rspec_version", default="GENI 3",
- help="schema type and version of resulting RSpec")
+ parser.add_option("-r", "--rspec-version", dest="rspec_version", default=DEFAULT_RSPEC_VERSION,
+ help="schema type and version of resulting RSpec (default:{})".format(DEFAULT_RSPEC_VERSION))
# disable/enable cached rspecs
parser.add_option("-c", "--current", dest="current", default=False,
action="store_true",
#panos: a new option to define the type of information about resources a user is interested in
parser.add_option("-i", "--info", dest="info",
help="optional component information", default=None)
- # a new option to retreive or not reservation-oriented RSpecs (leases)
+ # a new option to retrieve or not reservation-oriented RSpecs (leases)
parser.add_option("-l", "--list_leases", dest="list_leases", type="choice",
- help="Retreive or not reservation-oriented RSpecs ([resources]|leases|all )",
+ help="Retrieve or not reservation-oriented RSpecs ([resources]|leases|all)",
choices=("all", "resources", "leases"), default="resources")
(doc, args_string, example, canonical) = commands_dict[command]
method=getattr(self, canonical, None)
if not method:
- print "sfi: unknown command %s"%command
- raise SystemExit,"Unknown command %s"%command
+ print("sfi: unknown command {}".format(command))
+ raise SystemExit("Unknown command {}".format(command))
+ for arg in command_args:
+ if 'help' in arg or arg == '-h':
+ self.print_help()
+ sys.exit(1)
return method(command_options, command_args)
def main(self):
sys.exit(1)
self.command_options = command_options
+ # allow incoming type names to be abbreviated to their first 2 characters
+ if hasattr(command_options, 'type'):
+ command_options.type = normalize_type(command_options.type)
+ if not command_options.type:
+ sys.exit(1)
+
self.read_config ()
self.bootstrap ()
- self.logger.debug("Command=%s" % self.command)
+ self.logger.debug("Command={}".format(self.command))
try:
retcod = self.dispatch(command, command_options, command_args)
except SystemExit:
return 1
except:
- self.logger.log_exc ("sfi command %s failed"%command)
+ self.logger.log_exc ("sfi command {} failed".format(command))
return 1
return retcod
config.save(config_file)
except:
- self.logger.critical("Failed to read configuration file %s"%config_file)
+ self.logger.critical("Failed to read configuration file {}".format(config_file))
self.logger.info("Make sure to remove the export clauses and to add quotes")
if self.options.verbose==0:
self.logger.info("Re-run with -v for more details")
else:
- self.logger.log_exc("Could not read config file %s"%config_file)
+ self.logger.log_exc("Could not read config file {}".format(config_file))
sys.exit(1)
self.config_instance=config
elif hasattr(config, "SFI_SM"):
self.sm_url = config.SFI_SM
else:
- self.logger.error("You need to set e.g. SFI_SM='http://your.slicemanager.url:12347/' in %s" % config_file)
+ self.logger.error("You need to set e.g. SFI_SM='http://your.slicemanager.url:12347/' in {}".format(config_file))
errors += 1
# Set Registry URL
elif hasattr(config, "SFI_REGISTRY"):
self.reg_url = config.SFI_REGISTRY
else:
- self.logger.error("You need to set e.g. SFI_REGISTRY='http://your.registry.url:12345/' in %s" % config_file)
+ self.logger.error("You need to set e.g. SFI_REGISTRY='http://your.registry.url:12345/' in {}".format(config_file))
errors += 1
# Set user HRN
elif hasattr(config, "SFI_USER"):
self.user = config.SFI_USER
else:
- self.logger.error("You need to set e.g. SFI_USER='plc.princeton.username' in %s" % config_file)
+ self.logger.error("You need to set e.g. SFI_USER='plc.princeton.username' in {}".format(config_file))
errors += 1
# Set authority HRN
elif hasattr(config, "SFI_AUTH"):
self.authority = config.SFI_AUTH
else:
- self.logger.error("You need to set e.g. SFI_AUTH='plc.princeton' in %s" % config_file)
+ self.logger.error("You need to set e.g. SFI_AUTH='plc.princeton' in {}".format(config_file))
errors += 1
self.config_file=config_file
if not os.path.isfile(client_bootstrap.private_key_filename()):
self.logger.info ("private key not found, trying legacy name")
try:
- legacy_private_key = os.path.join (self.options.sfi_dir, "%s.pkey"%Xrn.unescape(get_leaf(self.user)))
- self.logger.debug("legacy_private_key=%s"%legacy_private_key)
+ legacy_private_key = os.path.join (self.options.sfi_dir, "{}.pkey"
+ .format(Xrn.unescape(get_leaf(self.user))))
+ self.logger.debug("legacy_private_key={}"
+ .format(legacy_private_key))
client_bootstrap.init_private_key_if_missing (legacy_private_key)
- self.logger.info("Copied private key from legacy location %s"%legacy_private_key)
+ self.logger.info("Copied private key from legacy location {}"
+ .format(legacy_private_key))
except:
self.logger.log_exc("Can't find private key ")
sys.exit(1)
object_hrn = object_gid.get_hrn()
if not object_cred.get_privileges().get_all_delegate():
- self.logger.error("Object credential %s does not have delegate bit set"%object_hrn)
+ self.logger.error("Object credential {} does not have delegate bit set"
+ .format(object_hrn))
return
# the delegating user's gid
def registry (self):
# cache the result
if not hasattr (self, 'registry_proxy'):
- self.logger.info("Contacting Registry at: %s"%self.reg_url)
- self.registry_proxy = SfaServerProxy(self.reg_url, self.private_key, self.my_gid,
- timeout=self.options.timeout, verbose=self.options.debug)
+ self.logger.info("Contacting Registry at: {}".format(self.reg_url))
+ self.registry_proxy \
+ = SfaServerProxy(self.reg_url, self.private_key, self.my_gid,
+ timeout=self.options.timeout, verbose=self.options.debug)
return self.registry_proxy
def sliceapi (self):
records = self.registry().Resolve(node_hrn, self.my_credential_string)
records = filter_records('node', records)
if not records:
- self.logger.warning("No such component:%r"% opts.component)
+ self.logger.warning("No such component:{}".format(opts.component))
record = records[0]
- cm_url = "http://%s:%d/"%(record['hostname'],CM_PORT)
+ cm_url = "http://{}:{}/".format(record['hostname'], CM_PORT)
self.sliceapi_proxy=SfaServerProxy(cm_url, self.private_key, self.my_gid)
else:
# otherwise use what was provided as --sliceapi, or SFI_SM in the config
if not self.sm_url.startswith('http://') or self.sm_url.startswith('https://'):
self.sm_url = 'http://' + self.sm_url
- self.logger.info("Contacting Slice Manager at: %s"%self.sm_url)
- self.sliceapi_proxy = SfaServerProxy(self.sm_url, self.private_key, self.my_gid,
- timeout=self.options.timeout, verbose=self.options.debug)
+ self.logger.info("Contacting Slice Manager at: {}".format(self.sm_url))
+ self.sliceapi_proxy \
+ = SfaServerProxy(self.sm_url, self.private_key, self.my_gid,
+ timeout=self.options.timeout, verbose=self.options.debug)
return self.sliceapi_proxy
def get_cached_server_version(self, server):
cache = Cache(cache_file)
except IOError:
cache = Cache()
- self.logger.info("Local cache not found at: %s" % cache_file)
+ self.logger.info("Local cache not found at: {}".format(cache_file))
if cache:
version = cache.get(cache_key)
version= ReturnValue.get_value(result)
# cache version for 20 minutes
cache.add(cache_key, version, ttl= 60*20)
- self.logger.info("Updating cache file %s" % cache_file)
+ self.logger.info("Updating cache file {}".format(cache_file))
cache.save_to_file(cache_file)
return version
if (os.path.isfile(file)):
return file
else:
- self.logger.critical("No such rspec file %s"%rspec)
+ self.logger.critical("No such rspec file {}".format(rspec))
sys.exit(1)
def get_record_file(self, record):
if (os.path.isfile(file)):
return file
else:
- self.logger.critical("No such registry record file %s"%record)
+ self.logger.critical("No such registry record file {}".format(record))
sys.exit(1)
if not output:
return 0
# something went wrong
- print 'ERROR:',output
+ print('ERROR:', output)
return 1
#==========================================================================
@declare_command("","")
def config (self, options, args):
"Display contents of current config"
- print "# From configuration file %s"%self.config_file
+ print("# From configuration file {}".format(self.config_file))
flags=[ ('sfi', [ ('registry','reg_url'),
('auth','authority'),
('user','user'),
flags.append ( ('myslice', ['backend', 'delegate', 'platform', 'username'] ) )
for (section, tuples) in flags:
- print "[%s]"%section
+ print("[{}]".format(section))
try:
- for (external_name, internal_name) in tuples:
- print "%-20s = %s"%(external_name,getattr(self,internal_name))
+ for external_name, internal_name in tuples:
+ print("{:<20} = {}".format(external_name, getattr(self, internal_name)))
except:
- for name in tuples:
- varname="%s_%s"%(section.upper(),name.upper())
- value=getattr(self.config_instance,varname)
- print "%-20s = %s"%(name,value)
+ for external_name, internal_name in tuples:
+ varname = "{}_{}".format(section.upper(), external_name.upper())
+ value = getattr(self.config_instance,varname)
+ print("{:<20} = {}".format(external_name, value))
# xxx should analyze result
return 0
record_dicts = self.registry().Resolve(hrn, self.my_credential_string, resolve_options)
record_dicts = filter_records(options.type, record_dicts)
if not record_dicts:
- self.logger.error("No record of type %s"% options.type)
+ self.logger.error("No record of type {}".format(options.type))
return
# user has required to focus on some keys
if options.keys:
records = [ Record(dict=record_dict) for record_dict in record_dicts ]
for record in records:
if (options.format == "text"): record.dump(sort=True)
- else: print record.save_as_xml()
+ else: print(record.save_as_xml())
if options.file:
save_records_to_file(options.file, record_dicts, options.fileformat)
# xxx should analyze result
try:
record_filepath = args[0]
rec_file = self.get_record_file(record_filepath)
- record_dict.update(load_record_from_file(rec_file).todict())
+ record_dict.update(load_record_from_file(rec_file).record_to_dict())
except:
- print "Cannot load record file %s"%record_filepath
+ print("Cannot load record file {}".format(record_filepath))
sys.exit(1)
if options:
- record_dict.update(load_record_from_opts(options).todict())
+ record_dict.update(load_record_from_opts(options).record_to_dict())
# we should have a type by now
if 'type' not in record_dict :
self.print_help()
if len(args) > 0:
record_filepath = args[0]
rec_file = self.get_record_file(record_filepath)
- record_dict.update(load_record_from_file(rec_file).todict())
+ record_dict.update(load_record_from_file(rec_file).record_to_dict())
if options:
- record_dict.update(load_record_from_opts(options).todict())
+ record_dict.update(load_record_from_opts(options).record_to_dict())
# at the very least we need 'type' here
- if 'type' not in record_dict:
+ if 'type' not in record_dict or record_dict['type'] is None:
self.print_help()
sys.exit(1)
elif record_dict['type'] in ['node']:
cred = self.my_authority_credential_string()
else:
- raise "unknown record type" + record_dict['type']
+ raise Exception("unknown record type {}".format(record_dict['type']))
if options.show_credential:
show_credentials(cred)
update = self.registry().Update(record_dict, cred)
# ==================================================================
# show rspec for named slice
- @declare_command("","")
+ @declare_command("","",['discover'])
def resources(self, options, args):
"""
discover available resources (ListResources)
if self.options.raw:
save_raw_to_file(delete, self.options.raw, self.options.rawformat, self.options.rawbanner)
else:
- print value
+ print(value)
return self.success (delete)
@declare_command("slice_hrn rspec","")
"""
server = self.sliceapi()
server_version = self.get_cached_server_version(server)
+ if len(args) != 2:
+ self.print_help()
+ sys.exit(1)
slice_hrn = args[0]
+ rspec_file = self.get_rspec_file(args[1])
+
slice_urn = Xrn(slice_hrn, type='slice').get_urn()
# credentials
show_credentials(creds)
# rspec
- rspec_file = self.get_rspec_file(args[1])
- rspec = open(rspec_file).read()
api_options = {}
api_options ['call_id'] = unique_call_id()
# users
api_options['sfa_users'] = sfa_users
api_options['geni_users'] = geni_users
- allocate = server.Allocate(slice_urn, creds, rspec, api_options)
+ with open(rspec_file) as rspec:
+ rspec_xml = rspec.read()
+ allocate = server.Allocate(slice_urn, creds, rspec_xml, api_options)
value = ReturnValue.get_value(allocate)
if self.options.raw:
save_raw_to_file(allocate, self.options.raw, self.options.rawformat, self.options.rawbanner)
if options.file is not None:
save_rspec_to_file (value['geni_rspec'], options.file)
if (self.options.raw is None) and (options.file is None):
- print value
+ print(value)
return self.success(allocate)
@declare_command("slice_hrn [<sliver_urn>...]","")
if options.file is not None:
save_rspec_to_file (value['geni_rspec'], options.file)
if (self.options.raw is None) and (options.file is None):
- print value
+ print(value)
return self.success(provision)
@declare_command("slice_hrn","")
if self.options.raw:
save_raw_to_file(status, self.options.raw, self.options.rawformat, self.options.rawbanner)
else:
- print value
+ print(value)
return self.success (status)
@declare_command("slice_hrn [<sliver_urn>...] action","")
if self.options.raw:
save_raw_to_file(perform_action, self.options.raw, self.options.rawformat, self.options.rawbanner)
else:
- print value
+ print(value)
return self.success (perform_action)
@declare_command("slice_hrn [<sliver_urn>...] time",
if self.options.raw:
save_raw_to_file(renew, self.options.raw, self.options.rawformat, self.options.rawbanner)
else:
- print value
+ print(value)
return self.success(renew)
@declare_command("slice_hrn","")
if self.options.raw:
save_raw_to_file(shutdown, self.options.raw, self.options.rawformat, self.options.rawbanner)
else:
- print value
+ print(value)
return self.success (shutdown)
@declare_command("[name]","")
if options.file:
filename = options.file
else:
- filename = os.sep.join([self.options.sfi_dir, '%s.gid' % target_hrn])
- self.logger.info("writing %s gid to %s" % (target_hrn, filename))
+ filename = os.sep.join([self.options.sfi_dir, '{}.gid'.format(target_hrn)])
+ self.logger.info("writing {} gid to {}".format(target_hrn, filename))
GID(string=gid).save_to_file(filename)
# xxx should analyze result
return 0
to_hrn = args[0]
# support for several delegations in the same call
# so first we gather the things to do
- tuples=[]
+ tuples = []
for slice_hrn in options.delegate_slices:
- message="%s.slice"%slice_hrn
+ message = "{}.slice".format(slice_hrn)
original = self.slice_credential_string(slice_hrn)
tuples.append ( (message, original,) )
if options.delegate_pi:
my_authority=self.authority
- message="%s.pi"%my_authority
+ message = "{}.pi".format(my_authority)
original = self.my_authority_credential_string()
tuples.append ( (message, original,) )
for auth_hrn in options.delegate_auths:
- message="%s.auth"%auth_hrn
- original=self.authority_credential_string(auth_hrn)
+ message = "{}.auth".format(auth_hrn)
+ original = self.authority_credential_string(auth_hrn)
tuples.append ( (message, original, ) )
# if nothing was specified at all at this point, let's assume -u
if not tuples: options.delegate_user=True
# this user cred
if options.delegate_user:
- message="%s.user"%self.user
+ message = "{}.user".format(self.user)
original = self.my_credential_string
tuples.append ( (message, original, ) )
for (message,original) in tuples:
delegated_string = self.client_bootstrap.delegate_credential_string(original, to_hrn, to_type)
delegated_credential = Credential (string=delegated_string)
- filename = os.path.join ( self.options.sfi_dir,
- "%s_for_%s.%s.cred"%(message,to_hrn,to_type))
+ filename = os.path.join(self.options.sfi_dir,
+ "{}_for_{}.{}.cred".format(message, to_hrn, to_type))
delegated_credential.save_to_file(filename, save_parents=True)
- self.logger.info("delegated credential for %s to %s and wrote to %s"%(message,to_hrn,filename))
+ self.logger.info("delegated credential for {} to {} and wrote to {}"
+ .format(message, to_hrn, filename))
####################
@declare_command("","""$ less +/myslice sfi_config
else:
full_key="MYSLICE_" + key.upper()
value=getattr(self.config_instance,full_key,None)
- if value: myslice_dict[key]=value
- else: print "Unsufficient config, missing key %s in [myslice] section of sfi_config"%key
+ if value:
+ myslice_dict[key]=value
+ else:
+ print("Insufficient config, missing key {} in [myslice] section of sfi_config"
+ .format(key))
if len(myslice_dict) != len(myslice_keys):
sys.exit(1)
# (b) figure whether we are PI for the authority where we belong
- self.logger.info("Resolving our own id %s"%self.user)
+ self.logger.info("Resolving our own id {}".format(self.user))
my_records=self.registry().Resolve(self.user,self.my_credential_string)
- if len(my_records)!=1: print "Cannot Resolve %s -- exiting"%self.user; sys.exit(1)
- my_record=my_records[0]
+ if len(my_records) != 1:
+ print("Cannot Resolve {} -- exiting".format(self.user))
+ sys.exit(1)
+ my_record = my_records[0]
my_auths_all = my_record['reg-pi-authorities']
- self.logger.info("Found %d authorities that we are PI for"%len(my_auths_all))
- self.logger.debug("They are %s"%(my_auths_all))
+ self.logger.info("Found {} authorities that we are PI for".format(len(my_auths_all)))
+ self.logger.debug("They are {}".format(my_auths_all))
my_auths = my_auths_all
if options.delegate_auths:
my_auths = list(set(my_auths_all).intersection(set(options.delegate_auths)))
- self.logger.debug("Restricted to user-provided auths"%(my_auths))
+ self.logger.debug("Restricted to user-provided auths {}".format(my_auths))
# (c) get the set of slices that we are in
my_slices_all=my_record['reg-slices']
- self.logger.info("Found %d slices that we are member of"%len(my_slices_all))
- self.logger.debug("They are: %s"%(my_slices_all))
+ self.logger.info("Found {} slices that we are member of".format(len(my_slices_all)))
+ self.logger.debug("They are: {}".format(my_slices_all))
my_slices = my_slices_all
# if user provided slices, deal only with these - if they are found
if options.delegate_slices:
my_slices = list(set(my_slices_all).intersection(set(options.delegate_slices)))
- self.logger.debug("Restricted to user-provided slices: %s"%(my_slices))
+ self.logger.debug("Restricted to user-provided slices: {}".format(my_slices))
# (d) make sure we have *valid* credentials for all these
hrn_credentials=[]
delegated_credential = self.client_bootstrap.delegate_credential_string (credential, delegatee_hrn, delegatee_type)
# save these so user can monitor what she's uploaded
filename = os.path.join ( self.options.sfi_dir,
- "%s.%s_for_%s.%s.cred"%(hrn,htype,delegatee_hrn,delegatee_type))
+ "{}.{}_for_{}.{}.cred"\
+ .format(hrn, htype, delegatee_hrn, delegatee_type))
with file(filename,'w') as f:
f.write(delegated_credential)
- self.logger.debug("(Over)wrote %s"%filename)
+ self.logger.debug("(Over)wrote {}".format(filename))
hrn_delegated_credentials.append ((hrn, htype, delegated_credential, filename, ))
# (f) and finally upload them to manifold server
# xxx todo add an option so the password can be set on the command line
# (but *NOT* in the config file) so other apps can leverage this
- self.logger.info("Uploading on backend at %s"%myslice_dict['backend'])
+ self.logger.info("Uploading on backend at {}".format(myslice_dict['backend']))
uploader = ManifoldUploader (logger=self.logger,
url=myslice_dict['backend'],
platform=myslice_dict['platform'],
# inspect
inspect=Credential(string=delegated_credential)
expire_datetime=inspect.get_expiration()
- message="%s (%s) [exp:%s]"%(hrn,htype,expire_datetime)
+ message="{} ({}) [exp:{}]".format(hrn, htype, expire_datetime)
if uploader.upload(delegated_credential,message=message):
count_success+=1
count_all+=1
- self.logger.info("Successfully uploaded %d/%d credentials"%(count_success,count_all))
+ self.logger.info("Successfully uploaded {}/{} credentials"
+ .format(count_success, count_all))
# at first I thought we would want to save these,
# like 'sfi delegate does' but on second thought
trusted_certs = ReturnValue.get_value(trusted_certs)
for trusted_cert in trusted_certs:
- print "\n===========================================================\n"
+ print("\n===========================================================\n")
gid = GID(string=trusted_cert)
gid.dump()
cert = Certificate(string=trusted_cert)
- self.logger.debug('Sfi.trusted -> %r'%cert.get_subject())
- print "Certificate:\n%s\n\n"%trusted_cert
+ self.logger.debug('Sfi.trusted -> {}'.format(cert.get_subject()))
+ print("Certificate:\n{}\n\n".format(trusted_cert))
# xxx should analyze result
return 0
+++ /dev/null
-"""
-This API is adapted for OpenLDAP. The file contains all LDAP classes and methods
-needed to:
-- Load the LDAP connection configuration file (login, address..) with LdapConfig
-- Connect to LDAP with ldap_co
-- Create a unique LDAP login and password for a user based on his email or last
-name and first name with LoginPassword.
-- Manage entries in LDAP using SFA records with LDAPapi (Search, Add, Delete,
-Modify)
-
-"""
-import random
-from passlib.hash import ldap_salted_sha1 as lssha
-
-from sfa.util.xrn import get_authority
-from sfa.util.sfalogging import logger
-from sfa.util.config import Config
-
-import ldap
-import ldap.modlist as modlist
-
-import os.path
-
-
-class LdapConfig():
- """
- Ldap configuration class loads the configuration file and sets the
- ldap IP address, password, people dn, web dn, group dn. All these settings
- were defined in a separate file ldap_config.py to avoid sharing them in
- the SFA git as it contains sensible information.
-
- """
- def __init__(self, config_file='/etc/sfa/ldap_config.py'):
- """Loads configuration from file /etc/sfa/ldap_config.py and set the
- parameters for connection to LDAP.
-
- """
-
- try:
- execfile(config_file, self.__dict__)
-
- self.config_file = config_file
- # path to configuration data
- self.config_path = os.path.dirname(config_file)
- except IOError:
- raise IOError, "Could not find or load the configuration file: %s" \
- % config_file
-
-
-class ldap_co:
- """ Set admin login and server configuration variables."""
-
- def __init__(self):
- """Fetch LdapConfig attributes (Ldap server connection parameters and
- defines port , version and subtree scope.
-
- """
- #Iotlab PROD LDAP parameters
- self.ldapserv = None
- ldap_config = LdapConfig()
- self.config = ldap_config
- self.ldapHost = ldap_config.LDAP_IP_ADDRESS
- self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN
- self.ldapGroupDN = ldap_config.LDAP_GROUP_DN
- self.ldapAdminDN = ldap_config.LDAP_WEB_DN
- self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD
- self.ldapPort = ldap.PORT
- self.ldapVersion = ldap.VERSION3
- self.ldapSearchScope = ldap.SCOPE_SUBTREE
-
- def connect(self, bind=True):
- """Enables connection to the LDAP server.
-
- :param bind: Set the bind parameter to True if a bind is needed
- (for add/modify/delete operations). Set to False otherwise.
- :type bind: boolean
- :returns: dictionary with status of the connection. True if Successful,
- False if not and in this case the error
- message( {'bool', 'message'} ).
- :rtype: dict
-
- """
- try:
- self.ldapserv = ldap.open(self.ldapHost)
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
- # Bind with authentification
- if(bind):
- return self.bind()
-
- else:
- return {'bool': True}
-
- def bind(self):
- """ Binding method.
-
- :returns: dictionary with the bind status. True if Successful,
- False if not and in this case the error message({'bool','message'})
- :rtype: dict
-
- """
- try:
- # Opens a connection after a call to ldap.open in connect:
- self.ldapserv = ldap.initialize("ldap://" + self.ldapHost)
-
- # Bind/authenticate with a user with apropriate
- #rights to add objects
- self.ldapserv.simple_bind_s(self.ldapAdminDN,
- self.ldapAdminPassword)
-
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
- return {'bool': True}
-
- def close(self):
- """Close the LDAP connection.
-
- Can throw an exception if the unbinding fails.
-
- :returns: dictionary with the bind status if the unbinding failed and
- in this case the dict contains an error message. The dictionary keys
- are : ({'bool','message'})
- :rtype: dict or None
-
- """
- try:
- self.ldapserv.unbind_s()
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
-
-class LoginPassword():
- """
-
- Class to handle login and password generation, using custom login generation
- algorithm.
-
- """
- def __init__(self):
- """
-
- Sets password and login maximum length, and defines the characters that
- can be found in a random generated password.
-
- """
- self.login_max_length = 8
- self.length_password = 8
- self.chars_password = ['!', '$', '(',')', '*', '+', ',', '-', '.',
- '0', '1', '2', '3', '4', '5', '6', '7', '8',
- '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
- 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
- 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
- '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- '\'']
-
- @staticmethod
- def clean_user_names(record):
- """
-
- Removes special characters such as '-', '_' , '[', ']' and ' ' from the
- first name and last name.
-
- :param record: user's record
- :type record: dict
- :returns: lower_first_name and lower_last_name if they were found
- in the user's record. Return None, none otherwise.
- :rtype: string, string or None, None.
-
- """
- if 'first_name' in record and 'last_name' in record:
- #Remove all special characters from first_name/last name
- lower_first_name = record['first_name'].replace('-', '')\
- .replace('_', '').replace('[', '')\
- .replace(']', '').replace(' ', '')\
- .lower()
- lower_last_name = record['last_name'].replace('-', '')\
- .replace('_', '').replace('[', '')\
- .replace(']', '').replace(' ', '')\
- .lower()
- return lower_first_name, lower_last_name
- else:
- return None, None
-
- @staticmethod
- def extract_name_from_email(record):
- """
-
- When there is no valid first name and last name in the record,
- the email is used to generate the login. Here, we assume the email
- is firstname.lastname@something.smthg. The first name and last names
- are extracted from the email, special charcaters are removed and
- they are changed into lower case.
-
- :param record: user's data
- :type record: dict
- :returns: the first name and last name taken from the user's email.
- lower_first_name, lower_last_name.
- :rtype: string, string
-
- """
-
- email = record['email']
- email = email.split('@')[0].lower()
- lower_first_name = None
- lower_last_name = None
- #Assume there is first name and last name in email
- #if there is a separator
- separator_list = ['.', '_', '-']
- for sep in separator_list:
- if sep in email:
- mail = email.split(sep)
- lower_first_name = mail[0]
- lower_last_name = mail[1]
- break
-
- #Otherwise just take the part before the @ as the
- #lower_first_name and lower_last_name
- if lower_first_name is None:
- lower_first_name = email
- lower_last_name = email
-
- return lower_first_name, lower_last_name
-
- def get_user_firstname_lastname(self, record):
- """
-
- Get the user first name and last name from the information we have in
- the record.
-
- :param record: user's information
- :type record: dict
- :returns: the user's first name and last name.
-
- .. seealso:: clean_user_names
- .. seealso:: extract_name_from_email
-
- """
- lower_first_name, lower_last_name = self.clean_user_names(record)
-
- #No first name and last name check email
- if lower_first_name is None and lower_last_name is None:
-
- lower_first_name, lower_last_name = \
- self.extract_name_from_email(record)
-
- return lower_first_name, lower_last_name
-
- def choose_sets_chars_for_login(self, lower_first_name, lower_last_name):
- """
-
- Algorithm to select sets of characters from the first name and last
- name, depending on the lenght of the last name and the maximum login
- length which in our case is set to 8 characters.
-
- :param lower_first_name: user's first name in lower case.
- :param lower_last_name: usr's last name in lower case.
- :returns: user's login
- :rtype: string
-
- """
- length_last_name = len(lower_last_name)
- self.login_max_length = 8
-
- #Try generating a unique login based on first name and last name
-
- if length_last_name >= self.login_max_length:
- login = lower_last_name[0:self.login_max_length]
- index = 0
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name >= 4:
- login = lower_last_name
- index = 0
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name == 3:
- login = lower_first_name[0:1] + lower_last_name
- index = 1
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name == 2:
- if len(lower_first_name) >= 2:
- login = lower_first_name[0:2] + lower_last_name
- index = 2
- logger.debug("login : %s index : %s" % (login, index))
- else:
- logger.error("LoginException : \
- Generation login error with \
- minimum four characters")
-
- else:
- logger.error("LDAP LdapGenerateUniqueLogin failed : \
- impossible to generate unique login for %s %s"
- % (lower_first_name, lower_last_name))
- return index, login
-
- def generate_password(self):
- """
-
- Generate a password upon adding a new user in LDAP Directory
- (8 characters length). The generated password is composed of characters
- from the chars_password list.
-
- :returns: the randomly generated password
- :rtype: string
-
- """
- password = str()
-
- length = len(self.chars_password)
- for index in range(self.length_password):
- char_index = random.randint(0, length - 1)
- password += self.chars_password[char_index]
-
- return password
-
- @staticmethod
- def encrypt_password(password):
- """
-
- Use passlib library to make a RFC2307 LDAP encrypted password salt size
- is 8, use sha-1 algorithm.
-
- :param password: password not encrypted.
- :type password: string
- :returns: Returns encrypted password.
- :rtype: string
-
- """
- #Keep consistency with Java Iotlab's LDAP API
- #RFC2307SSHAPasswordEncryptor so set the salt size to 8 bytes
- return lssha.encrypt(password, salt_size=8)
-
-
-class LDAPapi:
- """Defines functions to insert and search entries in the LDAP.
-
- .. note:: class supposes the unix schema is used
-
- """
- def __init__(self):
- logger.setLevelDebug()
-
- #SFA related config
-
- config = Config()
- self.login_pwd = LoginPassword()
- self.authname = config.SFA_REGISTRY_ROOT_AUTH
- self.conn = ldap_co()
- self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS
- self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN
- self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER
- self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH
- self.baseDN = self.conn.ldapPeopleDN
- self.ldapShell = '/bin/bash'
-
-
- def LdapGenerateUniqueLogin(self, record):
- """
-
- Generate login for adding a new user in LDAP Directory
- (four characters minimum length). Get proper last name and
- first name so that the user's login can be generated.
-
- :param record: Record must contain first_name and last_name.
- :type record: dict
- :returns: the generated login for the user described with record if the
- login generation is successful, None if it fails.
- :rtype: string or None
-
- """
- #For compatibility with other ldap func
- if 'mail' in record and 'email' not in record:
- record['email'] = record['mail']
-
- lower_first_name, lower_last_name = \
- self.login_pwd.get_user_firstname_lastname(record)
-
- index, login = self.login_pwd.choose_sets_chars_for_login(
- lower_first_name, lower_last_name)
-
- login_filter = '(uid=' + login + ')'
- get_attrs = ['uid']
- try:
- #Check if login already in use
-
- while (len(self.LdapSearch(login_filter, get_attrs)) is not 0):
-
- index += 1
- if index >= 9:
- logger.error("LoginException : Generation login error \
- with minimum four characters")
- else:
- try:
- login = \
- lower_first_name[0:index] + \
- lower_last_name[0:
- self.login_pwd.login_max_length
- - index]
- login_filter = '(uid=' + login + ')'
- except KeyError:
- print "lower_first_name - lower_last_name too short"
-
- logger.debug("LDAP.API \t LdapGenerateUniqueLogin login %s"
- % (login))
- return login
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapGenerateUniqueLogin Error %s" % (error))
- return None
-
- def find_max_uidNumber(self):
- """Find the LDAP max uidNumber (POSIX uid attribute).
-
- Used when adding a new user in LDAP Directory
-
- :returns: max uidNumber + 1
- :rtype: string
-
- """
- #First, get all the users in the LDAP
- get_attrs = "(uidNumber=*)"
- login_filter = ['uidNumber']
-
- result_data = self.LdapSearch(get_attrs, login_filter)
- #It there is no user in LDAP yet, First LDAP user
- if result_data == []:
- max_uidnumber = self.ldapUserUidNumberMin
- #Otherwise, get the highest uidNumber
- else:
- uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data]
- logger.debug("LDAPapi.py \tfind_max_uidNumber \
- uidNumberList %s " % (uidNumberList))
- max_uidnumber = max(uidNumberList) + 1
-
- return str(max_uidnumber)
-
-
- def get_ssh_pkey(self, record):
- """TODO ; Get ssh public key from sfa record
- To be filled by N. Turro ? or using GID pl way?
-
- """
- return 'A REMPLIR '
-
- @staticmethod
- #TODO Handle OR filtering in the ldap query when
- #dealing with a list of records instead of doing a for loop in GetPersons
- def make_ldap_filters_from_record(record=None):
- """Helper function to make LDAP filter requests out of SFA records.
-
- :param record: user's sfa record. Should contain first_name,last_name,
- email or mail, and if the record is enabled or not. If the dict
- record does not have all of these, must at least contain the user's
- email.
- :type record: dict
- :returns: LDAP request
- :rtype: string
-
- """
- req_ldap = ''
- req_ldapdict = {}
- if record :
- if 'first_name' in record and 'last_name' in record:
- if record['first_name'] != record['last_name']:
- req_ldapdict['cn'] = str(record['first_name'])+" "\
- + str(record['last_name'])
- if 'email' in record:
- req_ldapdict['mail'] = record['email']
- if 'mail' in record:
- req_ldapdict['mail'] = record['mail']
- if 'enabled' in record:
- if record['enabled'] is True:
- req_ldapdict['shadowExpire'] = '-1'
- else:
- req_ldapdict['shadowExpire'] = '0'
-
- #Hrn should not be part of the filter because the hrn
- #presented by a certificate of a SFA user not imported in
- #Iotlab does not include the iotlab login in it
- #Plus, the SFA user may already have an account with iotlab
- #using another login.
-
- logger.debug("\r\n \t LDAP.PY make_ldap_filters_from_record \
- record %s req_ldapdict %s"
- % (record, req_ldapdict))
-
- for k in req_ldapdict:
- req_ldap += '(' + str(k) + '=' + str(req_ldapdict[k]) + ')'
- if len(req_ldapdict.keys()) >1 :
- req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
- size = len(req_ldap)
- req_ldap = req_ldap[:(size-1)] + ')' + req_ldap[(size-1):]
- else:
- req_ldap = "(cn=*)"
-
- return req_ldap
-
- def make_ldap_attributes_from_record(self, record):
- """
-
- When adding a new user to Iotlab's LDAP, creates an attributes
- dictionnary from the SFA record understandable by LDAP. Generates the
- user's LDAP login.User is automatically validated (account enabled)
- and described as a SFA USER FROM OUTSIDE IOTLAB.
-
- :param record: must contain the following keys and values:
- first_name, last_name, mail, pkey (ssh key).
- :type record: dict
- :returns: dictionary of attributes using LDAP data structure model.
- :rtype: dict
-
- """
-
- attrs = {}
- attrs['objectClass'] = ["top", "person", "inetOrgPerson",
- "organizationalPerson", "posixAccount",
- "shadowAccount", "systemQuotas",
- "ldapPublicKey"]
-
- attrs['uid'] = self.LdapGenerateUniqueLogin(record)
- try:
- attrs['givenName'] = str(record['first_name']).lower().capitalize()
- attrs['sn'] = str(record['last_name']).lower().capitalize()
- attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']
- attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']
-
- except KeyError:
- attrs['givenName'] = attrs['uid']
- attrs['sn'] = attrs['uid']
- attrs['cn'] = attrs['uid']
- attrs['gecos'] = attrs['uid']
-
- attrs['quota'] = self.ldapUserQuotaNFS
- attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']
- attrs['loginShell'] = self.ldapShell
- attrs['gidNumber'] = self.ldapUserGidNumber
- attrs['uidNumber'] = self.find_max_uidNumber()
- attrs['mail'] = record['mail'].lower()
- try:
- attrs['sshPublicKey'] = record['pkey']
- except KeyError:
- attrs['sshPublicKey'] = self.get_ssh_pkey(record)
-
-
- #Password is automatically generated because SFA user don't go
- #through the Iotlab website used to register new users,
- #There is no place in SFA where users can enter such information
- #yet.
- #If the user wants to set his own password , he must go to the Iotlab
- #website.
- password = self.login_pwd.generate_password()
- attrs['userPassword'] = self.login_pwd.encrypt_password(password)
-
- #Account automatically validated (no mail request to admins)
- #Set to 0 to disable the account, -1 to enable it,
- attrs['shadowExpire'] = '-1'
-
- #Motivation field in Iotlab
- attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'
-
- attrs['ou'] = 'SFA' #Optional: organizational unit
- #No info about those here:
- attrs['l'] = 'To be defined'#Optional: Locality.
- attrs['st'] = 'To be defined' #Optional: state or province (country).
-
- return attrs
-
-
-
- def LdapAddUser(self, record) :
- """Add SFA user to LDAP if it is not in LDAP yet.
-
- :param record: dictionnary with the user's data.
- :returns: a dictionary with the status (Fail= False, Success= True)
- and the uid of the newly added user if successful, or the error
- message it is not. Dict has keys bool and message in case of
- failure, and bool uid in case of success.
- :rtype: dict
-
- .. seealso:: make_ldap_filters_from_record
-
- """
- logger.debug(" \r\n \t LDAP LdapAddUser \r\n\r\n ================\r\n ")
- user_ldap_attrs = self.make_ldap_attributes_from_record(record)
-
- #Check if user already in LDAP wih email, first name and last name
- filter_by = self.make_ldap_filters_from_record(user_ldap_attrs)
- user_exist = self.LdapSearch(filter_by)
- if user_exist:
- logger.warning(" \r\n \t LDAP LdapAddUser user %s %s \
- already exists" % (user_ldap_attrs['sn'],
- user_ldap_attrs['mail']))
- return {'bool': False}
-
- #Bind to the server
- result = self.conn.connect()
-
- if(result['bool']):
-
- # A dict to help build the "body" of the object
- logger.debug(" \r\n \t LDAP LdapAddUser attrs %s "
- % user_ldap_attrs)
-
- # The dn of our new entry/object
- dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN
-
- try:
- ldif = modlist.addModlist(user_ldap_attrs)
- logger.debug("LDAPapi.py add attrs %s \r\n ldif %s"
- % (user_ldap_attrs, ldif))
- self.conn.ldapserv.add_s(dn, ldif)
-
- logger.info("Adding user %s login %s in LDAP"
- % (user_ldap_attrs['cn'], user_ldap_attrs['uid']))
- except ldap.LDAPError, error:
- logger.log_exc("LDAP Add Error %s" % error)
- return {'bool': False, 'message': error}
-
- self.conn.close()
- return {'bool': True, 'uid': user_ldap_attrs['uid']}
- else:
- return result
-
- def LdapDelete(self, person_dn):
- """Deletes a person in LDAP. Uses the dn of the user.
-
- :param person_dn: user's ldap dn.
- :type person_dn: string
- :returns: dictionary with bool True if successful, bool False
- and the error if not.
- :rtype: dict
-
- """
- #Connect and bind
- result = self.conn.connect()
- if(result['bool']):
- try:
- self.conn.ldapserv.delete_s(person_dn)
- self.conn.close()
- return {'bool': True}
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP Delete Error %s" % error)
- return {'bool': False, 'message': error}
-
- def LdapDeleteUser(self, record_filter):
- """Deletes a SFA person in LDAP, based on the user's hrn.
-
- :param record_filter: Filter to find the user to be deleted. Must
- contain at least the user's email.
- :type record_filter: dict
- :returns: dict with bool True if successful, bool False and error
- message otherwise.
- :rtype: dict
-
- .. seealso:: LdapFindUser docstring for more info on record filter.
- .. seealso:: LdapDelete for user deletion
-
- """
- #Find uid of the person
- person = self.LdapFindUser(record_filter, [])
- logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s"
- % (record_filter, person))
-
- if person:
- dn = 'uid=' + person['uid'] + "," + self.baseDN
- else:
- return {'bool': False}
-
- result = self.LdapDelete(dn)
- return result
-
- def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
- """ Modifies a LDAP entry, replaces user's old attributes with
- the new ones given.
-
- :param dn: user's absolute name in the LDAP hierarchy.
- :param old_attributes_dict: old user's attributes. Keys must match
- the ones used in the LDAP model.
- :param new_attributes_dict: new user's attributes. Keys must match
- the ones used in the LDAP model.
- :type dn: string
- :type old_attributes_dict: dict
- :type new_attributes_dict: dict
- :returns: dict bool True if Successful, bool False if not.
- :rtype: dict
-
- """
-
- ldif = modlist.modifyModlist(old_attributes_dict, new_attributes_dict)
- # Connect and bind/authenticate
- result = self.conn.connect()
- if (result['bool']):
- try:
- self.conn.ldapserv.modify_s(dn, ldif)
- self.conn.close()
- return {'bool': True}
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapModify Error %s" % error)
- return {'bool': False}
-
-
- def LdapModifyUser(self, user_record, new_attributes_dict):
- """
-
- Gets the record from one user based on the user sfa recordand changes
- the attributes according to the specified new_attributes. Do not use
- this if we need to modify the uid. Use a ModRDN operation instead
- ( modify relative DN ).
-
- :param user_record: sfa user record.
- :param new_attributes_dict: new user attributes, keys must be the
- same as the LDAP model.
- :type user_record: dict
- :type new_attributes_dict: dict
- :returns: bool True if successful, bool False if not.
- :rtype: dict
-
- .. seealso:: make_ldap_filters_from_record for info on what is mandatory
- in the user_record.
- .. seealso:: make_ldap_attributes_from_record for the LDAP objectclass.
-
- """
- if user_record is None:
- logger.error("LDAP \t LdapModifyUser Need user record ")
- return {'bool': False}
-
- #Get all the attributes of the user_uid_login
- #person = self.LdapFindUser(record_filter,[])
- req_ldap = self.make_ldap_filters_from_record(user_record)
- person_list = self.LdapSearch(req_ldap, [])
- logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s"
- % (person_list))
-
- if person_list and len(person_list) > 1:
- logger.error("LDAP \t LdapModifyUser Too many users returned")
- return {'bool': False}
- if person_list is None:
- logger.error("LDAP \t LdapModifyUser User %s doesn't exist "
- % (user_record))
- return {'bool': False}
-
- # The dn of our existing entry/object
- #One result only from ldapSearch
- person = person_list[0][1]
- dn = 'uid=' + person['uid'][0] + "," + self.baseDN
-
- if new_attributes_dict:
- old = {}
- for k in new_attributes_dict:
- if k not in person:
- old[k] = ''
- else:
- old[k] = person[k]
- logger.debug(" LDAPapi.py \t LdapModifyUser new_attributes %s"
- % (new_attributes_dict))
- result = self.LdapModify(dn, old, new_attributes_dict)
- return result
- else:
- logger.error("LDAP \t LdapModifyUser No new attributes given. ")
- return {'bool': False}
-
-
- def LdapMarkUserAsDeleted(self, record):
- """
-
- Sets shadowExpire to 0, disabling the user in LDAP. Calls LdapModifyUser
- to change the shadowExpire of the user.
-
- :param record: the record of the user who has to be disabled.
- Should contain first_name,last_name, email or mail, and if the
- record is enabled or not. If the dict record does not have all of
- these, must at least contain the user's email.
- :type record: dict
- :returns: {bool: True} if successful or {bool: False} if not
- :rtype: dict
-
- .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
- """
-
- new_attrs = {}
- #Disable account
- new_attrs['shadowExpire'] = '0'
- logger.debug(" LDAPapi.py \t LdapMarkUserAsDeleted ")
- ret = self.LdapModifyUser(record, new_attrs)
- return ret
-
- def LdapResetPassword(self, record):
- """Resets password for the user whose record is the parameter and
- changes the corresponding entry in the LDAP.
-
- :param record: user's sfa record whose Ldap password must be reset.
- Should contain first_name,last_name,
- email or mail, and if the record is enabled or not. If the dict
- record does not have all of these, must at least contain the user's
- email.
- :type record: dict
- :returns: return value of LdapModifyUser. True if successful, False
- otherwise.
-
- .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
-
- """
- password = self.login_pwd.generate_password()
- attrs = {}
- attrs['userPassword'] = self.login_pwd.encrypt_password(password)
- logger.debug("LDAP LdapResetPassword encrypt_password %s"
- % (attrs['userPassword']))
- result = self.LdapModifyUser(record, attrs)
- return result
-
-
- def LdapSearch(self, req_ldap=None, expected_fields=None):
- """
- Used to search directly in LDAP, by using ldap filters and return
- fields. When req_ldap is None, returns all the entries in the LDAP.
-
- :param req_ldap: ldap style request, with appropriate filters,
- example: (cn=*).
- :param expected_fields: Fields in the user ldap entry that has to be
- returned. If None is provided, will return 'mail', 'givenName',
- 'sn', 'uid', 'sshPublicKey', 'shadowExpire'.
- :type req_ldap: string
- :type expected_fields: list
-
- .. seealso:: make_ldap_filters_from_record for req_ldap format.
-
- """
- result = self.conn.connect(bind=False)
- if (result['bool']):
-
- return_fields_list = []
- if expected_fields is None:
- return_fields_list = ['mail', 'givenName', 'sn', 'uid',
- 'sshPublicKey', 'shadowExpire']
- else:
- return_fields_list = expected_fields
- #No specifc request specified, get the whole LDAP
- if req_ldap is None:
- req_ldap = '(cn=*)'
-
- logger.debug("LDAP.PY \t LdapSearch req_ldap %s \
- return_fields_list %s" \
- %(req_ldap, return_fields_list))
-
- try:
- msg_id = self.conn.ldapserv.search(
- self.baseDN, ldap.SCOPE_SUBTREE,
- req_ldap, return_fields_list)
- #Get all the results matching the search from ldap in one
- #shot (1 value)
- result_type, result_data = \
- self.conn.ldapserv.result(msg_id, 1)
-
- self.conn.close()
-
- logger.debug("LDAP.PY \t LdapSearch result_data %s"
- % (result_data))
-
- return result_data
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapSearch Error %s" % error)
- return []
-
- else:
- logger.error("LDAP.PY \t Connection Failed")
- return
-
- def _process_ldap_info_for_all_users(self, result_data):
- """Process the data of all enabled users in LDAP.
-
- :param result_data: Contains information of all enabled users in LDAP
- and is coming from LdapSearch.
- :param result_data: list
-
- .. seealso:: LdapSearch
-
- """
- results = []
- logger.debug(" LDAP.py _process_ldap_info_for_all_users result_data %s "
- % (result_data))
- for ldapentry in result_data:
- logger.debug(" LDAP.py _process_ldap_info_for_all_users \
- ldapentry name : %s " % (ldapentry[1]['uid'][0]))
- tmpname = ldapentry[1]['uid'][0]
- hrn = self.authname + "." + tmpname
-
- tmpemail = ldapentry[1]['mail'][0]
- if ldapentry[1]['mail'][0] == "unknown":
- tmpemail = None
-
- try:
- results.append({
- 'type': 'user',
- 'pkey': ldapentry[1]['sshPublicKey'][0],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname ,
- 'email':tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry[1]['givenName'][0],
- 'last_name': ldapentry[1]['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': self.authname,
- 'peer_authority': '',
- 'pointer': -1,
- 'hrn': hrn,
- })
- except KeyError, error:
- logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s"
- % (error))
- return
-
- return results
-
- def _process_ldap_info_for_one_user(self, record, result_data):
- """
-
- Put the user's ldap data into shape. Only deals with one user
- record and one user data from ldap.
-
- :param record: user record
- :param result_data: Raw ldap data coming from LdapSearch
- :returns: user's data dict with 'type','pkey','uid', 'email',
- 'first_name' 'last_name''serial''authority''peer_authority'
- 'pointer''hrn'
- :type record: dict
- :type result_data: list
- :rtype :dict
-
- """
- #One entry only in the ldap data because we used a filter
- #to find one user only
- ldapentry = result_data[0][1]
- logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" % (ldapentry))
- tmpname = ldapentry['uid'][0]
-
- tmpemail = ldapentry['mail'][0]
- if ldapentry['mail'][0] == "unknown":
- tmpemail = None
-
- parent_hrn = None
- peer_authority = None
- if 'hrn' in record:
- hrn = record['hrn']
- parent_hrn = get_authority(hrn)
- if parent_hrn != self.authname:
- peer_authority = parent_hrn
- #In case the user was not imported from Iotlab LDAP
- #but from another federated site, has an account in
- #iotlab but currently using his hrn from federated site
- #then the login is different from the one found in its hrn
- if tmpname != hrn.split('.')[1]:
- hrn = None
- else:
- hrn = None
-
- results = {
- 'type': 'user',
- 'pkey': ldapentry['sshPublicKey'],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname,
- 'email': tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry['givenName'][0],
- 'last_name': ldapentry['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': parent_hrn,
- 'peer_authority': peer_authority,
- 'pointer': -1,
- 'hrn': hrn,
- }
- return results
-
- def LdapFindUser(self, record=None, is_user_enabled=None,
- expected_fields=None):
- """
-
- Search a SFA user with a hrn. User should be already registered
- in Iotlab LDAP.
-
- :param record: sfa user's record. Should contain first_name,last_name,
- email or mail. If no record is provided, returns all the users found
- in LDAP.
- :type record: dict
- :param is_user_enabled: is the user's iotlab account already valid.
- :type is_user_enabled: Boolean.
- :returns: LDAP entries from ldap matching the filter provided. Returns
- a single entry if one filter has been given and a list of
- entries otherwise.
- :rtype: dict or list
-
- """
- custom_record = {}
- if is_user_enabled:
- custom_record['enabled'] = is_user_enabled
- if record:
- custom_record.update(record)
-
- req_ldap = self.make_ldap_filters_from_record(custom_record)
- return_fields_list = []
- if expected_fields is None:
- return_fields_list = ['mail', 'givenName', 'sn', 'uid',
- 'sshPublicKey']
- else:
- return_fields_list = expected_fields
-
- result_data = self.LdapSearch(req_ldap, return_fields_list)
- logger.debug("LDAP.PY \t LdapFindUser result_data %s" % (result_data))
-
- if len(result_data) == 0:
- return None
- #Asked for a specific user
- if record is not None:
- results = self._process_ldap_info_for_one_user(record, result_data)
-
- else:
- #Asked for all users in ldap
- results = self._process_ldap_info_for_all_users(result_data)
- return results
\ No newline at end of file
+++ /dev/null
-"""
-File providing methods to generate valid RSpecs for the Iotlab testbed.
-Contains methods to get information on slice, slivers, nodes and leases,
-formatting them and turn it into a RSpec.
-"""
-from sfa.util.xrn import hrn_to_urn, urn_to_hrn
-from sfa.util.sfatime import utcparse, datetime_to_string
-
-from sfa.iotlab.iotlabxrn import IotlabXrn, xrn_object
-from sfa.rspecs.rspec import RSpec
-
-#from sfa.rspecs.elements.location import Location
-from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.node import NodeElement
-from sfa.rspecs.elements.login import Login
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.lease import Lease
-from sfa.rspecs.elements.granularity import Granularity
-from sfa.rspecs.version_manager import VersionManager
-from sfa.storage.model import SliverAllocation
-from sfa.rspecs.elements.versions.iotlabv1Node import IotlabPosition, \
- IotlabLocation
-
-from sfa.util.sfalogging import logger
-from sfa.util.xrn import Xrn
-
-import time
-
-class CortexlabAggregate:
- """Aggregate manager class for cortexlab. """
-
- sites = {}
- nodes = {}
- api = None
- interfaces = {}
- links = {}
- node_tags = {}
-
- prepared = False
-
- user_options = {}
-
- def __init__(self, driver):
- self.driver = driver
-
- def get_slice_and_slivers(self, slice_xrn, login=None):
- """
- Get the slices and the associated leases if any, from the cortexlab
- testbed. One slice can have mutliple leases.
- For each slice, get the nodes in the associated lease
- and create a sliver with the necessary info and insert it into the
- sliver dictionary, keyed on the node hostnames.
- Returns a dict of slivers based on the sliver's node_id.
- Called by get_rspec.
-
-
- :param slice_xrn: xrn of the slice
- :param login: user's login on cortexlab ldap
-
- :type slice_xrn: string
- :type login: string
- :returns: a list of slices dict and a list of Sliver object
- :rtype: (list, list)
-
- .. note: There is no real slivers in cortexlab, only leases. The goal
- is to be consistent with the SFA standard.
-
- """
-
-
- slivers = {}
- sfa_slice = None
- if slice_xrn is None:
- return (sfa_slice, slivers)
- slice_urn = hrn_to_urn(slice_xrn, 'slice')
- slice_hrn, _ = urn_to_hrn(slice_xrn)
-
- # GetSlices always returns a list, even if there is only one element
- slices = self.driver.GetSlices(slice_filter=str(slice_hrn),
- slice_filter_type='slice_hrn',
- login=login)
-
- logger.debug("CortexlabAggregate api \tget_slice_and_slivers \
- slice_hrn %s \r\n slices %s self.driver.hrn %s"
- % (slice_hrn, slices, self.driver.hrn))
- if slices == []:
- return (sfa_slice, slivers)
-
- # sort slivers by node id , if there is a job
- #and therefore, node allocated to this slice
- # for sfa_slice in slices:
- sfa_slice = slices[0]
- try:
- node_ids_list = sfa_slice['node_ids']
- except KeyError:
- logger.log_exc("CORTEXLABAGGREGATE \t \
- get_slice_and_slivers No nodes in the slice \
- - KeyError ")
- node_ids_list = []
- # continue
-
- for node in node_ids_list:
- sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
- sliver_xrn.set_authority(self.driver.hrn)
- sliver = Sliver({'sliver_id': sliver_xrn.urn,
- 'name': sfa_slice['hrn'],
- 'type': 'cortexlab-node',
- 'tags': []})
-
- slivers[node] = sliver
-
- #Add default sliver attribute :
- #connection information for cortexlab, assuming it is the same ssh
- # connection process
- # look in ldap:
- ldap_username = self.find_ldap_username_from_slice(sfa_slice)
-
- if ldap_username is not None:
- ssh_access = None
- slivers['default_sliver'] = {'ssh': ssh_access,
- 'login': ldap_username}
-
-
- logger.debug("CORTEXLABAGGREGATE api get_slice_and_slivers slivers %s "
- % (slivers))
- return (slices, slivers)
-
-
- def find_ldap_username_from_slice(self, sfa_slice):
- """
- Gets the ldap username of the user based on the information contained
- in ist sfa_slice record.
-
- :param sfa_slice: the user's slice record. Must contain the
- reg_researchers key.
- :type sfa_slice: dictionary
- :returns: ldap_username, the ldap user's login.
- :rtype: string
- """
-
- researchers = [sfa_slice['reg_researchers'][0].__dict__]
- # look in ldap:
- ldap_username = None
- ret = self.driver.testbed_shell.GetPersons(researchers)
- if len(ret) != 0:
- ldap_username = ret[0]['uid']
-
- return ldap_username
-
-
-
- def get_nodes(self, options=None):
- """Returns the nodes in the slice using the rspec format, with all the
- nodes' properties.
-
- Fetch the nodes ids in the slices dictionary and get all the nodes
- properties from OAR. Makes a rspec dicitonary out of this and returns
- it. If the slice does not have any job running or scheduled, that is
- it has no reserved nodes, then returns an empty list.
-
- :returns: An empty list if the slice has no reserved nodes, a rspec
- list with all the nodes and their properties (a dict per node)
- otherwise.
- :rtype: list
-
- .. seealso:: get_slice_and_slivers
-
- """
- filter_nodes = None
- if options:
- geni_available = options.get('geni_available')
- if geni_available == True:
- filter_nodes['boot_state'] = ['Alive']
-
- # slice_nodes_list = []
- # if slices is not None:
- # for one_slice in slices:
- # try:
- # slice_nodes_list = one_slice['node_ids']
- # # if we are dealing with a slice that has no node just
- # # return an empty list. In iotlab a slice can have multiple
- # # jobs scheduled, so it either has at least one lease or
- # # not at all.
- # except KeyError:
- # return []
-
- # get the granularity in second for the reservation system
- # grain = self.driver.testbed_shell.GetLeaseGranularity()
-
- nodes = self.driver.testbed_shell.GetNodes(node_filter_dict =
- filter_nodes)
-
- nodes_dict = {}
-
- #if slices, this means we got to list all the nodes given to this slice
- # Make a list of all the nodes in the slice before getting their
- #attributes
-
- for node in nodes:
- nodes_dict[node['node_id']] = node
-
- return nodes_dict
-
-
- def node_to_rspec_node(self, node):
- """ Creates a rspec node structure with the appropriate information
- based on the node information that can be found in the node dictionary.
-
- :param node: node data. this dict contains information about the node
- and must have the following keys : mobile, radio, archi, hostname,
- boot_state, site, x, y ,z (position).
- :type node: dictionary.
-
- :returns: node dictionary containing the following keys : mobile, archi,
- radio, component_id, component_name, component_manager_id,
- authority_id, boot_state, exclusive, hardware_types, location,
- position, granularity, tags.
- :rtype: dict
-
- """
-
- grain = self.driver.testbed_shell.GetLeaseGranularity()
- rspec_node = NodeElement()
-
- # xxx how to retrieve site['login_base']
- #site_id=node['site_id']
- #site=sites_dict[site_id]
-
- rspec_node['mobile'] = node['mobile']
- rspec_node['archi'] = node['archi']
- rspec_node['radio'] = node['radio']
- cortexlab_xrn = xrn_object(self.driver.testbed_shell.root_auth,
- node['hostname'])
-
- rspec_node['component_id'] = cortexlab_xrn.urn
- rspec_node['component_name'] = node['hostname']
- rspec_node['component_manager_id'] = \
- hrn_to_urn(self.driver.testbed_shell.root_auth,
- 'authority+sa')
-
- # Iotlab's nodes are federated : there is only one authority
- # for all Iotlab sites, registered in SFA.
- # Removing the part including the site
- # in authority_id SA 27/07/12
- rspec_node['authority_id'] = rspec_node['component_manager_id']
-
- # do not include boot state (<available> element)
- #in the manifest rspec
-
-
- rspec_node['boot_state'] = node['boot_state']
- # if node['hostname'] in reserved_nodes:
- # rspec_node['boot_state'] = "Reserved"
- rspec_node['exclusive'] = 'true'
- rspec_node['hardware_types'] = [HardwareType({'name': \
- 'iotlab-node'})]
-
- location = IotlabLocation({'country':'France', 'site': \
- node['site']})
- rspec_node['location'] = location
-
-
- position = IotlabPosition()
- for field in position :
- try:
- position[field] = node[field]
- except KeyError, error :
- logger.log_exc("Cortexlabaggregate\t node_to_rspec_node \
- position %s "% (error))
-
- rspec_node['position'] = position
-
-
- # Granularity
- granularity = Granularity({'grain': grain})
- rspec_node['granularity'] = granularity
- rspec_node['tags'] = []
- # if node['hostname'] in slivers:
- # # add sliver info
- # sliver = slivers[node['hostname']]
- # rspec_node['sliver_id'] = sliver['sliver_id']
- # rspec_node['client_id'] = node['hostname']
- # rspec_node['slivers'] = [sliver]
-
- # # slivers always provide the ssh service
- # login = Login({'authentication': 'ssh-keys', \
- # 'hostname': node['hostname'], 'port':'22', \
- # 'username': sliver['name']})
- # service = Services({'login': login})
- # rspec_node['services'] = [service]
-
- return rspec_node
-
-
- def rspec_node_to_geni_sliver(self, rspec_node, sliver_allocations=None):
- """Makes a geni sliver structure from all the nodes allocated
- to slivers in the sliver_allocations dictionary. Returns the states
- of the sliver.
-
- :param rspec_node: Node information contained in a rspec data structure
- fashion.
- :type rspec_node: dictionary
- :param sliver_allocations:
- :type sliver_allocations: dictionary
-
- :returns: Dictionary with the following keys: geni_sliver_urn,
- geni_expires, geni_allocation_status, geni_operational_status,
- geni_error.
-
- :rtype: dictionary
-
- .. seealso:: node_to_rspec_node
-
- """
- if sliver_allocations is None: sliver_allocations={}
-
- if rspec_node['sliver_id'] in sliver_allocations:
- # set sliver allocation and operational status
- sliver_allocation = sliver_allocations[rspec_node['sliver_id']]
- if sliver_allocation:
- allocation_status = sliver_allocation.allocation_state
- if allocation_status == 'geni_allocated':
- op_status = 'geni_pending_allocation'
- elif allocation_status == 'geni_provisioned':
- op_status = 'geni_ready'
- else:
- op_status = 'geni_unknown'
- else:
- allocation_status = 'geni_unallocated'
- else:
- allocation_status = 'geni_unallocated'
- op_status = 'geni_failed'
- # required fields
- geni_sliver = {'geni_sliver_urn': rspec_node['sliver_id'],
- 'geni_expires': rspec_node['expires'],
- 'geni_allocation_status' : allocation_status,
- 'geni_operational_status': op_status,
- 'geni_error': '',
- }
- return geni_sliver
-
- def sliver_to_rspec_node(self, sliver, sliver_allocations):
- """Used by describe to format node information into a rspec compliant
- structure.
-
- Creates a node rspec compliant structure by calling node_to_rspec_node.
- Adds slivers, if any, to rspec node structure. Returns the updated
- rspec node struct.
-
- :param sliver: sliver dictionary. Contains keys: urn, slice_id, hostname
- and slice_name.
- :type sliver: dictionary
- :param sliver_allocations: dictionary of slivers
- :type sliver_allocations: dict
-
- :returns: Node dictionary with all necessary data.
-
- .. seealso:: node_to_rspec_node
- """
- rspec_node = self.node_to_rspec_node(sliver)
- rspec_node['expires'] = datetime_to_string(utcparse(sliver['expires']))
- # add sliver info
- logger.debug("CORTEXLABAGGREGATE api \t sliver_to_rspec_node sliver \
- %s \r\nsliver_allocations %s" % (sliver,
- sliver_allocations))
- rspec_sliver = Sliver({'sliver_id': sliver['urn'],
- 'name': sliver['slice_id'],
- 'type': 'iotlab-exclusive',
- 'tags': []})
- rspec_node['sliver_id'] = rspec_sliver['sliver_id']
-
- if sliver['urn'] in sliver_allocations:
- rspec_node['client_id'] = sliver_allocations[
- sliver['urn']].client_id
- if sliver_allocations[sliver['urn']].component_id:
- rspec_node['component_id'] = sliver_allocations[
- sliver['urn']].component_id
- rspec_node['slivers'] = [rspec_sliver]
-
- # slivers always provide the ssh service
- login = Login({'authentication': 'ssh-keys',
- 'hostname': sliver['hostname'],
- 'port':'22',
- 'username': sliver['slice_name'],
- 'login': sliver['slice_name']
- })
- return rspec_node
-
-
- def get_all_leases(self, ldap_username):
- """
-
- Get list of lease dictionaries which all have the mandatory keys
- ('lease_id', 'hostname', 'site_id', 'name', 'start_time', 'duration').
- All the leases running or scheduled are returned.
-
- :param ldap_username: if ldap uid is not None, looks for the leases
- belonging to this user.
- :type ldap_username: string
- :returns: rspec lease dictionary with keys lease_id, component_id,
- slice_id, start_time, duration where the lease_id is the oar job id,
- component_id is the node's urn, slice_id is the slice urn,
- start_time is the timestamp starting time and duration is expressed
- in terms of the testbed's granularity.
- :rtype: dict
-
- .. note::There is no filtering of leases within a given time frame.
- All the running or scheduled leases are returned. options
- removed SA 15/05/2013
-
-
- """
-
- logger.debug("CortexlabAggregate get_all_leases ldap_username %s "
- % (ldap_username))
- leases = self.driver.driver.GetLeases(login=ldap_username)
- grain = self.driver.testbed_shell.GetLeaseGranularity()
- # site_ids = []
- rspec_leases = []
- for lease in leases:
- #as many leases as there are nodes in the job
- for node in lease['reserved_nodes']:
- rspec_lease = Lease()
- rspec_lease['lease_id'] = lease['lease_id']
-
- cortexlab_xrn = xrn_object(
- self.driver.testbed_shell.root_auth, node)
- rspec_lease['component_id'] = cortexlab_xrn.urn
- #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn,\
- #site, node['hostname'])
- try:
- rspec_lease['slice_id'] = lease['slice_id']
- except KeyError:
- #No info on the slice used in cortexlab_xp table
- pass
- rspec_lease['start_time'] = lease['t_from']
- rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
- / grain
- rspec_leases.append(rspec_lease)
- return rspec_leases
-
- def get_rspec(self, slice_xrn=None, login=None, version=None,
- options=None):
- """
- Returns xml rspec:
- - a full advertisement rspec with the testbed resources if slice_xrn is
- not specified.If a lease option is given, also returns the leases
- scheduled on the testbed.
- - a manifest Rspec with the leases and nodes in slice's leases if
- slice_xrn is not None.
-
- :param slice_xrn: srn of the slice
- :type slice_xrn: string
- :param login: user'uid (ldap login) on cortexlab
- :type login: string
- :param version: can be set to sfa or cortexlab
- :type version: RSpecVersion
- :param options: used to specify if the leases should also be included in
- the returned rspec.
- :type options: dict
-
- :returns: Xml Rspec.
- :rtype: XML
-
-
- """
-
- ldap_username = None
- rspec = None
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- logger.debug("CortexlabAggregate \t get_rspec ***version %s \
- version.type %s version.version %s options %s \r\n"
- % (version, version.type, version.version, options))
-
- if slice_xrn is None:
- rspec_version = version_manager._get_version(version.type,
- version.version, 'ad')
-
- else:
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
-
- slices, slivers = self.get_slice_and_slivers(slice_xrn, login)
- if slice_xrn and slices is not None:
- #Get user associated with this slice
- #for one_slice in slices :
- ldap_username = self.find_ldap_username_from_slice(slices[0])
- # ldap_username = slices[0]['reg_researchers'][0].__dict__['hrn']
- # # ldap_username = slices[0]['user']
- # tmp = ldap_username.split('.')
- # ldap_username = tmp[1]
- logger.debug("CortexlabAggregate \tget_rspec **** \
- LDAP USERNAME %s \r\n" \
- % (ldap_username))
- #at this point sliver may be empty if no cortexlab job
- #is running for this user/slice.
- rspec = RSpec(version=rspec_version, user_options=options)
-
- logger.debug("\r\n \r\n CortexlabAggregate \tget_rspec *** \
- slice_xrn %s slices %s\r\n \r\n"
- % (slice_xrn, slices))
-
- if options is not None :
- lease_option = options['list_leases']
- else:
- #If no options are specified, at least print the resources
- lease_option = 'all'
- #if slice_xrn :
- #lease_option = 'all'
-
- if lease_option in ['all', 'resources']:
- #if not options.get('list_leases') or options.get('list_leases')
- #and options['list_leases'] != 'leases':
- nodes = self.get_nodes()
- logger.debug("\r\n")
- logger.debug("CortexlabAggregate \t lease_option %s \
- get rspec ******* nodes %s"
- % (lease_option, nodes))
-
- sites_set = set([node['location']['site'] for node in nodes])
-
- #In case creating a job, slice_xrn is not set to None
- rspec.version.add_nodes(nodes)
- if slice_xrn and slices is not None:
- # #Get user associated with this slice
- # #for one_slice in slices :
- # ldap_username = slices[0]['reg_researchers']
- # # ldap_username = slices[0]['user']
- # tmp = ldap_username.split('.')
- # ldap_username = tmp[1]
- # # ldap_username = tmp[1].split('_')[0]
-
- logger.debug("CortexlabAggregate \tget_rspec **** \
- version type %s ldap_ user %s \r\n" \
- % (version.type, ldap_username))
- #TODO : Change the version of Rspec here in case of pbm -SA 09/01/14
- if version.type in ["Cortexlab", "Iotlab"]:
- rspec.version.add_connection_information(
- ldap_username, sites_set)
-
- default_sliver = slivers.get('default_sliver', [])
- if default_sliver and len(nodes) is not 0:
- #default_sliver_attribs = default_sliver.get('tags', [])
- logger.debug("CortexlabAggregate \tget_rspec **** \
- default_sliver%s \r\n" % (default_sliver))
- for attrib in default_sliver:
- rspec.version.add_default_sliver_attribute(
- attrib, default_sliver[attrib])
-
- if lease_option in ['all','leases']:
- leases = self.get_all_leases(ldap_username)
- rspec.version.add_leases(leases)
- logger.debug("CortexlabAggregate \tget_rspec **** \
- FINAL RSPEC %s \r\n" % (rspec.toxml()))
- return rspec.toxml()
-
-
-
- def get_slivers(self, urns, options=None):
- """Get slivers of the given slice urns. Slivers contains slice, node and
- user information.
-
- For Iotlab, returns the leases with sliver ids and their allocation
- status.
-
- :param urns: list of slice urns.
- :type urns: list of strings
- :param options: unused
- :type options: unused
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
- """
- if options is None: options={}
-
- slice_ids = set()
- node_ids = []
- for urn in urns:
- xrn = IotlabXrn(xrn=urn)
- if xrn.type == 'sliver':
- # id: slice_id-node_id
- try:
- sliver_id_parts = xrn.get_sliver_id_parts()
- slice_id = int(sliver_id_parts[0])
- node_id = int(sliver_id_parts[1])
- slice_ids.add(slice_id)
- node_ids.append(node_id)
- except ValueError:
- pass
- else:
- slice_names = set()
- slice_names.add(xrn.hrn)
-
-
- logger.debug("CortexlabAggregate \t get_slivers urns %s slice_ids %s \
- node_ids %s\r\n" % (urns, slice_ids, node_ids))
- logger.debug("CortexlabAggregate \t get_slivers xrn %s slice_names %s \
- \r\n" % (xrn, slice_names))
- filter_sliver = {}
- if slice_names:
- filter_sliver['slice_hrn'] = list(slice_names)
- slice_hrn = filter_sliver['slice_hrn'][0]
-
- slice_filter_type = 'slice_hrn'
-
- # if slice_ids:
- # filter['slice_id'] = list(slice_ids)
- # # get slices
- if slice_hrn:
- slices = self.driver.GetSlices(slice_hrn,
- slice_filter_type)
- leases = self.driver.GetLeases({'slice_hrn':slice_hrn})
- logger.debug("CortexlabAggregate \t get_slivers \
- slices %s leases %s\r\n" % (slices, leases ))
- if not slices:
- return []
-
- single_slice = slices[0]
- # get sliver users
- user = single_slice['reg_researchers'][0].__dict__
- logger.debug("CortexlabAggregate \t get_slivers user %s \
- \r\n" % (user))
-
- # construct user key info
- person = self.driver.testbed_shell.ldap.LdapFindUser(record=user)
- logger.debug("CortexlabAggregate \t get_slivers person %s \
- \r\n" % (person))
- # name = person['last_name']
- user['login'] = person['uid']
- user['user_urn'] = hrn_to_urn(user['hrn'], 'user')
- user['keys'] = person['pkey']
-
-
- try:
- node_ids = single_slice['node_ids']
- node_list = self.driver.testbed_shell.GetNodes(
- {'hostname':single_slice['node_ids']})
- node_by_hostname = dict([(node['hostname'], node)
- for node in node_list])
- except KeyError:
- logger.warning("\t get_slivers No slivers in slice")
- # slice['node_ids'] = node_ids
- # nodes_dict = self.get_slice_nodes(slice, options)
-
- slivers = []
- for current_lease in leases:
- for hostname in current_lease['reserved_nodes']:
- node = {}
- node['slice_id'] = current_lease['slice_id']
- node['slice_hrn'] = current_lease['slice_hrn']
- slice_name = current_lease['slice_hrn'].split(".")[1]
- node['slice_name'] = slice_name
- index = current_lease['reserved_nodes'].index(hostname)
- node_id = current_lease['resource_ids'][index]
- # node['slice_name'] = user['login']
- # node.update(single_slice)
- more_info = node_by_hostname[hostname]
- node.update(more_info)
- # oar_job_id is the slice_id (lease_id)
- sliver_hrn = '%s.%s-%s' % (self.driver.hrn,
- current_lease['lease_id'], node_id)
- node['node_id'] = node_id
- node['expires'] = current_lease['t_until']
- node['sliver_id'] = Xrn(sliver_hrn, type='sliver').urn
- node['urn'] = node['sliver_id']
- node['services_user'] = [user]
-
- slivers.append(node)
- return slivers
-
-
- def list_resources(self, version = None, options=None):
- """
- Returns an advertisement Rspec of available resources at this
- aggregate. This Rspec contains a resource listing along with their
- description, providing sufficient information for clients to be able to
- select among available resources.
-
- :param options: various options. The valid options are: {boolean
- geni_compressed <optional>; struct geni_rspec_version { string type;
- #case insensitive , string version; # case insensitive}} . The only
- mandatory options if options is specified is geni_rspec_version.
- :type options: dictionary
-
- :returns: On success, the value field of the return struct will contain
- a geni.rspec advertisment RSpec
- :rtype: Rspec advertisement in xml.
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#RSpecdatatype
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#ListResources
- """
-
- if options is None: options={}
-
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(version.type,
- version.version, 'ad')
- rspec = RSpec(version=rspec_version, user_options=options)
- # variable ldap_username to be compliant with get_all_leases
- # prototype. Now unused in geni-v3 since we are getting all the leases
- # here
- ldap_username = None
- if not options.get('list_leases') or options['list_leases'] != 'leases':
- # get nodes
- nodes_dict = self.get_nodes(options)
-
- # no interfaces on iotlab nodes
- # convert nodes to rspec nodes
- rspec_nodes = []
- for node_id in nodes_dict:
- node = nodes_dict[node_id]
- rspec_node = self.node_to_rspec_node(node)
- rspec_nodes.append(rspec_node)
- rspec.version.add_nodes(rspec_nodes)
-
- # add links
- # links = self.get_links(sites, nodes_dict, interfaces)
- # rspec.version.add_links(links)
-
- if not options.get('list_leases') or options.get('list_leases') \
- and options['list_leases'] != 'resources':
- leases = self.get_all_leases(ldap_username)
- rspec.version.add_leases(leases)
-
- return rspec.toxml()
-
-
- def describe(self, urns, version=None, options=None):
- """
- Retrieve a manifest RSpec describing the resources contained by the
- named entities, e.g. a single slice or a set of the slivers in a slice.
- This listing and description should be sufficiently descriptive to allow
- experimenters to use the resources.
-
- :param urns: If a slice urn is supplied and there are no slivers in the
- given slice at this aggregate, then geni_rspec shall be a valid
- manifest RSpec, containing no node elements - no resources.
- :type urns: list or strings
- :param options: various options. the valid options are: {boolean
- geni_compressed <optional>; struct geni_rspec_version { string type;
- #case insensitive , string version; # case insensitive}}
- :type options: dictionary
-
- :returns: On success returns the following dictionary {geni_rspec:
- <geni.rspec, a Manifest RSpec>, geni_urn: <string slice urn of the
- containing slice>, geni_slivers:{ geni_sliver_urn:
- <string sliver urn>, geni_expires: <dateTime.rfc3339 allocation
- expiration string, as in geni_expires from SliversStatus>,
- geni_allocation_status: <string sliver state - e.g. geni_allocated
- or geni_provisioned >, geni_operational_status:
- <string sliver operational state>, geni_error: <optional string.
- The field may be omitted entirely but may not be null/None,
- explaining any failure for a sliver.>}
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Describe
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
- """
- if options is None: options={}
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
- rspec = RSpec(version=rspec_version, user_options=options)
-
- # get slivers
- geni_slivers = []
- slivers = self.get_slivers(urns, options)
- if slivers:
- rspec_expires = datetime_to_string(utcparse(slivers[0]['expires']))
- else:
- rspec_expires = datetime_to_string(utcparse(time.time()))
- rspec.xml.set('expires', rspec_expires)
-
- # lookup the sliver allocations
- geni_urn = urns[0]
- sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- logger.debug(" Cortexlabaggregate.PY \tDescribe sliver_ids %s "
- % (sliver_ids))
- constraint = SliverAllocation.sliver_id.in_(sliver_ids)
- query = self.driver.api.dbsession().query(SliverAllocation)
- sliver_allocations = query.filter((constraint)).all()
- logger.debug(" Cortexlabaggregate.PY \tDescribe sliver_allocations %s "
- % (sliver_allocations))
- sliver_allocation_dict = {}
- for sliver_allocation in sliver_allocations:
- geni_urn = sliver_allocation.slice_urn
- sliver_allocation_dict[sliver_allocation.sliver_id] = \
- sliver_allocation
-
- # add slivers
- nodes_dict = {}
- for sliver in slivers:
- nodes_dict[sliver['node_id']] = sliver
- rspec_nodes = []
- for sliver in slivers:
- rspec_node = self.sliver_to_rspec_node(sliver,
- sliver_allocation_dict)
- rspec_nodes.append(rspec_node)
- logger.debug(" Cortexlabaggregate.PY \tDescribe sliver_allocation_dict %s "
- % (sliver_allocation_dict))
- geni_sliver = self.rspec_node_to_geni_sliver(rspec_node,
- sliver_allocation_dict)
- geni_slivers.append(geni_sliver)
-
- logger.debug(" Cortexlabaggregate.PY \tDescribe rspec_nodes %s\
- rspec %s "
- % (rspec_nodes, rspec))
- rspec.version.add_nodes(rspec_nodes)
-
- return {'geni_urn': geni_urn,
- 'geni_rspec': rspec.toxml(),
- 'geni_slivers': geni_slivers}
\ No newline at end of file
+++ /dev/null
-"""
-Implements what a driver should provide for SFA to work.
-"""
-from datetime import datetime
-from sfa.util.sfatime import utcparse, datetime_to_string
-
-from sfa.util.faults import SliverDoesNotExist, Forbidden
-from sfa.util.sfalogging import logger
-
-from sfa.trust.hierarchy import Hierarchy
-from sfa.trust.gid import create_uuid
-
-from sfa.managers.driver import Driver
-from sfa.rspecs.version_manager import VersionManager
-from sfa.rspecs.rspec import RSpec
-
-from sfa.cortexlab.cortexlabaggregate import CortexlabAggregate
-
-from sfa.cortexlab.cortexlabslices import CortexlabSlices
-from sfa.cortexlab.cortexlabshell import CortexlabShell
-
-from sfa.iotlab.iotlabxrn import IotlabXrn, xrn_object
-from sfa.util.xrn import Xrn, hrn_to_urn, get_authority, urn_to_hrn
-
-from sfa.trust.certificate import Keypair, convert_public_key
-from sfa.trust.credential import Credential
-from sfa.storage.model import SliverAllocation
-
-from sfa.storage.model import RegRecord, RegUser, RegSlice, RegKey
-from sfa.iotlab.iotlabpostgres import LeaseTableXP
-from sqlalchemy.orm import joinedload
-
-class CortexlabDriver(Driver):
- """ Cortexlab Driver class inherited from Driver generic class.
-
- Contains methods compliant with the SFA standard and the testbed
- infrastructure (calls to LDAP and scheduler to book the nodes).
-
- .. seealso::: Driver class
-
- """
- def __init__(self, api):
- """
-
- Sets the Cortexlab SFA config parameters,
- instanciates the testbed api.
-
- :param api: SfaApi configuration object. Holds reference to the
- database.
- :type api: SfaApi object
- """
-
- Driver.__init__(self, api)
- self.api = api
- config = api.config
- self.testbed_shell = CortexlabShell(config)
- self.cache = None
-
- def GetPeers(self, peer_filter=None ):
- """ Gathers registered authorities in SFA DB and looks for specific peer
- if peer_filter is specified.
- :param peer_filter: name of the site authority looked for.
- :type peer_filter: string
- :returns: list of records.
-
- """
-
- existing_records = {}
- existing_hrns_by_types = {}
- logger.debug("CORTEXLAB_API \tGetPeers peer_filter %s " % (peer_filter))
- query = self.api.dbsession().query(RegRecord)
- all_records = query.filter(RegRecord.type.like('%authority%')).all()
-
- for record in all_records:
- existing_records[(record.hrn, record.type)] = record
- if record.type not in existing_hrns_by_types:
- existing_hrns_by_types[record.type] = [record.hrn]
- else:
- existing_hrns_by_types[record.type].append(record.hrn)
-
- logger.debug("CORTEXLAB_API \tGetPeer\texisting_hrns_by_types %s "
- % (existing_hrns_by_types))
- records_list = []
-
- try:
- if peer_filter:
- records_list.append(existing_records[(peer_filter,
- 'authority')])
- else:
- for hrn in existing_hrns_by_types['authority']:
- records_list.append(existing_records[(hrn, 'authority')])
-
- logger.debug("CORTEXLAB_API \tGetPeer \trecords_list %s "
- % (records_list))
-
- except KeyError:
- pass
-
- return_records = records_list
- logger.debug("CORTEXLAB_API \tGetPeer return_records %s "
- % (return_records))
- return return_records
-
- def GetKeys(self, key_filter=None):
- """Returns a dict of dict based on the key string. Each dict entry
- contains the key id, the ssh key, the user's email and the
- user's hrn.
- If key_filter is specified and is an array of key identifiers,
- only keys matching the filter will be returned.
-
- Admin may query all keys. Non-admins may only query their own keys.
- FROM PLC API DOC
-
- :returns: dict with ssh key as key and dicts as value.
- :rtype: dict
- """
- query = self.api.dbsession().query(RegKey)
- if key_filter is None:
- keys = query.options(joinedload('reg_user')).all()
- else:
- constraint = RegKey.key.in_(key_filter)
- keys = query.options(joinedload('reg_user')).filter(constraint).all()
-
- key_dict = {}
- for key in keys:
- key_dict[key.key] = {'key_id': key.key_id, 'key': key.key,
- 'email': key.reg_user.email,
- 'hrn': key.reg_user.hrn}
-
- #ldap_rslt = self.ldap.LdapSearch({'enabled']=True})
- #user_by_email = dict((user[1]['mail'][0], user[1]['sshPublicKey']) \
- #for user in ldap_rslt)
-
- logger.debug("CORTEXLAB_API GetKeys -key_dict %s \r\n " % (key_dict))
- return key_dict
-
- def AddPerson(self, record):
- """
-
- Adds a new account. Any fields specified in records are used,
- otherwise defaults are used. Creates an appropriate login by calling
- LdapAddUser.
-
- :param record: dictionary with the sfa user's properties.
- :returns: a dicitonary with the status. If successful, the dictionary
- boolean is set to True and there is a 'uid' key with the new login
- added to LDAP, otherwise the bool is set to False and a key
- 'message' is in the dictionary, with the error message.
- :rtype: dict
-
- """
- ret = self.testbed_shell.ldap.LdapAddUser(record)
-
- if ret['bool'] is True:
- record['hrn'] = self.testbed_shell.root_auth + '.' + ret['uid']
- logger.debug("Cortexlab api AddPerson return code %s record %s "
- % (ret, record))
- self.__add_person_to_db(record)
- return ret
-
- def __add_person_to_db(self, user_dict):
- """
- Add a federated user straight to db when the user issues a lease
- request with nodes and that he has not registered with cortexlab
- yet (that is he does not have a LDAP entry yet).
- Uses parts of the routines in CortexlabImport when importing user
- from LDAP.
- Called by AddPerson, right after LdapAddUser.
- :param user_dict: Must contain email, hrn and pkey to get a GID
- and be added to the SFA db.
- :type user_dict: dict
-
- """
- request = self.api.dbsession().query(RegUser)
- check_if_exists = \
- request.filter_by(email = user_dict['email']).first()
- #user doesn't exists
- if not check_if_exists:
- logger.debug("__add_person_to_db \t Adding %s \r\n \r\n \
- " %(user_dict))
- hrn = user_dict['hrn']
- person_urn = hrn_to_urn(hrn, 'user')
- pubkey = user_dict['pkey']
- try:
- pkey = convert_public_key(pubkey)
- except TypeError:
- #key not good. create another pkey
- logger.warn('__add_person_to_db: unable to convert public \
- key for %s' %(hrn ))
- pkey = Keypair(create=True)
-
-
- if pubkey is not None and pkey is not None :
- hierarchy = Hierarchy()
- person_gid = hierarchy.create_gid(person_urn, create_uuid(), \
- pkey)
- if user_dict['email']:
- logger.debug("__add_person_to_db \r\n \r\n \
- IOTLAB IMPORTER PERSON EMAIL OK email %s "\
- %(user_dict['email']))
- person_gid.set_email(user_dict['email'])
-
- user_record = RegUser(hrn=hrn , pointer= '-1', \
- authority=get_authority(hrn), \
- email=user_dict['email'], gid = person_gid)
- user_record.reg_keys = [RegKey(user_dict['pkey'])]
- user_record.just_created()
- self.api.dbsession().add (user_record)
- self.api.dbsession().commit()
- return
-
-
- def _sql_get_slice_info(self, slice_filter):
- """
- Get the slice record based on the slice hrn. Fetch the record of the
- user associated with the slice by using joinedload based on the
- reg_researcher relationship.
-
- :param slice_filter: the slice hrn we are looking for
- :type slice_filter: string
- :returns: the slice record enhanced with the user's information if the
- slice was found, None it wasn't.
-
- :rtype: dict or None.
- """
- #DO NOT USE RegSlice - reg_researchers to get the hrn
- #of the user otherwise will mess up the RegRecord in
- #Resolve, don't know why - SA 08/08/2012
-
- #Only one entry for one user = one slice in testbed_xp table
- #slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
- request = self.api.dbsession().query(RegSlice)
- raw_slicerec = request.options(joinedload('reg_researchers')).filter_by(hrn=slice_filter).first()
- #raw_slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
- if raw_slicerec:
- #load_reg_researcher
- #raw_slicerec.reg_researchers
- raw_slicerec = raw_slicerec.__dict__
- logger.debug(" CORTEXLAB_API \t _sql_get_slice_info slice_filter %s \
- raw_slicerec %s" % (slice_filter, raw_slicerec))
- slicerec = raw_slicerec
- #only one researcher per slice so take the first one
- #slicerec['reg_researchers'] = raw_slicerec['reg_researchers']
- #del slicerec['reg_researchers']['_sa_instance_state']
- return slicerec
-
- else:
- return None
-
- def _sql_get_slice_info_from_user(self, slice_filter):
- """
- Get the slice record based on the user recordid by using a joinedload
- on the relationship reg_slices_as_researcher. Format the sql record
- into a dict with the mandatory fields for user and slice.
- :returns: dict with slice record and user record if the record was found
- based on the user's id, None if not..
- :rtype:dict or None..
- """
- #slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
- request = self.api.dbsession().query(RegUser)
- raw_slicerec = request.options(joinedload('reg_slices_as_researcher')).filter_by(record_id=slice_filter).first()
- #raw_slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
- #Put it in correct order
- user_needed_fields = ['peer_authority', 'hrn', 'last_updated',
- 'classtype', 'authority', 'gid', 'record_id',
- 'date_created', 'type', 'email', 'pointer']
- slice_needed_fields = ['peer_authority', 'hrn', 'last_updated',
- 'classtype', 'authority', 'gid', 'record_id',
- 'date_created', 'type', 'pointer']
- if raw_slicerec:
- #raw_slicerec.reg_slices_as_researcher
- raw_slicerec = raw_slicerec.__dict__
- slicerec = {}
- slicerec = \
- dict([(k, raw_slicerec[
- 'reg_slices_as_researcher'][0].__dict__[k])
- for k in slice_needed_fields])
- slicerec['reg_researchers'] = dict([(k, raw_slicerec[k])
- for k in user_needed_fields])
- #TODO Handle multiple slices for one user SA 10/12/12
- #for now only take the first slice record associated to the rec user
- ##slicerec = raw_slicerec['reg_slices_as_researcher'][0].__dict__
- #del raw_slicerec['reg_slices_as_researcher']
- #slicerec['reg_researchers'] = raw_slicerec
- ##del slicerec['_sa_instance_state']
-
- return slicerec
-
- else:
- return None
-
-
- def _get_slice_records(self, slice_filter=None,
- slice_filter_type=None):
- """
- Get the slice record depending on the slice filter and its type.
- :param slice_filter: Can be either the slice hrn or the user's record
- id.
- :type slice_filter: string
- :param slice_filter_type: describes the slice filter type used, can be
- slice_hrn or record_id_user
- :type: string
- :returns: the slice record
- :rtype:dict
- .. seealso::_sql_get_slice_info_from_user
- .. seealso:: _sql_get_slice_info
- """
-
- #Get list of slices based on the slice hrn
- if slice_filter_type == 'slice_hrn':
-
- #if get_authority(slice_filter) == self.root_auth:
- #login = slice_filter.split(".")[1].split("_")[0]
-
- slicerec = self._sql_get_slice_info(slice_filter)
-
- if slicerec is None:
- return None
- #return login, None
-
- #Get slice based on user id
- if slice_filter_type == 'record_id_user':
-
- slicerec = self._sql_get_slice_info_from_user(slice_filter)
-
- if slicerec:
- fixed_slicerec_dict = slicerec
- #At this point if there is no login it means
- #record_id_user filter has been used for filtering
- #if login is None :
- ##If theslice record is from iotlab
- #if fixed_slicerec_dict['peer_authority'] is None:
- #login = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
- #return login, fixed_slicerec_dict
- return fixed_slicerec_dict
- else:
- return None
-
-
-
- def GetSlices(self, slice_filter=None, slice_filter_type=None,
- login=None):
- """Get the slice records from the sfa db and add lease information
- if any.
-
- :param slice_filter: can be the slice hrn or slice record id in the db
- depending on the slice_filter_type.
- :param slice_filter_type: defines the type of the filtering used, Can be
- either 'slice_hrn' or 'record_id'.
- :type slice_filter: string
- :type slice_filter_type: string
- :returns: a slice dict if slice_filter and slice_filter_type
- are specified and a matching entry is found in the db. The result
- is put into a list.Or a list of slice dictionnaries if no filters
- arespecified.
-
- :rtype: list
-
- """
- #login = None
- authorized_filter_types_list = ['slice_hrn', 'record_id_user']
- return_slicerec_dictlist = []
-
- #First try to get information on the slice based on the filter provided
- if slice_filter_type in authorized_filter_types_list:
- fixed_slicerec_dict = self._get_slice_records(slice_filter,
- slice_filter_type)
- # if the slice was not found in the sfa db
- if fixed_slicerec_dict is None:
- return return_slicerec_dictlist
-
- slice_hrn = fixed_slicerec_dict['hrn']
-
- logger.debug(" CORTEXLAB_API \tGetSlices login %s \
- slice record %s slice_filter %s \
- slice_filter_type %s " % (login,
- fixed_slicerec_dict, slice_filter,
- slice_filter_type))
-
-
- #Now we have the slice record fixed_slicerec_dict, get the
- #jobs associated to this slice
- leases_list = []
-
- leases_list = self.GetLeases(login=login)
- #If no job is running or no job scheduled
- #return only the slice record
- if leases_list == [] and fixed_slicerec_dict:
- return_slicerec_dictlist.append(fixed_slicerec_dict)
-
- # if the jobs running don't belong to the user/slice we are looking
- # for
- leases_hrn = [lease['slice_hrn'] for lease in leases_list]
- if slice_hrn not in leases_hrn:
- return_slicerec_dictlist.append(fixed_slicerec_dict)
- #If several experiments for one slice , put the slice record into
- # each lease information dict
- for lease in leases_list:
- slicerec_dict = {}
- logger.debug("CORTEXLAB_API.PY \tGetSlices slice_filter %s \
- \t lease['slice_hrn'] %s"
- % (slice_filter, lease['slice_hrn']))
- if lease['slice_hrn'] == slice_hrn:
- slicerec_dict['experiment_id'] = lease['lease_id']
- #Update lease dict with the slice record
- if fixed_slicerec_dict:
- fixed_slicerec_dict['experiment_id'] = []
- fixed_slicerec_dict['experiment_id'].append(
- slicerec_dict['experiment_id'])
- slicerec_dict.update(fixed_slicerec_dict)
-
- slicerec_dict['slice_hrn'] = lease['slice_hrn']
- slicerec_dict['hrn'] = lease['slice_hrn']
- slicerec_dict['user'] = lease['user']
- slicerec_dict.update(
- {'list_node_ids':
- {'hostname': lease['reserved_nodes']}})
- slicerec_dict.update({'node_ids': lease['reserved_nodes']})
-
-
- return_slicerec_dictlist.append(slicerec_dict)
-
-
- logger.debug("CORTEXLAB_API.PY \tGetSlices \
- slicerec_dict %s return_slicerec_dictlist %s \
- lease['reserved_nodes'] \
- %s" % (slicerec_dict, return_slicerec_dictlist,
- lease['reserved_nodes']))
-
- logger.debug("CORTEXLAB_API.PY \tGetSlices RETURN \
- return_slicerec_dictlist %s"
- % (return_slicerec_dictlist))
-
- return return_slicerec_dictlist
-
-
- else:
- #Get all slices from the cortexlab sfa database , get the user info
- # as well at the same time put them in dict format
- request = self.api.dbsession().query(RegSlice)
- query_slice_list = \
- request.options(joinedload('reg_researchers')).all()
-
- for record in query_slice_list:
- tmp = record.__dict__
- tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
- return_slicerec_dictlist.append(tmp)
-
-
- #Get all the experiments reserved nodes
- leases_list = self.testbed_shell.GetReservedNodes()
-
- for fixed_slicerec_dict in return_slicerec_dictlist:
- slicerec_dict = {}
- #Check if the slice belongs to a cortexlab user
- if fixed_slicerec_dict['peer_authority'] is None:
- owner = fixed_slicerec_dict['hrn'].split(
- ".")[1].split("_")[0]
- else:
- owner = None
- for lease in leases_list:
- if owner == lease['user']:
- slicerec_dict['experiment_id'] = lease['lease_id']
-
- #for reserved_node in lease['reserved_nodes']:
- logger.debug("CORTEXLAB_API.PY \tGetSlices lease %s "
- % (lease))
- slicerec_dict.update(fixed_slicerec_dict)
- slicerec_dict.update({'node_ids':
- lease['reserved_nodes']})
- slicerec_dict.update({'list_node_ids':
- {'hostname':
- lease['reserved_nodes']}})
-
-
- fixed_slicerec_dict.update(slicerec_dict)
-
- logger.debug("CORTEXLAB_API.PY \tGetSlices RETURN \
- return_slicerec_dictlist %s \t slice_filter %s " \
- %(return_slicerec_dictlist, slice_filter))
-
- return return_slicerec_dictlist
-
- def AddLeases(self, hostname_list, slice_record,
- lease_start_time, lease_duration):
-
- """Creates an experiment on the testbed corresponding to the information
- provided as parameters. Adds the experiment id and the slice hrn in the
- lease table on the additional sfa database so that we are able to know
- which slice has which nodes.
-
- :param hostname_list: list of nodes' OAR hostnames.
- :param slice_record: sfa slice record, must contain login and hrn.
- :param lease_start_time: starting time , unix timestamp format
- :param lease_duration: duration in minutes
-
- :type hostname_list: list
- :type slice_record: dict
- :type lease_start_time: integer
- :type lease_duration: integer
- :returns: experiment_id, can be None if the job request failed.
-
- """
- logger.debug("CORTEXLAB_API \r\n \r\n \t AddLeases hostname_list %s \
- slice_record %s lease_start_time %s lease_duration %s "\
- %( hostname_list, slice_record , lease_start_time, \
- lease_duration))
-
- username = slice_record['login']
-
- experiment_id = self.testbed_shell.LaunchExperimentOnTestbed(
- hostname_list,
- slice_record['hrn'],
- lease_start_time, lease_duration,
- username)
- if experiment_id is not None:
- start_time = \
- datetime.fromtimestamp(int(lease_start_time)).\
- strftime(self.testbed_shell.time_format)
- end_time = lease_start_time + lease_duration
-
-
- logger.debug("CORTEXLAB_API \t AddLeases TURN ON LOGGING SQL \
- %s %s %s "%(slice_record['hrn'], experiment_id, end_time))
-
-
- logger.debug("CORTEXLAB_API \r\n \r\n \t AddLeases %s %s %s " \
- %(type(slice_record['hrn']), type(experiment_id),
- type(end_time)))
-
- testbed_xp_row = LeaseTableXP(slice_hrn=slice_record['hrn'],
- experiment_id=experiment_id,
- end_time=end_time)
-
- logger.debug("CORTEXLAB_API \t AddLeases testbed_xp_row %s" \
- %(testbed_xp_row))
- self.api.dbsession().add(testbed_xp_row)
- self.api.dbsession().commit()
-
- logger.debug("CORTEXLAB_API \t AddLeases hostname_list \
- start_time %s " %(start_time))
-
- return experiment_id
-
-
- def GetLeases(self, lease_filter_dict=None, login=None):
- """
- Get the list of leases from testbed with complete information
- about which slice owns which jobs and nodes.
- Two purposes:
- -Fetch all the experiment from the testbed (running, waiting..)
- complete the reservation information with slice hrn
- found in lease_table . If not available in the table,
- assume it is a iotlab slice.
- -Updates the iotlab table, deleting jobs when necessary.
-
- :returns: reservation_list, list of dictionaries with 'lease_id',
- 'reserved_nodes','slice_id', 'state', 'user', 'component_id_list',
- 'slice_hrn', 'resource_ids', 't_from', 't_until'
- :rtype: list
-
- """
-
- unfiltered_reservation_list = self.testbed_shell.GetReservedNodes(login)
-
- reservation_list = []
- #Find the slice associated with this user iotlab ldap uid
- logger.debug(" CORTEXLAB \tGetLeases login %s\
- unfiltered_reservation_list %s "
- % (login, unfiltered_reservation_list))
- #Create user dict first to avoid looking several times for
- #the same user in LDAP SA 27/07/12
- experiment_id_list = []
- jobs_psql_query = self.api.dbsession().query(LeaseTableXP).all()
- jobs_psql_dict = dict([(row.experiment_id, row.__dict__)
- for row in jobs_psql_query])
- #jobs_psql_dict = jobs_psql_dict)
- logger.debug("CORTEXLAB \tGetLeases jobs_psql_dict %s"
- % (jobs_psql_dict))
- jobs_psql_id_list = [row.experiment_id for row in jobs_psql_query]
-
- for resa in unfiltered_reservation_list:
- logger.debug("CORTEXLAB \tGetLeases USER %s"
- % (resa['user']))
- #Construct list of jobs (runing, waiting..) from scheduler
- experiment_id_list.append(resa['lease_id'])
- #If there is information on the job in IOTLAB DB ]
- #(slice used and job id)
- if resa['lease_id'] in jobs_psql_dict:
- job_info = jobs_psql_dict[resa['lease_id']]
- logger.debug("CORTEXLAB \tGetLeases job_info %s"
- % (job_info))
- resa['slice_hrn'] = job_info['slice_hrn']
- resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
-
- #otherwise, assume it is a iotlab slice:
- else:
- resa['slice_id'] = hrn_to_urn(self.testbed_shell.root_auth \
- + '.' + resa['user'] + "_slice",
- 'slice')
- resa['slice_hrn'] = Xrn(resa['slice_id']).get_hrn()
-
- resa['component_id_list'] = []
- #Transform the hostnames into urns (component ids)
- for node in resa['reserved_nodes']:
-
- iotlab_xrn = xrn_object(self.testbed_shell.root_auth, node)
- resa['component_id_list'].append(iotlab_xrn.urn)
-
- if lease_filter_dict:
- logger.debug("CORTEXLAB \tGetLeases \
- \r\n leasefilter %s" % ( lease_filter_dict))
-
- # filter_dict_functions = {
- # 'slice_hrn' : IotlabShell.filter_lease_name,
- # 't_from' : IotlabShell.filter_lease_start_time
- # }
- reservation_list = list(unfiltered_reservation_list)
- for filter_type in lease_filter_dict:
- logger.debug("CORTEXLAB \tGetLeases reservation_list %s" \
- % (reservation_list))
- reservation_list = self.testbed_shell.filter_lease(
- reservation_list,filter_type,
- lease_filter_dict[filter_type] )
-
- # Filter the reservation list with a maximum timespan so that the
- # leases and jobs running after this timestamp do not appear
- # in the result leases.
- # if 'start_time' in :
- # if resa['start_time'] < lease_filter_dict['start_time']:
- # reservation_list.append(resa)
-
-
- # if 'name' in lease_filter_dict and \
- # lease_filter_dict['name'] == resa['slice_hrn']:
- # reservation_list.append(resa)
-
-
- if lease_filter_dict is None:
- reservation_list = unfiltered_reservation_list
-
- self.update_experiments_in_lease_table(experiment_id_list,
- jobs_psql_id_list)
-
- logger.debug(" CORTEXLAB.PY \tGetLeases reservation_list %s"
- % (reservation_list))
- return reservation_list
-
- def update_experiments_in_lease_table(self,
- experiment_list_from_testbed, experiment_list_in_db):
- """Cleans the lease_table by deleting expired and cancelled jobs.
-
- Compares the list of experiment ids given by the testbed with the
- experiment ids that are already in the database, deletes the
- experiments that are no longer in the testbed experiment id list.
-
- :param experiment_list_from_testbed: list of experiment ids coming
- from testbed
- :type experiment_list_from_testbed: list
- :param experiment_list_in_db: list of experiment ids from the sfa
- additionnal database.
- :type experiment_list_in_db: list
-
- :returns: None
- """
- #Turn the list into a set
- set_experiment_list_in_db = set(experiment_list_in_db)
-
- kept_experiments = set(experiment_list_from_testbed).intersection(set_experiment_list_in_db)
- logger.debug("\r\n \t update_experiments_in_lease_table \
- experiment_list_in_db %s \r\n \
- experiment_list_from_testbed %s \
- kept_experiments %s "
- % (set_experiment_list_in_db,
- experiment_list_from_testbed, kept_experiments))
- deleted_experiments = set_experiment_list_in_db.difference(
- kept_experiments)
- deleted_experiments = list(deleted_experiments)
- if len(deleted_experiments) > 0:
- request = self.api.dbsession().query(LeaseTableXP)
- request.filter(LeaseTableXP.experiment_id.in_(deleted_experiments)).delete(synchronize_session='fetch')
- self.api.dbsession().commit()
- return
-
-
- def AddSlice(self, slice_record, user_record):
- """
-
- Add slice to the local cortexlab sfa tables if the slice comes
- from a federated site and is not yet in the cortexlab sfa DB,
- although the user has already a LDAP login.
- Called by verify_slice during lease/sliver creation.
-
- :param slice_record: record of slice, must contain hrn, gid, slice_id
- and authority of the slice.
- :type slice_record: dictionary
- :param user_record: record of the user
- :type user_record: RegUser
-
- """
-
- sfa_record = RegSlice(hrn=slice_record['hrn'],
- gid=slice_record['gid'],
- pointer=slice_record['slice_id'],
- authority=slice_record['authority'])
- logger.debug("CORTEXLAB_API.PY AddSlice sfa_record %s user_record %s"
- % (sfa_record, user_record))
- sfa_record.just_created()
- self.api.dbsession().add(sfa_record)
- self.api.dbsession().commit()
- #Update the reg-researcher dependance table
- sfa_record.reg_researchers = [user_record]
- self.api.dbsession().commit()
-
- return
-
- def augment_records_with_testbed_info(self, record_list):
- """
-
- Adds specific testbed info to the records.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :returns: list of records with extended information in each record
- :rtype: list
-
- """
- return self.fill_record_info(record_list)
-
- def fill_record_info(self, record_list):
- """
-
- For each SFA record, fill in the iotlab specific and SFA specific
- fields in the record.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :returns: list of records with extended information in each record
- :rtype: list
-
- .. warning:: Should not be modifying record_list directly because modi
- fication are kept outside the method's scope. Howerver, there is no
- other way to do it given the way it's called in registry manager.
-
- """
-
- logger.debug("CORTEXLABDRIVER \tfill_record_info records %s "
- % (record_list))
- if not isinstance(record_list, list):
- record_list = [record_list]
-
- try:
- for record in record_list:
-
- if str(record['type']) == 'node':
- # look for node info using GetNodes
- # the record is about one node only
- filter_dict = {'hrn': [record['hrn']]}
- node_info = self.testbed_shell.GetNodes(filter_dict)
- # the node_info is about one node only, but it is formatted
- # as a list
- record.update(node_info[0])
- logger.debug("CORTEXLABDRIVER.PY \t \
- fill_record_info NODE" % (record))
-
- #If the record is a SFA slice record, then add information
- #about the user of this slice. This kind of
- #information is in the Iotlab's DB.
- if str(record['type']) == 'slice':
- if 'reg_researchers' in record and isinstance(record
- ['reg_researchers'],
- list):
- record['reg_researchers'] = \
- record['reg_researchers'][0].__dict__
- record.update(
- {'PI': [record['reg_researchers']['hrn']],
- 'researcher': [record['reg_researchers']['hrn']],
- 'name': record['hrn'],
- 'experiment_id': [],
- 'node_ids': [],
- 'person_ids': [record['reg_researchers']
- ['record_id']],
- # For client_helper.py compatibility
- 'geni_urn': '',
- # For client_helper.py compatibility
- 'keys': '',
- # For client_helper.py compatibility
- 'key_ids': ''})
-
- #Get slice record and job id if any.
- recslice_list = self.GetSlices(
- slice_filter=str(record['hrn']),
- slice_filter_type='slice_hrn')
-
- logger.debug("CORTEXLABDRIVER \tfill_record_info \
- TYPE SLICE RECUSER record['hrn'] %s record['experiment_id']\
- %s " % (record['hrn'], record['experiment_id']))
- del record['reg_researchers']
- try:
- for rec in recslice_list:
- logger.debug("CORTEXLABDRIVER\r\n \t \
- fill_record_info experiment_id %s "
- % (rec['experiment_id']))
-
- record['node_ids'] = [self.testbed_shell.root_auth +
- '.' + hostname for hostname
- in rec['node_ids']]
- except KeyError:
- pass
-
- logger.debug("CORTEXLABDRIVER.PY \t fill_record_info SLICE \
- recslice_list %s \r\n \t RECORD %s \r\n \
- \r\n" % (recslice_list, record))
-
- if str(record['type']) == 'user':
- #The record is a SFA user record.
- #Get the information about his slice from Iotlab's DB
- #and add it to the user record.
- recslice_list = self.GetSlices(
- slice_filter=record['record_id'],
- slice_filter_type='record_id_user')
-
- logger.debug("CORTEXLABDRIVER.PY \t fill_record_info \
- TYPE USER recslice_list %s \r\n \t RECORD %s \r\n"
- % (recslice_list, record))
- #Append slice record in records list,
- #therefore fetches user and slice info again(one more loop)
- #Will update PIs and researcher for the slice
-
- recuser = recslice_list[0]['reg_researchers']
- logger.debug("CORTEXLABDRIVER.PY \t fill_record_info USER \
- recuser %s \r\n \r\n" % (recuser))
- recslice = {}
- recslice = recslice_list[0]
- recslice.update(
- {'PI': [recuser['hrn']],
- 'researcher': [recuser['hrn']],
- 'name': recuser['hrn'],
- 'node_ids': [],
- 'experiment_id': [],
- 'person_ids': [recuser['record_id']]})
- try:
- for rec in recslice_list:
- recslice['experiment_id'].append(rec['experiment_id'])
- except KeyError:
- pass
-
- recslice.update({'type': 'slice',
- 'hrn': recslice_list[0]['hrn']})
-
- #GetPersons takes [] as filters
- user_cortexlab = self.testbed_shell.GetPersons([record])
-
- record.update(user_cortexlab[0])
- #For client_helper.py compatibility
- record.update(
- {'geni_urn': '',
- 'keys': '',
- 'key_ids': ''})
- record_list.append(recslice)
-
- logger.debug("CORTEXLABDRIVER.PY \t \
- fill_record_info ADDING SLICE\
- INFO TO USER records %s" % (record_list))
-
- except TypeError, error:
- logger.log_exc("CORTEXLABDRIVER \t fill_record_info EXCEPTION %s"
- % (error))
-
- return record_list
-
- def sliver_status(self, slice_urn, slice_hrn):
- """
- Receive a status request for slice named urn/hrn
- urn:publicid:IDN+iotlab+nturro_slice hrn iotlab.nturro_slice
- shall return a structure as described in
- http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
- NT : not sure if we should implement this or not, but used by sface.
-
- :param slice_urn: slice urn
- :type slice_urn: string
- :param slice_hrn: slice hrn
- :type slice_hrn: string
-
- """
-
- #First get the slice with the slice hrn
- slice_list = self.GetSlices(slice_filter=slice_hrn,
- slice_filter_type='slice_hrn')
-
- if len(slice_list) == 0:
- raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
-
- #Used for fetching the user info witch comes along the slice info
- one_slice = slice_list[0]
-
- #Make a list of all the nodes hostnames in use for this slice
- slice_nodes_list = []
- slice_nodes_list = one_slice['node_ids']
- #Get all the corresponding nodes details
- nodes_all = self.testbed_shell.GetNodes(
- {'hostname': slice_nodes_list},
- ['node_id', 'hostname', 'site', 'boot_state'])
- nodeall_byhostname = dict([(one_node['hostname'], one_node)
- for one_node in nodes_all])
-
- for single_slice in slice_list:
- #For compatibility
- top_level_status = 'empty'
- result = {}
- result.fromkeys(
- ['geni_urn', 'geni_error', 'cortexlab_login', 'geni_status',
- 'geni_resources'], None)
- # result.fromkeys(\
- # ['geni_urn','geni_error', 'pl_login','geni_status',
- # 'geni_resources'], None)
- # result['pl_login'] = one_slice['reg_researchers'][0].hrn
- result['cortexlab_login'] = one_slice['user']
- logger.debug("Slabdriver - sliver_status Sliver status \
- urn %s hrn %s single_slice %s \r\n "
- % (slice_urn, slice_hrn, single_slice))
-
- if 'node_ids' not in single_slice:
- #No job in the slice
- result['geni_status'] = top_level_status
- result['geni_resources'] = []
- return result
-
- top_level_status = 'ready'
-
- #A job is running on Iotlab for this slice
- # report about the local nodes that are in the slice only
-
- result['geni_urn'] = slice_urn
-
- resources = []
- for node_hostname in single_slice['node_ids']:
- res = {}
- res['cortexlab_hostname'] = node_hostname
- res['cortexlab_boot_state'] = \
- nodeall_byhostname[node_hostname]['boot_state']
-
- sliver_id = Xrn(
- slice_urn, type='slice',
- id=nodeall_byhostname[node_hostname]['node_id']).urn
-
- res['geni_urn'] = sliver_id
- #node_name = node['hostname']
- if nodeall_byhostname[node_hostname]['boot_state'] == 'Alive':
-
- res['geni_status'] = 'ready'
- else:
- res['geni_status'] = 'failed'
- top_level_status = 'failed'
-
- res['geni_error'] = ''
-
- resources.append(res)
-
- result['geni_status'] = top_level_status
- result['geni_resources'] = resources
- logger.debug("CORTEXLABDRIVER \tsliver_statusresources %s res %s "
- % (resources, res))
- return result
-
- def get_user_record(self, hrn):
- """
-
- Returns the user record based on the hrn from the SFA DB .
-
- :param hrn: user's hrn
- :type hrn: string
- :returns: user record from SFA database
- :rtype: RegUser
-
- """
- return self.api.dbsession().query(RegRecord).filter_by(hrn=hrn).first()
-
- def testbed_name(self):
- """
-
- Returns testbed's name.
- :returns: testbed authority name.
- :rtype: string
-
- """
- return self.hrn
-
-
- def _get_requested_leases_list(self, rspec):
- """
- Process leases in rspec depending on the rspec version (format)
- type. Find the lease requests in the rspec and creates
- a lease request list with the mandatory information ( nodes,
- start time and duration) of the valid leases (duration above or
- equal to the iotlab experiment minimum duration).
-
- :param rspec: rspec request received.
- :type rspec: RSpec
- :returns: list of lease requests found in the rspec
- :rtype: list
- """
- requested_lease_list = []
- for lease in rspec.version.get_leases():
- single_requested_lease = {}
- logger.debug("CORTEXLABDRIVER.PY \t \
- _get_requested_leases_list lease %s " % (lease))
-
- if not lease.get('lease_id'):
- if get_authority(lease['component_id']) == \
- self.testbed_shell.root_auth:
- single_requested_lease['hostname'] = \
- xrn_to_hostname(\
- lease.get('component_id').strip())
- single_requested_lease['start_time'] = \
- lease.get('start_time')
- single_requested_lease['duration'] = lease.get('duration')
- #Check the experiment's duration is valid before adding
- #the lease to the requested leases list
- duration_in_seconds = \
- int(single_requested_lease['duration'])
- if duration_in_seconds >= self.testbed_shell.GetMinExperimentDurationInGranularity():
- requested_lease_list.append(single_requested_lease)
-
- return requested_lease_list
-
- @staticmethod
- def _group_leases_by_start_time(requested_lease_list):
- """
- Create dict of leases by start_time, regrouping nodes reserved
- at the same time, for the same amount of time so as to
- define one job on OAR.
-
- :param requested_lease_list: list of leases
- :type requested_lease_list: list
- :returns: Dictionary with key = start time, value = list of leases
- with the same start time.
- :rtype: dictionary
-
- """
-
- requested_xp_dict = {}
- for lease in requested_lease_list:
-
- #In case it is an asap experiment start_time is empty
- if lease['start_time'] == '':
- lease['start_time'] = '0'
-
- if lease['start_time'] not in requested_xp_dict:
- if isinstance(lease['hostname'], str):
- lease['hostname'] = [lease['hostname']]
-
- requested_xp_dict[lease['start_time']] = lease
-
- else:
- job_lease = requested_xp_dict[lease['start_time']]
- if lease['duration'] == job_lease['duration']:
- job_lease['hostname'].append(lease['hostname'])
-
- return requested_xp_dict
-
-
- def _process_requested_xp_dict(self, rspec):
- """
- Turns the requested leases and information into a dictionary
- of requested jobs, grouped by starting time.
-
- :param rspec: RSpec received
- :type rspec : RSpec
- :rtype: dictionary
-
- """
- requested_lease_list = self._get_requested_leases_list(rspec)
- logger.debug("CORTEXLABDRIVER _process_requested_xp_dict \
- requested_lease_list %s" % (requested_lease_list))
- xp_dict = self._group_leases_by_start_time(requested_lease_list)
- logger.debug("CORTEXLABDRIVER _process_requested_xp_dict xp_dict\
- %s" % (xp_dict))
-
- return xp_dict
-
-
-
- def delete(self, slice_urns, options=None):
- """
- Deletes the lease associated with the slice hrn and the credentials
- if the slice belongs to iotlab. Answer to DeleteSliver.
-
- :param slice_urn: urn of the slice
- :type slice_urn: string
-
-
- :returns: 1 if the slice to delete was not found on iotlab,
- True if the deletion was successful, False otherwise otherwise.
-
- .. note:: Should really be named delete_leases because iotlab does
- not have any slivers, but only deals with leases. However,
- SFA api only have delete_sliver define so far. SA 13/05/2013
- .. note:: creds are unused, and are not used either in the dummy driver
- delete_sliver .
- """
- if options is None: options={}
- # collect sliver ids so we can update sliver allocation states after
- # we remove the slivers.
- aggregate = CortexlabAggregate(self)
- slivers = aggregate.get_slivers(slice_urns)
- if slivers:
- # slice_id = slivers[0]['slice_id']
- node_ids = []
- sliver_ids = []
- sliver_jobs_dict = {}
- for sliver in slivers:
- node_ids.append(sliver['node_id'])
- sliver_ids.append(sliver['sliver_id'])
- job_id = sliver['sliver_id'].split('+')[-1].split('-')[0]
- sliver_jobs_dict[job_id] = sliver['sliver_id']
- logger.debug("CORTEXLABDRIVER.PY delete_sliver slivers %s slice_urns %s"
- % (slivers, slice_urns))
- slice_hrn = urn_to_hrn(slice_urns[0])[0]
-
- sfa_slice_list = self.GetSlices(slice_filter=slice_hrn,
- slice_filter_type='slice_hrn')
-
- if not sfa_slice_list:
- return 1
-
- #Delete all leases in the slice
- for sfa_slice in sfa_slice_list:
- logger.debug("CORTEXLABDRIVER.PY delete_sliver slice %s" \
- % (sfa_slice))
- slices = CortexlabSlices(self)
- # determine if this is a peer slice
-
- peer = slices.get_peer(slice_hrn)
-
- logger.debug("CORTEXLABDRIVER.PY delete_sliver peer %s \
- \r\n \t sfa_slice %s " % (peer, sfa_slice))
- testbed_bool_ans = self.testbed_shell.DeleteSliceFromNodes(sfa_slice)
- for job_id in testbed_bool_ans:
- # if the job has not been successfully deleted
- # don't delete the associated sliver
- # remove it from the sliver list
- if testbed_bool_ans[job_id] is False:
- sliver = sliver_jobs_dict[job_id]
- sliver_ids.remove(sliver)
- try:
-
- dbsession = self.api.dbsession()
- SliverAllocation.delete_allocations(sliver_ids, dbsession)
- except :
- logger.log_exc("CORTEXLABDRIVER.PY delete error ")
-
- # prepare return struct
- geni_slivers = []
- for sliver in slivers:
- geni_slivers.append(
- {'geni_sliver_urn': sliver['sliver_id'],
- 'geni_allocation_status': 'geni_unallocated',
- 'geni_expires': datetime_to_string(utcparse(sliver['expires']))})
- return geni_slivers
-
- def list_slices(self, creds, options):
- """Answer to ListSlices.
-
- List slices belonging to iotlab, returns slice urns list.
- No caching used. Options unused but are defined in the SFA method
- api prototype.
-
- :returns: slice urns list
- :rtype: list
-
- .. note:: creds are unused- SA 12/12/13
- """
- # look in cache first
- #if self.cache:
- #slices = self.cache.get('slices')
- #if slices:
- #logger.debug("PlDriver.list_slices returns from cache")
- #return slices
-
- # get data from db
-
- slices = self.GetSlices()
- logger.debug("CORTEXLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n"
- % (slices))
- slice_hrns = [iotlab_slice['hrn'] for iotlab_slice in slices]
-
- slice_urns = [hrn_to_urn(slice_hrn, 'slice')
- for slice_hrn in slice_hrns]
-
- # cache the result
- #if self.cache:
- #logger.debug ("IotlabDriver.list_slices stores value in cache")
- #self.cache.add('slices', slice_urns)
-
- return slice_urns
-
-
- def register(self, sfa_record, hrn, pub_key):
- """
- Adding new user, slice, node or site should not be handled
- by SFA.
-
- ..warnings:: should not be used. Different components are in charge of
- doing this task. Adding nodes = OAR
- Adding users = LDAP Iotlab
- Adding slice = Import from LDAP users
- Adding site = OAR
-
- :param sfa_record: record provided by the client of the
- Register API call.
- :type sfa_record: dict
- :param pub_key: public key of the user
- :type pub_key: string
-
- .. note:: DOES NOTHING. Returns -1.
-
- """
- return -1
-
- def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
- """
- No site or node record update allowed in Iotlab. The only modifications
- authorized here are key deletion/addition on an existing user and
- password change. On an existing user, CAN NOT BE MODIFIED: 'first_name',
- 'last_name', 'email'. DOES NOT EXIST IN SENSLAB: 'phone', 'url', 'bio',
- 'title', 'accepted_aup'. A slice is bound to its user, so modifying the
- user's ssh key should nmodify the slice's GID after an import procedure.
-
- :param old_sfa_record: what is in the db for this hrn
- :param new_sfa_record: what was passed to the update call
- :param new_key: the new user's public key
- :param hrn: the user's sfa hrn
- :type old_sfa_record: dict
- :type new_sfa_record: dict
- :type new_key: string
- :type hrn: string
-
- TODO: needs review
- .. warning:: SA 12/12/13 - Removed. should be done in iotlabimporter
- since users, keys and slice are managed by the LDAP.
-
- """
- # pointer = old_sfa_record['pointer']
- # old_sfa_record_type = old_sfa_record['type']
-
- # # new_key implemented for users only
- # if new_key and old_sfa_record_type not in ['user']:
- # raise UnknownSfaType(old_sfa_record_type)
-
- # if old_sfa_record_type == "user":
- # update_fields = {}
- # all_fields = new_sfa_record
- # for key in all_fields.keys():
- # if key in ['key', 'password']:
- # update_fields[key] = all_fields[key]
-
- # if new_key:
- # # must check this key against the previous one if it exists
- # persons = self.testbed_shell.GetPersons([old_sfa_record])
- # person = persons[0]
- # keys = [person['pkey']]
- # #Get all the person's keys
- # keys_dict = self.GetKeys(keys)
-
- # # Delete all stale keys, meaning the user has only one key
- # #at a time
- # #TODO: do we really want to delete all the other keys?
- # #Is this a problem with the GID generation to have multiple
- # #keys? SA 30/05/13
- # key_exists = False
- # if key in keys_dict:
- # key_exists = True
- # else:
- # #remove all the other keys
- # for key in keys_dict:
- # self.testbed_shell.DeleteKey(person, key)
- # self.testbed_shell.AddPersonKey(
- # person, {'sshPublicKey': person['pkey']},
- # {'sshPublicKey': new_key})
- logger.warning ("UNDEFINED - Update should be done by the \
- iotlabimporter")
- return True
-
-
- def remove(self, sfa_record):
- """
-
- Removes users only. Mark the user as disabled in LDAP. The user and his
- slice are then deleted from the db by running an import on the registry.
-
- :param sfa_record: record is the existing sfa record in the db
- :type sfa_record: dict
-
- ..warning::As fas as the slice is concerned, here only the leases are
- removed from the slice. The slice is record itself is not removed
- from the db.
-
- TODO: needs review
-
- TODO : REMOVE SLICE FROM THE DB AS WELL? SA 14/05/2013,
-
- TODO: return boolean for the slice part
- """
- sfa_record_type = sfa_record['type']
- hrn = sfa_record['hrn']
- if sfa_record_type == 'user':
-
- #get user from iotlab ldap
- person = self.testbed_shell.GetPersons(sfa_record)
- #No registering at a given site in Iotlab.
- #Once registered to the LDAP, all iotlab sites are
- #accesible.
- if person:
- #Mark account as disabled in ldap
- return self.testbed_shell.DeletePerson(sfa_record)
-
- elif sfa_record_type == 'slice':
- if self.GetSlices(slice_filter=hrn,
- slice_filter_type='slice_hrn'):
- ret = self.testbed_shell.DeleteSlice(sfa_record)
- return True
-
- def check_sliver_credentials(self, creds, urns):
- """Check that the sliver urns belongs to the slice specified in the
- credentials.
-
- :param urns: list of sliver urns.
- :type urns: list.
- :param creds: slice credentials.
- :type creds: Credential object.
-
-
- """
- # build list of cred object hrns
- slice_cred_names = []
- for cred in creds:
- slice_cred_hrn = Credential(cred=cred).get_gid_object().get_hrn()
- slicename = IotlabXrn(xrn=slice_cred_hrn).iotlab_slicename()
- slice_cred_names.append(slicename)
-
- # look up slice name of slivers listed in urns arg
-
- slice_ids = []
- for urn in urns:
- sliver_id_parts = Xrn(xrn=urn).get_sliver_id_parts()
- try:
- slice_ids.append(int(sliver_id_parts[0]))
- except ValueError:
- pass
-
- if not slice_ids:
- raise Forbidden("sliver urn not provided")
-
- slices = self.GetSlices(slice_ids)
- sliver_names = [single_slice['name'] for single_slice in slices]
-
- # make sure we have a credential for every specified sliver
- for sliver_name in sliver_names:
- if sliver_name not in slice_cred_names:
- msg = "Valid credential not found for target: %s" % sliver_name
- raise Forbidden(msg)
-
- ########################################
- ########## aggregate oriented
- ########################################
-
- # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
- def aggregate_version(self):
- """
-
- Returns the testbed's supported rspec advertisement and request
- versions.
- :returns: rspec versions supported ad a dictionary.
- :rtype: dict
-
- """
- version_manager = VersionManager()
- ad_rspec_versions = []
- request_rspec_versions = []
- for rspec_version in version_manager.versions:
- if rspec_version.content_type in ['*', 'ad']:
- ad_rspec_versions.append(rspec_version.to_dict())
- if rspec_version.content_type in ['*', 'request']:
- request_rspec_versions.append(rspec_version.to_dict())
- return {
- 'testbed': self.testbed_name(),
- 'geni_request_rspec_versions': request_rspec_versions,
- 'geni_ad_rspec_versions': ad_rspec_versions}
-
-
-
- # first 2 args are None in case of resource discovery
- def list_resources (self, version=None, options=None):
- if options is None: options={}
- aggregate = CortexlabAggregate(self)
- rspec = aggregate.list_resources(version=version, options=options)
- return rspec
-
-
- def describe(self, urns, version, options=None):
- if options is None: options={}
- aggregate = CortexlabAggregate(self)
- return aggregate.describe(urns, version=version, options=options)
-
- def status (self, urns, options=None):
- if options is None: options={}
- aggregate = CortexlabAggregate(self)
- desc = aggregate.describe(urns, version='GENI 3')
- status = {'geni_urn': desc['geni_urn'],
- 'geni_slivers': desc['geni_slivers']}
- return status
-
-
- def allocate (self, urn, rspec_string, expiration, options=None):
- if options is None: options={}
- xrn = Xrn(urn)
- aggregate = CortexlabAggregate(self)
-
- slices = CortexlabSlices(self)
- peer = slices.get_peer(xrn.get_hrn())
- sfa_peer = slices.get_sfa_peer(xrn.get_hrn())
-
- slice_record = None
- users = options.get('geni_users', [])
-
- sfa_users = options.get('sfa_users', [])
- if sfa_users:
- slice_record = sfa_users[0].get('slice_record', [])
-
- # parse rspec
- rspec = RSpec(rspec_string)
- # requested_attributes = rspec.version.get_slice_attributes()
-
- # ensure site record exists
-
- # ensure slice record exists
-
- current_slice = slices.verify_slice(xrn.hrn, slice_record, sfa_peer)
- logger.debug("IOTLABDRIVER.PY \t ===============allocate \t\
- \r\n \r\n current_slice %s" % (current_slice))
- # ensure person records exists
-
- # oui c'est degueulasse, le slice_record se retrouve modifie
- # dans la methode avec les infos du user, els infos sont propagees
- # dans verify_slice_leases
- persons = slices.verify_persons(xrn.hrn, slice_record, users,
- options=options)
- # ensure slice attributes exists
- # slices.verify_slice_attributes(slice, requested_attributes,
- # options=options)
-
- # add/remove slice from nodes
- requested_xp_dict = self._process_requested_xp_dict(rspec)
-
- logger.debug("IOTLABDRIVER.PY \tallocate requested_xp_dict %s "
- % (requested_xp_dict))
- request_nodes = rspec.version.get_nodes_with_slivers()
- nodes_list = []
- for start_time in requested_xp_dict:
- lease = requested_xp_dict[start_time]
- for hostname in lease['hostname']:
- nodes_list.append(hostname)
-
- # nodes = slices.verify_slice_nodes(slice_record,request_nodes, peer)
- logger.debug("IOTLABDRIVER.PY \tallocate nodes_list %s slice_record %s"
- % (nodes_list, slice_record))
-
- # add/remove leases
- rspec_requested_leases = rspec.version.get_leases()
- leases = slices.verify_slice_leases(slice_record,
- requested_xp_dict, peer)
- logger.debug("IOTLABDRIVER.PY \tallocate leases %s \
- rspec_requested_leases %s" % (leases,
- rspec_requested_leases))
- # update sliver allocations
- for hostname in nodes_list:
- client_id = hostname
- node_urn = xrn_object(self.testbed_shell.root_auth, hostname).urn
- component_id = node_urn
- slice_urn = current_slice['reg-urn']
- for lease in leases:
- if hostname in lease['reserved_nodes']:
- index = lease['reserved_nodes'].index(hostname)
- sliver_hrn = '%s.%s-%s' % (self.hrn, lease['lease_id'],
- lease['resource_ids'][index] )
- sliver_id = Xrn(sliver_hrn, type='sliver').urn
- record = SliverAllocation(sliver_id=sliver_id, client_id=client_id,
- component_id=component_id,
- slice_urn = slice_urn,
- allocation_state='geni_allocated')
- record.sync(self.api.dbsession())
-
- return aggregate.describe([xrn.get_urn()], version=rspec.version)
-
- def provision(self, urns, options=None):
- if options is None: options={}
- # update users
- slices = CortexlabSlices(self)
- aggregate = CortexlabAggregate(self)
- slivers = aggregate.get_slivers(urns)
- current_slice = slivers[0]
- peer = slices.get_peer(current_slice['hrn'])
- sfa_peer = slices.get_sfa_peer(current_slice['hrn'])
- users = options.get('geni_users', [])
- # persons = slices.verify_persons(current_slice['hrn'],
- # current_slice, users, peer, sfa_peer, options=options)
- # slices.handle_peer(None, None, persons, peer)
- # update sliver allocation states and set them to geni_provisioned
- sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- dbsession = self.api.dbsession()
- SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned',
- dbsession)
- version_manager = VersionManager()
- rspec_version = version_manager.get_version(options[
- 'geni_rspec_version'])
- return self.describe(urns, rspec_version, options=options)
-
-
-
+++ /dev/null
-"""
-File used to handle all the nodes querying:
-* get nodes list along with their properties with get_all_nodes
-
-* get sites and their properties with get_sites.
-
-* get nodes involved in leases sorted by lease id, with get_reserved_nodes.
-
-* create a lease (schedule an experiment) with schedule_experiment.
-
-* delete a lease with delete_experiment.
-
-"""
-
-class CortexlabQueryNodes:
- def __init__(self):
-
- pass
-
- def get_all_nodes(self, node_filter_dict=None, return_fields_list=None):
- """
- Get all the nodes and their properties. Called by GetNodes.
- Filtering on nodes properties can be done here or in GetNodes.
- Search for specific nodes if some filters are specified. Returns all
- the nodes properties if return_fields_list is None.
-
-
- :param node_filter_dict: dictionary of lists with node properties. For
- instance, if you want to look for a specific node with its hrn,
- the node_filter_dict should be {'hrn': [hrn_of_the_node]}
- :type node_filter_dict: dict
- :param return_fields_list: list of specific fields the user wants to be
- returned.
- :type return_fields_list: list
- :returns: list of dictionaries with node properties
- :rtype: list
-
- TODO: Define which properties have to be listed here. Useful ones:
- node architecture, radio type, position (x,y,z)
- """
- node_dict_list = None
- # Get the nodes here, eventually filter here
- # See iotlabapi.py GetNodes to get the filtering (node_filter_dict and
- # return_fields_list ) part, if necessary
- # Format used in iotlab
- node_dict_list = [
- {'hrn': 'iotlab.wsn430-11.devlille.iot-lab.info',
- 'archi': 'wsn430', 'mobile': 'True',
- 'hostname': 'wsn430-11.devlille.iot-lab.info',
- 'site': 'devlille', 'mobility_type': 'None',
- 'boot_state': 'Suspected',
- 'node_id': 'wsn430-11.devlille.iot-lab.info',
- 'radio': 'cc2420', 'posx': '2.3', 'posy': '2.3',
- 'node_number': 11, 'posz': '1'},
- {'hrn': 'iotlab.wsn430-10.devlille.iot-lab.info',
- 'archi': 'wsn430', 'mobile': 'True',
- 'hostname': 'wsn430-10.devlille.iot-lab.info',
- 'site': 'devlille', 'mobility_type': 'None',
- 'boot_state': 'Alive', 'node_id': 'wsn430-10.devlille.iot-lab.info',
- 'radio': 'cc2420', 'posx': '1.3', 'posy': '2.3', 'node_number': 10,
- 'posz': '1'},
- {'hrn': 'iotlab.wsn430-1.devlille.iot-lab.info',
- 'archi': 'wsn430', 'mobile': 'False',
- 'hostname': 'wsn430-1.devlille.iot-lab.info',
- 'site': 'devlille', 'mobility_type': 'None',
- 'boot_state': 'Alive', 'node_id': 'wsn430-1.devlille.iot-lab.info',
- 'radio': 'cc2420', 'posx': '0.3', 'posy': '0.3', 'node_number': 1,
- 'posz': '1'} ]
- return node_dict_list
-
-
-
-
- def get_sites(self, site_filter_name_list=None, return_fields_list=None):
-
- """Get the different cortexlab sites and for each sites, the nodes
- hostnames on this site.
-
- :param site_filter_name_list: used to specify specific sites
- :param return_fields_list: fields that has to be returned
- :type site_filter_name_list: list
- :type return_fields_list: list
- :rtype: list of dictionaries
- """
- site_dict_list = None
- site_dict_list = [
- {'address_ids': [], 'slice_ids': [], 'name': 'iotlab',
- 'node_ids': [u'wsn430-11.devlille.iot-lab.info',
- u'wsn430-10.devlille.iot-lab.info', u'wsn430-1.devlille.iot-lab.info'],
- 'url': 'https://portal.senslab.info', 'person_ids': [],
- 'site_tag_ids': [], 'enabled': True, 'site': 'devlille',
- 'longitude': '- 2.10336', 'pcu_ids': [], 'max_slivers': None,
- 'max_slices': None, 'ext_consortium_id': None, 'date_created': None,
- 'latitude': '48.83726', 'is_public': True, 'peer_site_id': None,
- 'peer_id': None, 'abbreviated_name': 'iotlab'}]
- # list of dict with mandatory keys ['name', 'node_ids', 'longitude',
- # 'site' ]. Value for key node_ids is a hostname list.
- # See iotlabapi.py GetSites to get the filtering
- return site_dict_list
-
-
- def get_reserved_nodes(self, username):
- """Get list of leases. Get the leases for the username if specified,
- otherwise get all the leases.
- :param username: user's LDAP login
- :type username: string
- :returns: list of reservations dict
- :rtype: list of dictionaries
-
- """
- reserved_nodes_list_dict = None
-
- reserved_nodes_list_dict = [{'lease_id': 1658,
- 'reserved_nodes': [ 'wsn430-11.devlille.iot-lab.info'], 'state':
- 'Waiting', 'user': 'avakian', 'resource_ids': [11],
- 't_from': 1412938800, 't_until': 1412942640}]
-
- return reserved_nodes_list_dict
-
- def schedule_experiment(self, lease_dict):
- """Schedule/ run an experiment based on the information provided in the
- lease dictionary.
-
- :param lease_dict: contains lease_start_time, lease_duration,
- added_nodes, slice_name , slice_user, grain:
- :type lease_dict: dictionary
- :rtype: dict
- """
- answer = {}
- answer['id'] = None #experiment id
- answer['msg'] = None #message in case of error
-
-
- answer['id'] = 1659
-
- # Launch the experiment here
-
- return answer
-
- def delete_experiment(self, experiment_id, username):
- """
- Delete the experiment designated by its experiment id and its
- user.
- TODO: If the username is not necessary to delete the lease, then you can
- remove it from the parameters, given that you propagate the changes
-
- :param experiment_id: experiment identifier
- :type experiment_id : integer
- :param username: user's LDAP login
- :type experiment_id: integer
- :type username: string
- :returns: dict with delete status {'status': True of False}
- :rtype: dict
- """
- # Delete the experiment here. Ret['status'] should be True or False
- # depending if the delete was effective or not.
- ret = {}
- ret['status'] = None
- return ret
+++ /dev/null
-"""
-File defining classes to handle the table in the iotlab dedicated database.
-"""
-
-from sqlalchemy import create_engine
-from sqlalchemy.orm import sessionmaker
-# from sfa.util.config import Config
-from sfa.util.sfalogging import logger
-
-from sqlalchemy import Column, Integer, String
-from sqlalchemy import Table, MetaData
-from sqlalchemy.ext.declarative import declarative_base
-
-# from sqlalchemy.dialects import postgresql
-
-from sqlalchemy.exc import NoSuchTableError
-
-
-#Dict holding the columns names of the table as keys
-#and their type, used for creation of the table
-slice_table = {'record_id_user': 'integer PRIMARY KEY references X ON DELETE \
- CASCADE ON UPDATE CASCADE', 'oar_job_id': 'integer DEFAULT -1',
- 'record_id_slice': 'integer', 'slice_hrn': 'text NOT NULL'}
-
-#Dict with all the specific iotlab tables
-tablenames_dict = {'iotlab_xp': slice_table}
-
-
-IotlabBase = declarative_base()
-
-
-class IotlabXP (IotlabBase):
- """ SQL alchemy class to manipulate the rows of the slice_iotlab table in
- iotlab_sfa database. Handles the records representation and creates the
- table if it does not exist yet.
-
- """
- __tablename__ = 'iotlab_xp'
-
- slice_hrn = Column(String)
- job_id = Column(Integer, primary_key=True)
- end_time = Column(Integer, nullable=False)
-
- def __init__(self, slice_hrn=None, job_id=None, end_time=None):
- """
- Defines a row of the slice_iotlab table
- """
- if slice_hrn:
- self.slice_hrn = slice_hrn
- if job_id:
- self.job_id = job_id
- if end_time:
- self.end_time = end_time
-
- def __repr__(self):
- """Prints the SQLAlchemy record to the format defined
- by the function.
- """
- result = "<iotlab_xp : slice_hrn = %s , job_id %s end_time = %s" \
- % (self.slice_hrn, self.job_id, self.end_time)
- result += ">"
- return result
-
-
-class IotlabDB(object):
- """ SQL Alchemy connection class.
- From alchemy.py
- """
- # Stores the unique Singleton instance-
- _connection_singleton = None
- # defines the database name
- dbname = "iotlab_sfa"
-
- class Singleton:
- """
- Class used with this Python singleton design pattern to allow the
- definition of one single instance of iotlab db session in the whole
- code. Wherever a connection to the database is needed, this class
- returns the same instance every time. Removes the need for global
- variable throughout the code.
- """
-
- def __init__(self, config, debug=False):
- self.iotlab_engine = None
- self.iotlab_session = None
- self.url = None
- self.create_iotlab_engine(config, debug)
- self.session()
-
- def create_iotlab_engine(self, config, debug=False):
- """Creates the SQLAlchemy engine, which is the starting point for
- any SQLAlchemy application.
- :param config: configuration object created by SFA based on the
- configuration file in /etc
- :param debug: if set to true, echo and echo pool will be set to true
- as well. If echo is True, all statements as well as a repr() of
- their parameter lists to the engines logger, which defaults to
- sys.stdout. If echo_pool is True, the connection pool will log all
- checkouts/checkins to the logging stream. A python logger can be
- used to configure this logging directly but so far it has not been
- configured. Refer to sql alchemy engine documentation.
-
- :type config: Config instance (sfa.util.config)
- :type debug: bool
-
- """
-
- if debug is True:
- l_echo_pool = True
- l_echo = True
- else:
- l_echo_pool = False
- l_echo = False
- # the former PostgreSQL.py used the psycopg2 directly and was doing
- #self.connection.set_client_encoding("UNICODE")
- # it's unclear how to achieve this in sqlalchemy, nor if it's needed
- # at all
- # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
- # we indeed have /var/lib/pgsql/data/postgresql.conf where
- # this setting is unset, it might be an angle to tweak that if need
- # be try a unix socket first
- # - omitting the hostname does the trick
- unix_url = "postgresql+psycopg2://%s:%s@:%s/%s" \
- % (config.SFA_DB_USER, config.SFA_DB_PASSWORD,
- config.SFA_DB_PORT, IotlabDB.dbname)
-
- # the TCP fallback method
- tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s" \
- % (config.SFA_DB_USER, config.SFA_DB_PASSWORD,
- config.SFA_DB_HOST, config.SFA_DB_PORT, IotlabDB.dbname)
-
- for url in [unix_url, tcp_url]:
- try:
- self.iotlab_engine = create_engine(
- url, echo_pool=l_echo_pool, echo=l_echo)
- self.check()
- self.url = url
- return
- except:
- pass
- self.iotlab_engine = None
-
- raise Exception("Could not connect to database")
-
- def check(self):
- """ Check if a table exists by trying a selection
- on the table.
-
- """
- self.iotlab_engine.execute("select 1").scalar()
-
-
- def session(self):
- """
- Creates a SQLalchemy session. Once the session object is created
- it should be used throughout the code for all the operations on
- tables for this given database.
-
- """
- if self.iotlab_session is None:
- Session = sessionmaker()
- self.iotlab_session = Session(bind=self.iotlab_engine)
- return self.iotlab_session
-
- def close_session(self):
- """
- Closes connection to database.
-
- """
- if self.iotlab_session is None:
- return
- self.iotlab_session.close()
- self.iotlab_session = None
-
-
- def update_jobs_in_iotlabdb(self, job_oar_list, jobs_psql):
- """ Cleans the iotlab db by deleting expired and cancelled jobs.
-
- Compares the list of job ids given by OAR with the job ids that
- are already in the database, deletes the jobs that are no longer in
- the OAR job id list.
-
- :param job_oar_list: list of job ids coming from OAR
- :type job_oar_list: list
- :param job_psql: list of job ids from the database.
- :type job_psql: list
-
- :returns: None
- """
- #Turn the list into a set
- set_jobs_psql = set(jobs_psql)
-
- kept_jobs = set(job_oar_list).intersection(set_jobs_psql)
- logger.debug("\r\n \t update_jobs_in_iotlabdb jobs_psql %s \r\n \
- job_oar_list %s kept_jobs %s "
- % (set_jobs_psql, job_oar_list, kept_jobs))
- deleted_jobs = set_jobs_psql.difference(kept_jobs)
- deleted_jobs = list(deleted_jobs)
- if len(deleted_jobs) > 0:
- self.iotlab_session.query(IotlabXP).filter(IotlabXP.job_id.in_(deleted_jobs)).delete(synchronize_session='fetch')
- self.iotlab_session.commit()
- return
-
- def __init__(self, config, debug=False):
- self.sl_base = IotlabBase
-
- # Check whether we already have an instance
- if IotlabDB._connection_singleton is None:
- IotlabDB._connection_singleton = IotlabDB.Singleton(config, debug)
-
- # Store instance reference as the only member in the handle
- self._EventHandler_singleton = IotlabDB._connection_singleton
-
- def __getattr__(self, aAttr):
- """
- Delegate access to implementation.
-
- :param aAttr: Attribute wanted.
- :returns: Attribute
- """
- return getattr(self._connection_singleton, aAttr)
-
-
-
- # def __setattr__(self, aAttr, aValue):
- # """Delegate access to implementation.
-
- # :param attr: Attribute wanted.
- # :param value: Vaule to be set.
- # :return: Result of operation.
- # """
- # return setattr(self._connection_singleton, aAttr, aValue)
-
- def exists(self, tablename):
- """
- Checks if the table specified as tablename exists.
- :param tablename: name of the table in the db that has to be checked.
- :type tablename: string
- :returns: True if the table exists, False otherwise.
- :rtype: bool
-
- """
- metadata = MetaData(bind=self.iotlab_engine)
- try:
- table = Table(tablename, metadata, autoload=True)
- return True
-
- except NoSuchTableError:
- logger.log_exc("IOTLABPOSTGRES tablename %s does not exist"
- % (tablename))
- return False
-
- def createtable(self):
- """
- Creates all the table sof the engine.
- Uses the global dictionnary holding the tablenames and the table schema.
-
- """
-
- logger.debug("IOTLABPOSTGRES createtable \
- IotlabBase.metadata.sorted_tables %s \r\n engine %s"
- % (IotlabBase.metadata.sorted_tables, self.iotlab_engine))
- IotlabBase.metadata.create_all(self.iotlab_engine)
- return
+++ /dev/null
-"""
-File containing the CortexlabShell, used to interact with nodes, users,
-slices, leases and keys, as well as the dedicated iotlab database and table,
-holding information about which slice is running which job.
-
-"""
-from datetime import datetime
-
-from sfa.util.sfalogging import logger
-from sfa.util.sfatime import SFATIME_FORMAT
-
-from sfa.iotlab.iotlabpostgres import LeaseTableXP
-from sfa.cortexlab.LDAPapi import LDAPapi
-
-
-
-from sfa.iotlab.iotlabxrn import xrn_object
-from sfa.cortexlab.cortexlabnodes import CortexlabQueryNodes
-
-class CortexlabShell():
- """ Class enabled to use LDAP and OAR api calls. """
-
- _MINIMUM_DURATION = 10 # 10 units of granularity 60 s, 10 mins
-
- def __init__(self, config):
- """Creates an instance of OARrestapi and LDAPapi which will be used to
- issue calls to OAR or LDAP methods.
- Set the time format and the testbed granularity used for OAR
- reservation and leases.
-
- :param config: configuration object from sfa.util.config
- :type config: Config object
- """
-
- self.query_sites = CortexlabQueryNodes()
- self.ldap = LDAPapi()
- self.time_format = SFATIME_FORMAT
- self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
- self.grain = 60 # 10 mins lease minimum, 60 sec granularity
- #import logging, logging.handlers
- #from sfa.util.sfalogging import _SfaLogger
- #sql_logger = _SfaLogger(loggername = 'sqlalchemy.engine', \
- #level=logging.DEBUG)
- return
-
- @staticmethod
- def GetMinExperimentDurationInGranularity():
- """ Returns the minimum allowed duration for an experiment on the
- testbed. In seconds.
-
- """
- return CortexlabShell._MINIMUM_DURATION
-
- #TODO : Handling OR request in make_ldap_filters_from_records
- #instead of the for loop
- #over the records' list
- def GetPersons(self, person_filter=None):
- """
- Get the enabled users and their properties from Cortexlab LDAP.
- If a filter is specified, looks for the user whose properties match
- the filter, otherwise returns the whole enabled users'list.
-
- :param person_filter: Must be a list of dictionnaries with users
- properties when not set to None.
- :type person_filter: list of dict
-
- :returns: Returns a list of users whose accounts are enabled
- found in ldap.
- :rtype: list of dicts
-
- """
- logger.debug("CORTEXLAB_API \tGetPersons person_filter %s"
- % (person_filter))
- person_list = []
- if person_filter and isinstance(person_filter, list):
- #If we are looking for a list of users (list of dict records)
- #Usually the list contains only one user record
- for searched_attributes in person_filter:
-
- #Get only enabled user accounts in iotlab LDAP :
- #add a filter for make_ldap_filters_from_record
- person = self.ldap.LdapFindUser(searched_attributes,
- is_user_enabled=True)
- #If a person was found, append it to the list
- if person:
- person_list.append(person)
-
- #If the list is empty, return None
- if len(person_list) is 0:
- person_list = None
-
- else:
- #Get only enabled user accounts in iotlab LDAP :
- #add a filter for make_ldap_filters_from_record
- person_list = self.ldap.LdapFindUser(is_user_enabled=True)
-
- return person_list
-
-
-
- def DeleteOneLease(self, lease_id, username):
- """
-
- Deletes the lease with the specified lease_id and username on OAR by
- posting a delete request to OAR.
-
- :param lease_id: Reservation identifier.
- :param username: user's iotlab login in LDAP.
- :type lease_id: Depends on what tou are using, could be integer or
- string
- :type username: string
-
- :returns: dictionary with the lease id and if delete has been successful
- (True) or no (False)
- :rtype: dict
-
- """
-
- # Here delete the lease specified
- answer = self.query_sites.delete_experiment(lease_id, username)
-
- # If the username is not necessary to delete the lease, then you can
- # remove it from the parameters, given that you propagate the changes
- # Return delete status so that you know if the delete has been
- # successuf or not
-
-
- if answer['status'] is True:
- ret = {lease_id: True}
- else:
- ret = {lease_id: False}
- logger.debug("CORTEXLAB_API \DeleteOneLease lease_id %s \r\n answer %s \
- username %s" % (lease_id, answer, username))
- return ret
-
-
-
- def GetNodesCurrentlyInUse(self):
- """Returns a list of all the nodes involved in a currently running
- experiment (and only the one not available at the moment the call to
- this method is issued)
- :rtype: list of nodes hostnames.
- """
- node_hostnames_list = []
- return node_hostnames_list
-
-
- def GetReservedNodes(self, username=None):
- """ Get list of leases. Get the leases for the username if specified,
- otherwise get all the leases. Finds the nodes hostnames for each
- OAR node identifier.
- :param username: user's LDAP login
- :type username: string
- :returns: list of reservations dict
- :rtype: dict list
- """
-
- #Get the nodes in use and the reserved nodes
- mandatory_sfa_keys = ['reserved_nodes','lease_id']
- reservation_dict_list = \
- self.query_sites.get_reserved_nodes(username = username)
-
- if len(reservation_dict_list) == 0:
- return []
-
- else:
- # Ensure mandatory keys are in the dict
- if not self.ensure_format_is_valid(reservation_dict_list,
- mandatory_sfa_keys):
- raise KeyError, "GetReservedNodes : Missing SFA mandatory keys"
-
-
- return reservation_dict_list
-
- @staticmethod
- def ensure_format_is_valid(list_dictionary_to_check, mandatory_keys_list):
- for entry in list_dictionary_to_check:
- if not all (key in entry for key in mandatory_keys_list):
- return False
- return True
-
- def GetNodes(self, node_filter_dict=None, return_fields_list=None):
- """
-
- Make a list of cortexlab nodes and their properties from information
- given by ?. Search for specific nodes if some filters are
- specified. Nodes properties returned if no return_fields_list given:
- 'hrn','archi','mobile','hostname','site','boot_state','node_id',
- 'radio','posx','posy,'posz'.
-
- :param node_filter_dict: dictionnary of lists with node properties. For
- instance, if you want to look for a specific node with its hrn,
- the node_filter_dict should be {'hrn': [hrn_of_the_node]}
- :type node_filter_dict: dict
- :param return_fields_list: list of specific fields the user wants to be
- returned.
- :type return_fields_list: list
- :returns: list of dictionaries with node properties. Mandatory
- properties hrn, site, hostname. Complete list (iotlab) ['hrn',
- 'archi', 'mobile', 'hostname', 'site', 'mobility_type',
- 'boot_state', 'node_id','radio', 'posx', 'posy', 'oar_id', 'posz']
- Radio, archi, mobile and position are useful to help users choose
- the appropriate nodes.
- :rtype: list
-
- :TODO: FILL IN THE BLANKS
- """
-
- # Here get full dict of nodes with all their properties.
- mandatory_sfa_keys = ['hrn', 'site', 'hostname']
- node_list_dict = self.query_sites.get_all_nodes(node_filter_dict,
- return_fields_list)
-
- if len(node_list_dict) == 0:
- return_node_list = []
-
- else:
- # Ensure mandatory keys are in the dict
- if not self.ensure_format_is_valid(node_list_dict,
- mandatory_sfa_keys):
- raise KeyError, "GetNodes : Missing SFA mandatory keys"
-
-
- return_node_list = node_list_dict
- return return_node_list
-
-
-
-
- def GetSites(self, site_filter_name_list=None, return_fields_list=None):
- """Returns the list of Cortexlab's sites with the associated nodes and
- the sites' properties as dictionaries. Used in import.
-
- Site properties:
- ['address_ids', 'slice_ids', 'name', 'node_ids', 'url', 'person_ids',
- 'site_tag_ids', 'enabled', 'site', 'longitude', 'pcu_ids',
- 'max_slivers', 'max_slices', 'ext_consortium_id', 'date_created',
- 'latitude', 'is_public', 'peer_site_id', 'peer_id', 'abbreviated_name']
- can be empty ( []): address_ids, slice_ids, pcu_ids, person_ids,
- site_tag_ids
-
- :param site_filter_name_list: used to specify specific sites
- :param return_fields_list: field that has to be returned
- :type site_filter_name_list: list
- :type return_fields_list: list
- :rtype: list of dicts
-
- """
- site_list_dict = self.query_sites.get_sites(site_filter_name_list,
- return_fields_list)
-
- mandatory_sfa_keys = ['name', 'node_ids', 'longitude','site' ]
-
- if len(site_list_dict) == 0:
- return_site_list = []
-
- else:
- # Ensure mandatory keys are in the dict
- if not self.ensure_format_is_valid(site_list_dict,
- mandatory_sfa_keys):
- raise KeyError, "GetSites : Missing sfa mandatory keys"
-
- return_site_list = site_list_dict
- return return_site_list
-
-
- #TODO : Check rights to delete person
- def DeletePerson(self, person_record):
- """Disable an existing account in cortexlab LDAP.
-
- Users and techs can only delete themselves. PIs can only
- delete themselves and other non-PIs at their sites.
- ins can delete anyone.
-
- :param person_record: user's record
- :type person_record: dict
- :returns: True if successful, False otherwise.
- :rtype: boolean
-
- .. todo:: CHECK THAT ONLY THE USER OR ADMIN CAN DEL HIMSELF.
- """
- #Disable user account in iotlab LDAP
- ret = self.ldap.LdapMarkUserAsDeleted(person_record)
- logger.warning("CORTEXLAB_API DeletePerson %s " % (person_record))
- return ret['bool']
-
- def DeleteSlice(self, slice_record):
- """Deletes the specified slice and kills the jobs associated with
- the slice if any, using DeleteSliceFromNodes.
-
- :param slice_record: record of the slice, must contain experiment_id, user
- :type slice_record: dict
- :returns: True if all the jobs in the slice have been deleted,
- or the list of jobs that could not be deleted otherwise.
- :rtype: list or boolean
-
- .. seealso:: DeleteSliceFromNodes
-
- """
- ret = self.DeleteSliceFromNodes(slice_record)
- delete_failed = None
- for experiment_id in ret:
- if False in ret[experiment_id]:
- if delete_failed is None:
- delete_failed = []
- delete_failed.append(experiment_id)
-
- logger.info("CORTEXLAB_API DeleteSlice %s answer %s"%(slice_record, \
- delete_failed))
- return delete_failed or True
-
-
- #TODO AddPersonKey 04/07/2012 SA
- def AddPersonKey(self, person_uid, old_attributes_dict, new_key_dict):
- """Adds a new key to the specified account. Adds the key to the
- iotlab ldap, provided that the person_uid is valid.
-
- Non-admins can only modify their own keys.
-
- :param person_uid: user's iotlab login in LDAP
- :param old_attributes_dict: dict with the user's old sshPublicKey
- :param new_key_dict: dict with the user's new sshPublicKey
- :type person_uid: string
-
-
- :rtype: Boolean
- :returns: True if the key has been modified, False otherwise.
-
- """
- ret = self.ldap.LdapModify(person_uid, old_attributes_dict, \
- new_key_dict)
- logger.warning("CORTEXLAB_API AddPersonKey EMPTY - DO NOTHING \r\n ")
- return ret['bool']
-
- def DeleteLeases(self, leases_id_list, slice_hrn):
- """
-
- Deletes several leases, based on their experiment ids and the slice
- they are associated with. Uses DeleteOneLease to delete the
- experiment on the testbed. Note that one slice can contain multiple
- experiments, and in this
- case all the experiments in the leases_id_list MUST belong to this
- same slice, since there is only one slice hrn provided here.
-
- :param leases_id_list: list of job ids that belong to the slice whose
- slice hrn is provided.
- :param slice_hrn: the slice hrn.
- :type slice_hrn: string
-
- .. warning:: Does not have a return value since there was no easy
- way to handle failure when dealing with multiple job delete. Plus,
- there was no easy way to report it to the user.
-
- """
- logger.debug("CORTEXLAB_API DeleteLeases leases_id_list %s slice_hrn %s \
- \r\n " %(leases_id_list, slice_hrn))
- for experiment_id in leases_id_list:
- self.DeleteOneLease(experiment_id, slice_hrn)
-
- return
-
-
- @staticmethod
- def _process_walltime(duration):
- """ Calculates the walltime in seconds from the duration in H:M:S
- specified in the RSpec.
-
- """
- if duration:
- # Fixing the walltime by adding a few delays.
- # First put the walltime in seconds oarAdditionalDelay = 20;
- # additional delay for /bin/sleep command to
- # take in account prologue and epilogue scripts execution
- # int walltimeAdditionalDelay = 240; additional delay
- #for prologue/epilogue execution = $SERVER_PROLOGUE_EPILOGUE_TIMEOUT
- #in oar.conf
- # Put the duration in seconds first
- #desired_walltime = duration * 60
- desired_walltime = duration
- total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
- sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
- walltime = []
- #Put the walltime back in str form
- #First get the hours
- walltime.append(str(total_walltime / 3600))
- total_walltime = total_walltime - 3600 * int(walltime[0])
- #Get the remaining minutes
- walltime.append(str(total_walltime / 60))
- total_walltime = total_walltime - 60 * int(walltime[1])
- #Get the seconds
- walltime.append(str(total_walltime))
-
- else:
- logger.log_exc(" __process_walltime duration null")
-
- return walltime, sleep_walltime
-
- @staticmethod
- def _create_job_structure_request_for_OAR(lease_dict):
- """ Creates the structure needed for a correct POST on OAR.
- Makes the timestamp transformation into the appropriate format.
- Sends the POST request to create the job with the resources in
- added_nodes.
-
- """
-
- nodeid_list = []
- reqdict = {}
-
-
- reqdict['workdir'] = '/tmp'
- reqdict['resource'] = "{network_address in ("
-
- for node in lease_dict['added_nodes']:
- logger.debug("\r\n \r\n OARrestapi \t \
- __create_job_structure_request_for_OAR node %s" %(node))
-
- # Get the ID of the node
- nodeid = node
- reqdict['resource'] += "'" + nodeid + "', "
- nodeid_list.append(nodeid)
-
- custom_length = len(reqdict['resource'])- 2
- reqdict['resource'] = reqdict['resource'][0:custom_length] + \
- ")}/nodes=" + str(len(nodeid_list))
-
-
- walltime, sleep_walltime = \
- CortexlabShell._process_walltime(\
- int(lease_dict['lease_duration']))
-
-
- reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
- ":" + str(walltime[1]) + ":" + str(walltime[2])
- reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
-
- #In case of a scheduled experiment (not immediate)
- #To run an XP immediately, don't specify date and time in RSpec
- #They will be set to None.
- if lease_dict['lease_start_time'] is not '0':
- #Readable time accepted by OAR
- start_time = datetime.fromtimestamp( \
- int(lease_dict['lease_start_time'])).\
- strftime(lease_dict['time_format'])
- reqdict['reservation'] = start_time
- #If there is not start time, Immediate XP. No need to add special
- # OAR parameters
-
-
- reqdict['type'] = "deploy"
- reqdict['directory'] = ""
- reqdict['name'] = "SFA_" + lease_dict['slice_user']
-
- return reqdict
-
-
- def LaunchExperimentOnTestbed(self, added_nodes, slice_name, \
- lease_start_time, lease_duration, slice_user=None):
-
- """
- Create an experiment request structure based on the information provided
- and schedule/run the experiment on the testbed by reserving the nodes.
- :param added_nodes: list of nodes that belong to the described lease.
- :param slice_name: the slice hrn associated to the lease.
- :param lease_start_time: timestamp of the lease startting time.
- :param lease_duration: lease duration in minutes
-
- """
- lease_dict = {}
- # Add in the dict whatever is necessary to create the experiment on
- # the testbed
- lease_dict['lease_start_time'] = lease_start_time
- lease_dict['lease_duration'] = lease_duration
- lease_dict['added_nodes'] = added_nodes
- lease_dict['slice_name'] = slice_name
- lease_dict['slice_user'] = slice_user
- lease_dict['grain'] = self.GetLeaseGranularity()
-
-
-
- answer = self.query_sites.schedule_experiment(lease_dict)
- try:
- experiment_id = answer['id']
- except KeyError:
- logger.log_exc("CORTEXLAB_API \tLaunchExperimentOnTestbed \
- Impossible to create xp %s " %(answer))
- return None
-
- if experiment_id :
- logger.debug("CORTEXLAB_API \tLaunchExperimentOnTestbed \
- experiment_id %s added_nodes %s slice_user %s"
- %(experiment_id, added_nodes, slice_user))
-
-
- return experiment_id
-
-
-
-
- #Delete the jobs from job_iotlab table
- def DeleteSliceFromNodes(self, slice_record):
- """
- Deletes all the running or scheduled jobs of a given slice
- given its record.
-
- :param slice_record: record of the slice, must contain experiment_id,
- user
- :type slice_record: dict
- :returns: dict of the jobs'deletion status. Success= True, Failure=
- False, for each job id.
- :rtype: dict
-
- .. note: used in driver delete_sliver
-
- """
- logger.debug("CORTEXLAB_API \t DeleteSliceFromNodes %s "
- % (slice_record))
-
- if isinstance(slice_record['experiment_id'], list):
- experiment_bool_answer = {}
- for experiment_id in slice_record['experiment_id']:
- ret = self.DeleteOneLease(experiment_id, slice_record['user'])
-
- experiment_bool_answer.update(ret)
-
- else:
- experiment_bool_answer = [self.DeleteOneLease(
- slice_record['experiment_id'],
- slice_record['user'])]
-
- return experiment_bool_answer
-
-
-
- def GetLeaseGranularity(self):
- """ Returns the granularity of an experiment in the Iotlab testbed.
- OAR uses seconds for experiments duration , the granulaity is also
- defined in seconds.
- Experiments which last less than 10 min (600 sec) are invalid"""
- return self.grain
-
- @staticmethod
- def filter_lease(reservation_list, filter_type, filter_value ):
- """Filters the lease reservation list by removing each lease whose
- filter_type is not equal to the filter_value provided. Returns the list
- of leases in one slice, defined by the slice_hrn if filter_type
- is 'slice_hrn'. Otherwise, returns all leases scheduled starting from
- the filter_value if filter_type is 't_from'.
-
- :param reservation_list: leases list
- :type reservation_list: list of dictionary
- :param filter_type: can be either 't_from' or 'slice hrn'
- :type filter_type: string
- :param filter_value: depending on the filter_type, can be the slice_hrn
- or can be defining a timespan.
- :type filter_value: if filter_type is 't_from', filter_value is int.
- if filter_type is 'slice_hrn', filter_value is a string.
-
-
- :returns: filtered_reservation_list, contains only leases running or
- scheduled in the given slice (wanted_slice).Dict keys are
- 'lease_id','reserved_nodes','slice_id', 'state', 'user',
- 'component_id_list','slice_hrn', 'resource_ids', 't_from', 't_until'
- :rtype: list of dict
-
- """
- filtered_reservation_list = list(reservation_list)
- logger.debug("IOTLAB_API \t filter_lease_name reservation_list %s" \
- % (reservation_list))
- try:
- for reservation in reservation_list:
- if \
- (filter_type is 'slice_hrn' and \
- reservation['slice_hrn'] != filter_value) or \
- (filter_type is 't_from' and \
- reservation['t_from'] > filter_value):
- filtered_reservation_list.remove(reservation)
- except TypeError:
- logger.log_exc("Iotlabshell filter_lease : filter_type %s \
- filter_value %s not in lease" %(filter_type,
- filter_value))
-
- return filtered_reservation_list
-
- # @staticmethod
- # def filter_lease_name(reservation_list, filter_value):
- # filtered_reservation_list = list(reservation_list)
- # logger.debug("CORTEXLAB_API \t filter_lease_name reservation_list %s" \
- # % (reservation_list))
- # for reservation in reservation_list:
- # if 'slice_hrn' in reservation and \
- # reservation['slice_hrn'] != filter_value:
- # filtered_reservation_list.remove(reservation)
-
- # logger.debug("CORTEXLAB_API \t filter_lease_name filtered_reservation_list %s" \
- # % (filtered_reservation_list))
- # return filtered_reservation_list
-
- # @staticmethod
- # def filter_lease_start_time(reservation_list, filter_value):
- # filtered_reservation_list = list(reservation_list)
-
- # for reservation in reservation_list:
- # if 't_from' in reservation and \
- # reservation['t_from'] > filter_value:
- # filtered_reservation_list.remove(reservation)
-
- # return filtered_reservation_list
-
- def complete_leases_info(self, unfiltered_reservation_list, db_xp_dict):
-
- """Check that the leases list of dictionaries contains the appropriate
- fields and piece of information here
- :param unfiltered_reservation_list: list of leases to be completed.
- :param db_xp_dict: leases information in the lease_sfa table
- :returns local_unfiltered_reservation_list: list of leases completed.
- list of dictionaries describing the leases, with all the needed
- information (sfa,ldap,nodes)to identify one particular lease.
- :returns testbed_xp_list: list of experiments'ids running or scheduled
- on the testbed.
- :rtype local_unfiltered_reservation_list: list of dict
- :rtype testbed_xp_list: list
-
- """
- testbed_xp_list = []
- local_unfiltered_reservation_list = list(unfiltered_reservation_list)
- # slice_hrn and lease_id are in the lease_table,
- # so they are in the db_xp_dict.
- # component_id_list : list of nodes xrns
- # reserved_nodes : list of nodes' hostnames
- # slice_id : slice urn, can be made from the slice hrn using hrn_to_urn
- for resa in local_unfiltered_reservation_list:
-
- #Construct list of scheduled experiments (runing, waiting..)
- testbed_xp_list.append(resa['lease_id'])
- #If there is information on the experiment in the lease table
- #(slice used and experiment id), meaning the experiment was created
- # using sfa
- if resa['lease_id'] in db_xp_dict:
- xp_info = db_xp_dict[resa['lease_id']]
- logger.debug("CORTEXLAB_API \tGetLeases xp_info %s"
- % (xp_info))
- resa['slice_hrn'] = xp_info['slice_hrn']
- resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
-
- #otherwise, assume it is a cortexlab slice, created via the
- # cortexlab portal
- else:
- resa['slice_id'] = hrn_to_urn(self.root_auth + '.' +
- resa['user'] + "_slice", 'slice')
- resa['slice_hrn'] = Xrn(resa['slice_id']).get_hrn()
-
- resa['component_id_list'] = []
- #Transform the hostnames into urns (component ids)
- for node in resa['reserved_nodes']:
-
- iotlab_xrn = xrn_object(self.root_auth, node)
- resa['component_id_list'].append(iotlab_xrn.urn)
-
- return local_unfiltered_reservation_list, testbed_xp_list
-
-
-#TODO FUNCTIONS SECTION 04/07/2012 SA
-
- ##TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
- ##04/07/2012 SA
- #@staticmethod
- #def UnBindObjectFromPeer( auth, object_type, object_id, shortname):
- #""" This method is a hopefully temporary hack to let the sfa correctly
- #detach the objects it creates from a remote peer object. This is
- #needed so that the sfa federation link can work in parallel with
- #RefreshPeer, as RefreshPeer depends on remote objects being correctly
- #marked.
- #Parameters:
- #auth : struct, API authentication structure
- #AuthMethod : string, Authentication method to use
- #object_type : string, Object type, among 'site','person','slice',
- #'node','key'
- #object_id : int, object_id
- #shortname : string, peer shortname
- #FROM PLC DOC
-
- #"""
- #logger.warning("CORTEXLAB_API \tUnBindObjectFromPeer EMPTY-\
- #DO NOTHING \r\n ")
- #return
-
- ##TODO Is BindObjectToPeer still necessary ? Currently does nothing
- ##04/07/2012 SA
- #|| Commented out 28/05/13 SA
- #def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
- #remote_object_id=None):
- #"""This method is a hopefully temporary hack to let the sfa correctly
- #attach the objects it creates to a remote peer object. This is needed
- #so that the sfa federation link can work in parallel with RefreshPeer,
- #as RefreshPeer depends on remote objects being correctly marked.
- #Parameters:
- #shortname : string, peer shortname
- #remote_object_id : int, remote object_id, set to 0 if unknown
- #FROM PLC API DOC
-
- #"""
- #logger.warning("CORTEXLAB_API \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
- #return
-
- ##TODO UpdateSlice 04/07/2012 SA || Commented out 28/05/13 SA
- ##Funciton should delete and create another job since oin iotlab slice=job
- #def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
- #"""Updates the parameters of an existing slice with the values in
- #slice_fields.
- #Users may only update slices of which they are members.
- #PIs may update any of the slices at their sites, or any slices of
- #which they are members. Admins may update any slice.
- #Only PIs and admins may update max_nodes. Slices cannot be renewed
- #(by updating the expires parameter) more than 8 weeks into the future.
- #Returns 1 if successful, faults otherwise.
- #FROM PLC API DOC
-
- #"""
- #logger.warning("CORTEXLAB_API UpdateSlice EMPTY - DO NOTHING \r\n ")
- #return
-
- #Unused SA 30/05/13, we only update the user's key or we delete it.
- ##TODO UpdatePerson 04/07/2012 SA
- #def UpdatePerson(self, iotlab_hrn, federated_hrn, person_fields=None):
- #"""Updates a person. Only the fields specified in person_fields
- #are updated, all other fields are left untouched.
- #Users and techs can only update themselves. PIs can only update
- #themselves and other non-PIs at their sites.
- #Returns 1 if successful, faults otherwise.
- #FROM PLC API DOC
-
- #"""
- ##new_row = FederatedToIotlab(iotlab_hrn, federated_hrn)
- ##self.leases_db.testbed_session.add(new_row)
- ##self.leases_db.testbed_session.commit()
-
- #logger.debug("CORTEXLAB_API UpdatePerson EMPTY - DO NOTHING \r\n ")
- #return
-
-
-
-
- #TODO : test
- def DeleteKey(self, user_record, key_string):
- """Deletes a key in the LDAP entry of the specified user.
-
- Removes the key_string from the user's key list and updates the LDAP
- user's entry with the new key attributes.
-
- :param key_string: The ssh key to remove
- :param user_record: User's record
- :type key_string: string
- :type user_record: dict
- :returns: True if sucessful, False if not.
- :rtype: Boolean
-
- """
- all_user_keys = user_record['keys']
- all_user_keys.remove(key_string)
- new_attributes = {'sshPublicKey':all_user_keys}
- ret = self.ldap.LdapModifyUser(user_record, new_attributes)
- logger.debug("CORTEXLAB_API DeleteKey %s- " % (ret))
- return ret['bool']
-
-
-
-
-
-
-
- #Update slice unused, therefore sfa_fields_to_iotlab_fields unused
- #SA 30/05/13
- #@staticmethod
- #def sfa_fields_to_iotlab_fields(sfa_type, hrn, record):
- #"""
- #"""
-
- #iotlab_record = {}
- ##for field in record:
- ## iotlab_record[field] = record[field]
-
- #if sfa_type == "slice":
- ##instantion used in get_slivers ?
- #if not "instantiation" in iotlab_record:
- #iotlab_record["instantiation"] = "iotlab-instantiated"
- ##iotlab_record["hrn"] = hrn_to_pl_slicename(hrn)
- ##Unused hrn_to_pl_slicename because Iotlab's hrn already
- ##in the appropriate form SA 23/07/12
- #iotlab_record["hrn"] = hrn
- #logger.debug("CORTEXLAB_API.PY sfa_fields_to_iotlab_fields \
- #iotlab_record %s " %(iotlab_record['hrn']))
- #if "url" in record:
- #iotlab_record["url"] = record["url"]
- #if "description" in record:
- #iotlab_record["description"] = record["description"]
- #if "expires" in record:
- #iotlab_record["expires"] = int(record["expires"])
-
- ##nodes added by OAR only and then imported to SFA
- ##elif type == "node":
- ##if not "hostname" in iotlab_record:
- ##if not "hostname" in record:
- ##raise MissingSfaInfo("hostname")
- ##iotlab_record["hostname"] = record["hostname"]
- ##if not "model" in iotlab_record:
- ##iotlab_record["model"] = "geni"
-
- ##One authority only
- ##elif type == "authority":
- ##iotlab_record["login_base"] = hrn_to_iotlab_login_base(hrn)
-
- ##if not "name" in iotlab_record:
- ##iotlab_record["name"] = hrn
-
- ##if not "abbreviated_name" in iotlab_record:
- ##iotlab_record["abbreviated_name"] = hrn
-
- ##if not "enabled" in iotlab_record:
- ##iotlab_record["enabled"] = True
-
- ##if not "is_public" in iotlab_record:
- ##iotlab_record["is_public"] = True
-
- #return iotlab_record
-
-
-
-
-
-
-
-
-
-
+++ /dev/null
-"""
-This file defines the IotlabSlices class by which all the slice checkings
-upon lease creation are done.
-"""
-from sfa.util.xrn import get_authority, urn_to_hrn
-from sfa.util.sfalogging import logger
-
-MAXINT = 2L**31-1
-
-
-class CortexlabSlices:
- """
- This class is responsible for checking the slice when creating a
- lease or a sliver. Those checks include verifying that the user is valid,
- that the slice is known from the testbed or from our peers, that the list
- of nodes involved has not changed (in this case the lease is modified
- accordingly).
- """
- rspec_to_slice_tag = {'max_rate': 'net_max_rate'}
-
- def __init__(self, driver):
- """
- Get the reference to the driver here.
- """
- self.driver = driver
-
- def get_peer(self, xrn):
- """
- Finds the authority of a resource based on its xrn.
- If the authority is Iotlab (local) return None,
- Otherwise, look up in the DB if Iotlab is federated with this site
- authority and returns its DB record if it is the case.
-
- :param xrn: resource's xrn
- :type xrn: string
- :returns: peer record
- :rtype: dict
-
- """
- hrn, hrn_type = urn_to_hrn(xrn)
- #Does this slice belong to a local site or a peer cortexlab site?
- peer = None
-
- # get this slice's authority (site)
- slice_authority = get_authority(hrn)
- #Iotlab stuff
- #This slice belongs to the current site
- if slice_authority == self.driver.testbed_shell.root_auth:
- site_authority = slice_authority
- return None
-
- site_authority = get_authority(slice_authority).lower()
- # get this site's authority (sfa root authority or sub authority)
-
- logger.debug("CortexlabSlices \t get_peer slice_authority %s \
- site_authority %s hrn %s"
- % (slice_authority, site_authority, hrn))
-
- # check if we are already peered with this site_authority
- #if so find the peer record
- peers = self.driver.GetPeers(peer_filter=site_authority)
- for peer_record in peers:
- if site_authority == peer_record.hrn:
- peer = peer_record
- logger.debug(" CortexlabSlices \tget_peer peer %s " % (peer))
- return peer
-
- def get_sfa_peer(self, xrn):
- """Returns the authority name for the xrn or None if the local site
- is the authority.
-
- :param xrn: the xrn of the resource we are looking the authority for.
- :type xrn: string
- :returns: the resources's authority name.
- :rtype: string
-
- """
- hrn, hrn_type = urn_to_hrn(xrn)
-
- # return the authority for this hrn or None if we are the authority
- sfa_peer = None
- slice_authority = get_authority(hrn)
- site_authority = get_authority(slice_authority)
-
- if site_authority != self.driver.hrn:
- sfa_peer = site_authority
-
- return sfa_peer
-
- def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
- """
- Compare requested leases with the leases already scheduled/
- running in OAR. If necessary, delete and recreate modified leases,
- and delete no longer requested ones.
-
- :param sfa_slice: sfa slice record
- :param requested_jobs_dict: dictionary of requested leases
- :param peer: sfa peer record
-
- :type sfa_slice: dict
- :type requested_jobs_dict: dict
- :type peer: dict
- :returns: leases list of dictionary
- :rtype: list
-
- """
-
- logger.debug("CortexlabSlices verify_slice_leases sfa_slice %s "
- % (sfa_slice))
- #First get the list of current leases from OAR
- leases = self.driver.GetLeases({'slice_hrn': sfa_slice['hrn']})
- logger.debug("CortexlabSlices verify_slice_leases requested_jobs_dict %s \
- leases %s " % (requested_jobs_dict, leases))
-
- current_nodes_reserved_by_start_time = {}
- requested_nodes_by_start_time = {}
- leases_by_start_time = {}
- reschedule_jobs_dict = {}
-
- #Create reduced dictionary with key start_time and value
- # the list of nodes
- #-for the leases already registered by OAR first
- # then for the new leases requested by the user
-
- #Leases already scheduled/running in OAR
- for lease in leases:
- current_nodes_reserved_by_start_time[lease['t_from']] = \
- lease['reserved_nodes']
- leases_by_start_time[lease['t_from']] = lease
-
- #First remove job whose duration is too short
- for job in requested_jobs_dict.values():
- job['duration'] = \
- str(int(job['duration']) \
- * self.driver.testbed_shell.GetLeaseGranularity())
- if job['duration'] < self.driver.testbed_shell.GetLeaseGranularity():
- del requested_jobs_dict[job['start_time']]
-
- #Requested jobs
- for start_time in requested_jobs_dict:
- requested_nodes_by_start_time[int(start_time)] = \
- requested_jobs_dict[start_time]['hostname']
- #Check if there is any difference between the leases already
- #registered in OAR and the requested jobs.
- #Difference could be:
- #-Lease deleted in the requested jobs
- #-Added/removed nodes
- #-Newly added lease
-
- logger.debug("CortexlabSlices verify_slice_leases \
- requested_nodes_by_start_time %s \
- "% (requested_nodes_by_start_time))
- #Find all deleted leases
- start_time_list = \
- list(set(leases_by_start_time.keys()).\
- difference(requested_nodes_by_start_time.keys()))
- deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
- for start_time in start_time_list]
-
-
- #Find added or removed nodes in exisiting leases
- for start_time in requested_nodes_by_start_time:
- logger.debug("CortexlabSlices verify_slice_leases start_time %s \
- "%( start_time))
- if start_time in current_nodes_reserved_by_start_time:
-
- if requested_nodes_by_start_time[start_time] == \
- current_nodes_reserved_by_start_time[start_time]:
- continue
-
- else:
- update_node_set = \
- set(requested_nodes_by_start_time[start_time])
- added_nodes = \
- update_node_set.difference(\
- current_nodes_reserved_by_start_time[start_time])
- shared_nodes = \
- update_node_set.intersection(\
- current_nodes_reserved_by_start_time[start_time])
- old_nodes_set = \
- set(\
- current_nodes_reserved_by_start_time[start_time])
- removed_nodes = \
- old_nodes_set.difference(\
- requested_nodes_by_start_time[start_time])
- logger.debug("CortexlabSlices verify_slice_leases \
- shared_nodes %s added_nodes %s removed_nodes %s"\
- %(shared_nodes, added_nodes,removed_nodes ))
- #If the lease is modified, delete it before
- #creating it again.
- #Add the deleted lease job id in the list
- #WARNING :rescheduling does not work if there is already
- # 2 running/scheduled jobs because deleting a job
- #takes time SA 18/10/2012
- if added_nodes or removed_nodes:
- deleted_leases.append(\
- leases_by_start_time[start_time]['lease_id'])
- #Reschedule the job
- if added_nodes or shared_nodes:
- reschedule_jobs_dict[str(start_time)] = \
- requested_jobs_dict[str(start_time)]
-
- else:
- #New lease
-
- job = requested_jobs_dict[str(start_time)]
- logger.debug("CortexlabSlices \
- NEWLEASE slice %s job %s"
- % (sfa_slice, job))
- job_id = self.driver.AddLeases(job['hostname'],
- sfa_slice, int(job['start_time']),
- int(job['duration']))
- if job_id is not None:
- new_leases = self.driver.GetLeases(login=
- sfa_slice['login'])
- for new_lease in new_leases:
- leases.append(new_lease)
-
- #Deleted leases are the ones with lease id not declared in the Rspec
- if deleted_leases:
- self.driver.testbed_shell.DeleteLeases(deleted_leases,
- sfa_slice['user']['uid'])
- logger.debug("CortexlabSlices \
- verify_slice_leases slice %s deleted_leases %s"
- % (sfa_slice, deleted_leases))
-
- if reschedule_jobs_dict:
- for start_time in reschedule_jobs_dict:
- job = reschedule_jobs_dict[start_time]
- self.driver.AddLeases(job['hostname'],
- sfa_slice, int(job['start_time']),
- int(job['duration']))
- return leases
-
- def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
- """Check for wanted and unwanted nodes in the slice.
-
- Removes nodes and associated leases that the user does not want anymore
- by deleteing the associated job in OAR (DeleteSliceFromNodes).
- Returns the nodes' hostnames that are going to be in the slice.
-
- :param sfa_slice: slice record. Must contain node_ids and list_node_ids.
-
- :param requested_slivers: list of requested nodes' hostnames.
- :param peer: unused so far.
-
- :type sfa_slice: dict
- :type requested_slivers: list
- :type peer: string
-
- :returns: list requested nodes hostnames
- :rtype: list
-
- .. warning:: UNUSED SQA 24/07/13
- .. seealso:: DeleteSliceFromNodes
- .. todo:: check what to do with the peer? Can not remove peer nodes from
- slice here. Anyway, in this case, the peer should have gotten the
- remove request too.
-
- """
- current_slivers = []
- deleted_nodes = []
-
- if 'node_ids' in sfa_slice:
- nodes = self.driver.testbed_shell.GetNodes(
- sfa_slice['list_node_ids'],
- ['hostname'])
- current_slivers = [node['hostname'] for node in nodes]
-
- # remove nodes not in rspec
- deleted_nodes = list(set(current_slivers).
- difference(requested_slivers))
-
- logger.debug("CortexlabSlices \tverify_slice_nodes slice %s\
- \r\n \r\n deleted_nodes %s"
- % (sfa_slice, deleted_nodes))
-
- if deleted_nodes:
- #Delete the entire experience
- self.driver.testbed_shell.DeleteSliceFromNodes(sfa_slice)
- return nodes
-
- def verify_slice(self, slice_hrn, slice_record, sfa_peer):
- """Ensures slice record exists.
-
- The slice record must exist either in Iotlab or in the other
- federated testbed (sfa_peer). If the slice does not belong to Iotlab,
- check if the user already exists in LDAP. In this case, adds the slice
- to the sfa DB and associates its LDAP user.
-
- :param slice_hrn: slice's name
- :param slice_record: sfa record of the slice
- :param sfa_peer: name of the peer authority if any.(not Iotlab).
-
- :type slice_hrn: string
- :type slice_record: dictionary
- :type sfa_peer: string
-
- .. seealso:: AddSlice
-
-
- """
-
- slicename = slice_hrn
- # check if slice belongs to Iotlab
- slices_list = self.driver.GetSlices(
- slice_filter=slicename, slice_filter_type='slice_hrn')
-
- sfa_slice = None
-
- if slices_list:
- for sl in slices_list:
-
- logger.debug("CortexlabSlices \t verify_slice slicename %s \
- slices_list %s sl %s \r slice_record %s"
- % (slicename, slices_list, sl, slice_record))
- sfa_slice = sl
- sfa_slice.update(slice_record)
-
- else:
- #Search for user in ldap based on email SA 14/11/12
- ldap_user = self.driver.testbed_shell.ldap.LdapFindUser(\
- slice_record['user'])
- logger.debug(" CortexlabSlices \tverify_slice Oups \
- slice_record %s sfa_peer %s ldap_user %s"
- % (slice_record, sfa_peer, ldap_user))
- #User already registered in ldap, meaning user should be in SFA db
- #and hrn = sfa_auth+ uid
- sfa_slice = {'hrn': slicename,
- 'node_list': [],
- 'authority': slice_record['authority'],
- 'gid': slice_record['gid'],
- 'slice_id': slice_record['record_id'],
- 'reg-researchers': slice_record['reg-researchers'],
- 'peer_authority': str(sfa_peer)
- }
-
- if ldap_user:
- hrn = self.driver.testbed_shell.root_auth + '.' \
- + ldap_user['uid']
- user = self.driver.get_user_record(hrn)
-
- logger.debug(" CortexlabSlices \tverify_slice hrn %s USER %s"
- % (hrn, user))
-
- # add the external slice to the local SFA DB
- if sfa_slice:
- self.driver.AddSlice(sfa_slice, user)
-
- logger.debug("CortexlabSlices \tverify_slice ADDSLICE OK")
- return sfa_slice
-
-
- def verify_persons(self, slice_hrn, slice_record, users, options=None):
- """Ensures the users in users list exist and are enabled in LDAP. Adds
- person if needed(AddPerson).
-
- Checking that a user exist is based on the user's email. If the user is
- still not found in the LDAP, it means that the user comes from another
- federated testbed. In this case an account has to be created in LDAP
- so as to enable the user to use the testbed, since we trust the testbed
- he comes from. This is done by calling AddPerson.
-
- :param slice_hrn: slice name
- :param slice_record: record of the slice_hrn
- :param users: users is a record list. Records can either be
- local records or users records from known and trusted federated
- sites.If the user is from another site that cortex;ab doesn't trust
- yet, then Resolve will raise an error before getting to allocate.
-
- :type slice_hrn: string
- :type slice_record: string
- :type users: list
-
- .. seealso:: AddPerson
- .. note:: Removed unused peer and sfa_peer parameters. SA 18/07/13.
-
-
- """
-
- if options is None: options={}
-
- logger.debug("CortexlabSlices \tverify_persons \tslice_hrn %s \
- \t slice_record %s\r\n users %s \t "
- % (slice_hrn, slice_record, users))
-
-
- users_by_email = {}
- #users_dict : dict whose keys can either be the user's hrn or its id.
- #Values contains only id and hrn
- users_dict = {}
-
- #First create dicts by hrn and id for each user in the user record list:
- for info in users:
- # if 'slice_record' in info:
- # slice_rec = info['slice_record']
- # if 'user' in slice_rec :
- # user = slice_rec['user']
-
- if 'email' in info:
- users_by_email[info['email']] = info
- users_dict[info['email']] = info
-
-
- logger.debug("CortexlabSlices.PY \t verify_person \
- users_dict %s \r\n user_by_email %s \r\n "
- %(users_dict, users_by_email))
-
- existing_user_ids = []
- existing_user_emails = []
- existing_users = []
- # Check if user is in Iotlab LDAP using its hrn.
- # Assuming Iotlab is centralised : one LDAP for all sites,
- # user's record_id unknown from LDAP
- # LDAP does not provide users id, therefore we rely on email to find the
- # user in LDAP
-
- if users_by_email:
- #Construct the list of filters (list of dicts) for GetPersons
- filter_user = [users_by_email[email] for email in users_by_email]
- #Check user i in LDAP with GetPersons
- #Needed because what if the user has been deleted in LDAP but
- #is still in SFA?
- existing_users = self.driver.testbed_shell.GetPersons(filter_user)
- logger.debug(" \r\n CortexlabSlices.PY \tverify_person filter_user \
- %s existing_users %s "
- % (filter_user, existing_users))
- #User is in LDAP
- if existing_users:
- for user in existing_users:
- user['login'] = user['uid']
- users_dict[user['email']].update(user)
- existing_user_emails.append(
- users_dict[user['email']]['email'])
-
-
- # User from another known trusted federated site. Check
- # if a cortexlab account matching the email has already been created.
- else:
- req = 'mail='
- if isinstance(users, list):
- req += users[0]['email']
- else:
- req += users['email']
- ldap_reslt = self.driver.testbed_shell.ldap.LdapSearch(req)
-
- if ldap_reslt:
- logger.debug(" CortexlabSlices.PY \tverify_person users \
- USER already in Iotlab \t ldap_reslt %s \
- " % (ldap_reslt))
- existing_users.append(ldap_reslt[1])
-
- else:
- #User not existing in LDAP
- logger.debug("CortexlabSlices.PY \tverify_person users \
- not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t \
- ldap_reslt %s " % (users, ldap_reslt))
-
- requested_user_emails = users_by_email.keys()
- requested_user_hrns = \
- [users_by_email[user]['hrn'] for user in users_by_email]
- logger.debug("CortexlabSlices.PY \tverify_person \
- users_by_email %s " % (users_by_email))
-
- #Check that the user of the slice in the slice record
- #matches one of the existing users
- try:
- if slice_record['reg-researchers'][0] in requested_user_hrns:
- logger.debug(" CortexlabSlices \tverify_person ['PI']\
- slice_record %s" % (slice_record))
-
- except KeyError:
- pass
-
- # users to be added, removed or updated
- #One user in one cortexlab slice : there should be no need
- #to remove/ add any user from/to a slice.
- #However a user from SFA which is not registered in Iotlab yet
- #should be added to the LDAP.
- added_user_emails = set(requested_user_emails).\
- difference(set(existing_user_emails))
-
-
- #self.verify_keys(existing_slice_users, updated_users_list, \
- #peer, append)
-
- added_persons = []
- # add new users
- #requested_user_email is in existing_user_emails
- if len(added_user_emails) == 0:
- slice_record['login'] = users_dict[requested_user_emails[0]]['uid']
- logger.debug(" CortexlabSlices \tverify_person QUICK DIRTY %s"
- % (slice_record))
-
- for added_user_email in added_user_emails:
- added_user = users_dict[added_user_email]
- logger.debug(" CortexlabSlices \r\n \r\n \t verify_person \
- added_user %s" % (added_user))
- person = {}
- person['peer_person_id'] = None
- k_list = ['first_name', 'last_name', 'person_id']
- for k in k_list:
- if k in added_user:
- person[k] = added_user[k]
-
- person['pkey'] = added_user['keys'][0]
- person['mail'] = added_user['email']
- person['email'] = added_user['email']
- person['key_ids'] = added_user.get('key_ids', [])
-
- ret = self.driver.testbed_shell.AddPerson(person)
- if 'uid' in ret:
- # meaning bool is True and the AddPerson was successful
- person['uid'] = ret['uid']
- slice_record['login'] = person['uid']
- else:
- # error message in ret
- logger.debug(" CortexlabSlices ret message %s" %(ret))
-
- logger.debug(" CortexlabSlices \r\n \r\n \t THE SECOND verify_person\
- person %s" % (person))
- #Update slice_Record with the id now known to LDAP
-
-
- added_persons.append(person)
- return added_persons
-
-
- def verify_keys(self, persons, users, peer, options=None):
- """
- .. warning:: unused
- """
- if options is None: options={}
- # existing keys
- key_ids = []
- for person in persons:
- key_ids.extend(person['key_ids'])
- keylist = self.driver.GetKeys(key_ids, ['key_id', 'key'])
-
- keydict = {}
- for key in keylist:
- keydict[key['key']] = key['key_id']
- existing_keys = keydict.keys()
-
- persondict = {}
- for person in persons:
- persondict[person['email']] = person
-
- # add new keys
- requested_keys = []
- updated_persons = []
- users_by_key_string = {}
- for user in users:
- user_keys = user.get('keys', [])
- updated_persons.append(user)
- for key_string in user_keys:
- users_by_key_string[key_string] = user
- requested_keys.append(key_string)
- if key_string not in existing_keys:
- key = {'key': key_string, 'key_type': 'ssh'}
- #try:
- ##if peer:
- #person = persondict[user['email']]
- #self.driver.testbed_shell.UnBindObjectFromPeer(
- # 'person',person['person_id'],
- # peer['shortname'])
- ret = self.driver.testbed_shell.AddPersonKey(
- user['email'], key)
- #if peer:
- #key_index = user_keys.index(key['key'])
- #remote_key_id = user['key_ids'][key_index]
- #self.driver.testbed_shell.BindObjectToPeer('key', \
- #key['key_id'], peer['shortname'], \
- #remote_key_id)
-
-
-
- # remove old keys (only if we are not appending)
- append = options.get('append', True)
- if append is False:
- removed_keys = set(existing_keys).difference(requested_keys)
- for key in removed_keys:
- #if peer:
- #self.driver.testbed_shell.UnBindObjectFromPeer('key', \
- #key, peer['shortname'])
-
- user = users_by_key_string[key]
- self.driver.testbed_shell.DeleteKey(user, key)
-
- return
+++ /dev/null
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
-BUILDDIR = build
-
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
- @echo "Please use \`make <target>' where <target> is one of"
- @echo " html to make standalone HTML files"
- @echo " dirhtml to make HTML files named index.html in directories"
- @echo " singlehtml to make a single large HTML file"
- @echo " pickle to make pickle files"
- @echo " json to make JSON files"
- @echo " htmlhelp to make HTML files and a HTML help project"
- @echo " qthelp to make HTML files and a qthelp project"
- @echo " devhelp to make HTML files and a Devhelp project"
- @echo " epub to make an epub"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
- @echo " latexpdf to make LaTeX files and run them through pdflatex"
- @echo " text to make text files"
- @echo " man to make manual pages"
- @echo " texinfo to make Texinfo files"
- @echo " info to make Texinfo files and run them through makeinfo"
- @echo " gettext to make PO message catalogs"
- @echo " changes to make an overview of all changed/added/deprecated items"
- @echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
-
-clean:
- -rm -rf $(BUILDDIR)/*
-
-html:
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
- $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
- @echo
- @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
- @echo
- @echo "Build finished; now you can process the pickle files."
-
-json:
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
- @echo
- @echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
- @echo
- @echo "Build finished; now you can run HTML Help Workshop with the" \
- ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
- $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
- @echo
- @echo "Build finished; now you can run "qcollectiongenerator" with the" \
- ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cortexlab_sfa_driver.qhcp"
- @echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cortexlab_sfa_driver.qhc"
-
-devhelp:
- $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
- @echo
- @echo "Build finished."
- @echo "To view the help file:"
- @echo "# mkdir -p $$HOME/.local/share/devhelp/cortexlab_sfa_driver"
- @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cortexlab_sfa_driver"
- @echo "# devhelp"
-
-epub:
- $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
- @echo
- @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo
- @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
- @echo "Run \`make' in that directory to run these through (pdf)latex" \
- "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo "Running LaTeX files through pdflatex..."
- $(MAKE) -C $(BUILDDIR)/latex all-pdf
- @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
- $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
- @echo
- @echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
- $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
- @echo
- @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo
- @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
- @echo "Run \`make' in that directory to run these through makeinfo" \
- "(use \`make info' here to do that automatically)."
-
-info:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo "Running Texinfo files through makeinfo..."
- make -C $(BUILDDIR)/texinfo info
- @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
- $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
- @echo
- @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
- @echo
- @echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
- @echo
- @echo "Link check complete; look for any errors in the above output " \
- "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
- @echo "Testing of doctests in the sources finished, look at the " \
- "results in $(BUILDDIR)/doctest/output.txt."
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# cortexlab_sfa_driver documentation build configuration file, created by
-# sphinx-quickstart on Mon Nov 18 12:11:50 2013.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-
-sys.path.insert(0, os.path.abspath('../../'))
-sys.path.insert(0, os.path.abspath('../../../'))
-sys.path.insert(0, os.path.abspath('../../../storage/'))
-sys.path.insert(0, os.path.abspath('../../../../'))
-sys.path.insert(0, os.path.abspath('../../../rspecs/elements/versions/'))
-sys.path.insert(0, os.path.abspath('../../../rspecs/elements/'))
-sys.path.insert(0, os.path.abspath('../../../importer/'))
-print sys.path
-
-# -- General configuration -----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Cortexlab SFA driver'
-copyright = u'2013, Sandrine Avakian'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '0.1'
-# The full version, including alpha/beta/rc tags.
-release = '0.1'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'cortexlab_sfa_driverdoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('index', 'cortexlab_sfa_driver.tex', u'cortexlab\\_sfa\\_driver Documentation',
- u'Sandrine Avakian', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output --------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'cortexlab_sfa_driver', u'cortexlab_sfa_driver Documentation',
- [u'Sandrine Avakian'], 1)
-]
-
-# If true, show URL addresses after external links.
-#man_show_urls = False
-
-
-# -- Options for Texinfo output ------------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- ('index', 'cortexlab_sfa_driver', u'cortexlab_sfa_driver Documentation',
- u'Sandrine Avakian', 'cortexlab_sfa_driver', 'One line description of project.',
- 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
-
-# If false, no module index is generated.
-#texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
+++ /dev/null
-cortexlab Package
-=================
-
-:mod:`LDAPapi` Module
----------------------
-
-.. automodule:: cortexlab.LDAPapi
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabaggregate` Module
---------------------------------
-
-.. automodule:: cortexlab.cortexlabaggregate
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabdriver` Module
------------------------------
-
-.. automodule:: cortexlab.cortexlabdriver
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabnodes` Module
-----------------------------
-
-.. automodule:: cortexlab.cortexlabnodes
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabpostgres` Module
--------------------------------
-
-.. automodule:: cortexlab.cortexlabpostgres
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabshell` Module
-----------------------------
-
-.. automodule:: cortexlab.cortexlabshell
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabslices` Module
------------------------------
-
-.. automodule:: cortexlab.cortexlabslices
- :members:
- :undoc-members:
- :show-inheritance:
-
+++ /dev/null
-.. cortexlab_sfa_driver documentation master file, created by
- sphinx-quickstart on Mon Nov 18 12:11:50 2013.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
-Welcome to cortexlab_sfa_driver's documentation!
-================================================
-
-===================
-Code tree overview
-===================
-
-------
-Driver
-------
-
-The Cortexlab driver source code is under the folder /sfa, along with the other
-testbeds driver folders. The /cortexlab directory contains the necessary files
-defining API for LDAP, the postgresql database as well as for the SFA
-managers.
-
-CortexlabShell
---------------
-
-**fill missing code in this class**
-
-This class contains methods to check reserved nodes, leases and launch/delete
-experiments on the testbed. Methods interacting with the testbed have
-to be completed.
-
-Cortexlabnodes
----------------
-
-**fill missing code in this class**
-
-CortexlabQueryTestbed class's goal is to get information from the testbed
-about the site and its nodes.
-There are two types of information about the nodes:
-
-* their properties : hostname, radio type, position, site, node_id and so on.
- (For a complete list of properties, please refer to the method
- get_all_nodes in cortexlabnodes.py).
-
-* their availability, whether the node is currently in use, in a scheduled experiment
- in the future or available. The availability of the nodes can be managed by a
- scheduler or a database. The node's availabity status is modified when it is
- added to/ deleted from an experiment. In SFA, this corresponds to
- creating/deleting a lease involving this node.
-
-Currently, CortexlabQueryTestbed is merely a skeleton of methods that have to be
-implemented with the real testbed API in order to provide the functionality
-they were designed for (see the cortxlabnodes file for further information
-on which methods have to be completed).
-
-
-In the LDAP file, the LDAPapi class is based on the unix schema.
-If this class is reused in another context, it might not work without some bit
-of customization. The naming (turning a hostname into a sfa hrn, a LDAP login
-into a hrn ) is also done in this class.
-
-The cortexlabpostgres file defines a dedicated cortexlab database, separated from the
-SFA database. Its purpose is to hold information that we can't store anywhere
-given the Cortexlab architecture with OAR and LDAP, namely the association of a
-job and the slice hrn for which the job is supposed to run. Indeed, one user
-may register on another federated testbed then use his federated slice to book
-cortexlab nodes. In this case, an Cortexlab LDAP account will be created. Later on,
-when new users will be imported from the LDAP to the SFA database, a Cortexlab
-slice will be created for each new user found in the LDAP. Thus leading us to
-the situation where one user may have the possibility to use different slices
-to book Cortexlab nodes.
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
+++ /dev/null
-cortexlab
-=========
-
-.. toctree::
- :maxdepth: 4
-
- cortexlab
# the importer class
def importer_class (self):
import sfa.importer.iotlabimporter
- return sfa.importer.iotlabimporter.IotlabImporter
+ return sfa.importer.iotlabimporter.IotLabImporter
# the manager classes for the server-side services
def registry_manager_class (self) :
import sfa.managers.aggregate_manager
return sfa.managers.aggregate_manager.AggregateManager
- # driver class for server-side services, talk to the whole testbed
def driver_class (self):
import sfa.iotlab.iotlabdriver
- return sfa.iotlab.iotlabdriver.IotlabDriver
+ return sfa.iotlab.iotlabdriver.IotLabDriver
- # iotlab does not have a component manager yet
- # manager class
def component_manager_class (self):
return None
# driver_class
-""" File defining the importer class and all the methods needed to import
-the nodes, users and slices from OAR and LDAP to the SFA database.
-Also creates the iotlab specific table to keep track
-of which slice hrn contains which job.
-"""
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_authority, hrn_to_urn
-from sfa.iotlab.iotlabshell import IotlabShell
-# from sfa.iotlab.iotlabdriver import IotlabDriver
-# from sfa.iotlab.iotlabpostgres import TestbedAdditionalSfaDB
-from sfa.trust.certificate import Keypair, convert_public_key
-from sfa.trust.gid import create_uuid
-
-# using global alchemy.session() here is fine
-# as importer is on standalone one-shot process
-
-from sfa.storage.alchemy import global_dbsession, engine
-from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, \
- RegUser, RegKey, init_tables
+# -*- coding:utf-8 -*-
+""" Iot-LAB importer class management """
+from sfa.storage.alchemy import engine
+from sfa.storage.model import init_tables
from sqlalchemy import Table, MetaData
-from sqlalchemy.exc import SQLAlchemyError, NoSuchTableError
-
-
+from sqlalchemy.exc import NoSuchTableError
-class IotlabImporter:
+class IotLabImporter:
"""
- IotlabImporter class, generic importer_class. Used to populate the SFA DB
- with iotlab resources' records.
- Used to update records when new resources, users or nodes, are added
- or deleted.
+ Creates the iotlab specific lease table to keep track
+ of which slice hrn matches which OAR job
"""
def __init__(self, auth_hierarchy, loc_logger):
- """
- Sets and defines import logger and the authority name. Gathers all the
- records already registerd in the SFA DB, broke them into 3 dicts, by
- type and hrn, by email and by type and pointer.
-
- :param auth_hierarchy: authority name
- :type auth_hierarchy: string
- :param loc_logger: local logger
- :type loc_logger: _SfaLogger
-
- """
- self.auth_hierarchy = auth_hierarchy
self.logger = loc_logger
self.logger.setLevelDebug()
- #retrieve all existing SFA objects
- self.all_records = global_dbsession.query(RegRecord).all()
-
- # initialize record.stale to True by default,
- # then mark stale=False on the ones that are in use
- for record in self.all_records:
- record.stale = True
- #create hash by (type,hrn)
- #used to know if a given record is already known to SFA
- self.records_by_type_hrn = \
- dict([((record.type, record.hrn), record)
- for record in self.all_records])
-
- self.users_rec_by_email = \
- dict([(record.email, record)
- for record in self.all_records if record.type == 'user'])
-
- # create hash by (type,pointer)
- self.records_by_type_pointer = \
- dict([((str(record.type), record.pointer), record)
- for record in self.all_records if record.pointer != -1])
-
-
-
- def exists(self, tablename):
+ def add_options (self, parser):
+ """ Not used, but needed by the SFA importer API """
+ pass
+
+ def _exists(self, tablename):
"""
- Checks if the table specified as tablename exists.
- :param tablename: name of the table in the db that has to be checked.
- :type tablename: string
- :returns: True if the table exists, False otherwise.
- :rtype: bool
-
+ Checks if the table exists in the SFA database.
"""
metadata = MetaData(bind=engine)
try:
- table = Table(tablename, metadata, autoload=True)
+ Table(tablename, metadata, autoload=True)
return True
except NoSuchTableError:
- self.logger.log_exc("Iotlabimporter tablename %s does not exist"
- % (tablename))
return False
-
-
- @staticmethod
- def hostname_to_hrn_escaped(root_auth, hostname):
- """
-
- Returns a node's hrn based on its hostname and the root authority and by
- removing special caracters from the hostname.
-
- :param root_auth: root authority name
- :param hostname: nodes's hostname
- :type root_auth: string
- :type hostname: string
- :rtype: string
- """
- return '.'.join([root_auth, Xrn.escape(hostname)])
-
-
- @staticmethod
- def slicename_to_hrn(person_hrn):
- """
-
- Returns the slicename associated to a given person's hrn.
-
- :param person_hrn: user's hrn
- :type person_hrn: string
- :rtype: string
- """
- return (person_hrn + '_slice')
-
- def add_options(self, parser):
- """
- .. warning:: not used
- """
- # we don't have any options for now
- pass
-
- def find_record_by_type_hrn(self, record_type, hrn):
- """
- Finds the record associated with the hrn and its type given in parameter
- if the tuple (hrn, type hrn) is an existing key in the dictionary.
-
- :param record_type: the record's type (slice, node, authority...)
- :type record_type: string
- :param hrn: Human readable name of the object's record
- :type hrn: string
- :returns: Returns the record associated with a given hrn and hrn type.
- Returns None if the key tuple is not in the dictionary.
- :rtype: RegUser if user, RegSlice if slice, RegNode if node...or None if
- record does not exist.
-
- """
- return self.records_by_type_hrn.get((record_type, hrn), None)
-
- def locate_by_type_pointer(self, record_type, pointer):
- """
- Returns the record corresponding to the key pointer and record type.
- Returns None if the record does not exist and is not in the
- records_by_type_pointer dictionnary.
-
- :param record_type: the record's type (slice, node, authority...)
- :type record_type: string
- :param pointer: Pointer to where the record is in the origin db,
- used in case the record comes from a trusted authority.
- :type pointer: integer
- :rtype: RegUser if user, RegSlice if slice, RegNode if node, or None if
- record does not exist.
- """
- return self.records_by_type_pointer.get((record_type, pointer), None)
-
-
- def update_just_added_records_dict(self, record):
- """
-
- Updates the records_by_type_hrn dictionnary if the record has
- just been created.
-
- :param record: Record to add in the records_by_type_hrn dict.
- :type record: dictionary
- """
- rec_tuple = (record.type, record.hrn)
- if rec_tuple in self.records_by_type_hrn:
- self.logger.warning("IotlabImporter.update_just_added_records_dict:\
- duplicate (%s,%s)" % rec_tuple)
- return
- self.records_by_type_hrn[rec_tuple] = record
-
-
- def import_nodes(self, site_node_ids, nodes_by_id, testbed_shell):
- """
-
- Creates appropriate hostnames and RegNode records for each node in
- site_node_ids, based on the information given by the dict nodes_by_id
- that was made from data from OAR. Saves the records to the DB.
-
- :param site_node_ids: site's node ids
- :type site_node_ids: list of integers
- :param nodes_by_id: dictionary , key is the node id, value is the a dict
- with node information.
- :type nodes_by_id: dictionary
- :param testbed_shell: IotlabDriver object, used to have access to
- testbed_shell attributes.
- :type testbed_shell: IotlabDriver
-
- :returns: None
- :rtype: None
-
- """
-
- for node_id in site_node_ids:
- try:
- node = nodes_by_id[node_id]
- except KeyError:
- self.logger.warning("IotlabImporter: cannot find node_id %s \
- - ignored" % (node_id))
- continue
- escaped_hrn = \
- self.hostname_to_hrn_escaped(testbed_shell.root_auth,
- node['hostname'])
- self.logger.info("IOTLABIMPORTER node %s " % (node))
- hrn = node['hrn']
-
- # xxx this sounds suspicious
- if len(hrn) > 64:
- hrn = hrn[:64]
- node_record = self.find_record_by_type_hrn('node', hrn)
- if not node_record:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(escaped_hrn, 'node')
- node_gid = \
- self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
-
- def testbed_get_authority(hrn):
- """ Gets the authority part in the hrn.
- :param hrn: hrn whose authority we are looking for.
- :type hrn: string
- :returns: splits the hrn using the '.' separator and returns
- the authority part of the hrn.
- :rtype: string
-
- """
- return hrn.split(".")[0]
-
- node_record = RegNode(hrn=hrn, gid=node_gid,
- pointer='-1',
- authority=testbed_get_authority(hrn))
- try:
-
- node_record.just_created()
- global_dbsession.add(node_record)
- global_dbsession.commit()
- self.logger.info("IotlabImporter: imported node: %s"
- % node_record)
- self.update_just_added_records_dict(node_record)
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: failed to import node")
- else:
- #TODO: xxx update the record ...
- pass
- node_record.stale = False
-
- def import_sites_and_nodes(self, testbed_shell):
- """
-
- Gets all the sites and nodes from OAR, process the information,
- creates hrns and RegAuthority for sites, and feed them to the database.
- For each site, import the site's nodes to the DB by calling
- import_nodes.
-
- :param testbed_shell: IotlabDriver object, used to have access to
- testbed_shell methods and fetching info on sites and nodes.
- :type testbed_shell: IotlabDriver
- """
-
- sites_listdict = testbed_shell.GetSites()
- nodes_listdict = testbed_shell.GetNodes()
- nodes_by_id = dict([(node['node_id'], node) for node in nodes_listdict])
- for site in sites_listdict:
- site_hrn = site['name']
- site_record = self.find_record_by_type_hrn ('authority', site_hrn)
- self.logger.info("IotlabImporter: import_sites_and_nodes \
- (site) %s \r\n " % site_record)
- if not site_record:
- try:
- urn = hrn_to_urn(site_hrn, 'authority')
- if not self.auth_hierarchy.auth_exists(urn):
- self.auth_hierarchy.create_auth(urn)
-
- auth_info = self.auth_hierarchy.get_auth_info(urn)
- site_record = \
- RegAuthority(hrn=site_hrn,
- gid=auth_info.get_gid_object(),
- pointer='-1',
- authority=get_authority(site_hrn))
- site_record.just_created()
- global_dbsession.add(site_record)
- global_dbsession.commit()
- self.logger.info("IotlabImporter: imported authority \
- (site) %s" % site_record)
- self.update_just_added_records_dict(site_record)
- except SQLAlchemyError:
- # if the site import fails then there is no point in
- # trying to import the
- # site's child records(node, slices, persons), so skip them.
- self.logger.log_exc("IotlabImporter: failed to import \
- site. Skipping child records")
- continue
- else:
- # xxx update the record ...
- pass
-
- site_record.stale = False
- self.import_nodes(site['node_ids'], nodes_by_id, testbed_shell)
-
- return
-
-
-
- def init_person_key(self, person, iotlab_key):
- """
- Returns a tuple pubkey and pkey.
-
- :param person Person's data.
- :type person: dict
- :param iotlab_key: SSH public key, from LDAP user's data. RSA type
- supported.
- :type iotlab_key: string
- :rtype: (string, Keypair)
-
- """
- pubkey = None
- if person['pkey']:
- # randomly pick first key in set
- pubkey = iotlab_key
-
- try:
- pkey = convert_public_key(pubkey)
- except TypeError:
- #key not good. create another pkey
- self.logger.warn("IotlabImporter: \
- unable to convert public \
- key for %s" % person['hrn'])
- pkey = Keypair(create=True)
-
- else:
- # the user has no keys.
- #Creating a random keypair for the user's gid
- self.logger.warn("IotlabImporter: person %s does not have a \
- public key" % (person['hrn']))
- pkey = Keypair(create=True)
- return (pubkey, pkey)
-
- def import_persons_and_slices(self, testbed_shell):
- """
-
- Gets user data from LDAP, process the information.
- Creates hrn for the user's slice, the user's gid, creates
- the RegUser record associated with user. Creates the RegKey record
- associated nwith the user's key.
- Saves those records into the SFA DB.
- import the user's slice onto the database as well by calling
- import_slice.
-
- :param testbed_shell: IotlabDriver object, used to have access to
- testbed_shell attributes.
- :type testbed_shell: IotlabDriver
-
- .. warning:: does not support multiple keys per user
- """
- ldap_person_listdict = testbed_shell.GetPersons()
- self.logger.info("IOTLABIMPORT \t ldap_person_listdict %s \r\n"
- % (ldap_person_listdict))
-
- # import persons
- for person in ldap_person_listdict:
-
- self.logger.info("IotlabImporter: person :" % (person))
- if 'ssh-rsa' not in person['pkey']:
- #people with invalid ssh key (ssh-dss, empty, bullshit keys...)
- #won't be imported
- continue
- person_hrn = person['hrn']
- slice_hrn = self.slicename_to_hrn(person['hrn'])
-
- # xxx suspicious again
- if len(person_hrn) > 64:
- person_hrn = person_hrn[:64]
- person_urn = hrn_to_urn(person_hrn, 'user')
-
-
- self.logger.info("IotlabImporter: users_rec_by_email %s "
- % (self.users_rec_by_email))
-
- #Check if user using person['email'] from LDAP is already registered
- #in SFA. One email = one person. In this case, do not create another
- #record for this person
- #person_hrn returned by GetPerson based on iotlab root auth +
- #uid ldap
- user_record = self.find_record_by_type_hrn('user', person_hrn)
-
- if not user_record and person['email'] in self.users_rec_by_email:
- user_record = self.users_rec_by_email[person['email']]
- person_hrn = user_record.hrn
- person_urn = hrn_to_urn(person_hrn, 'user')
-
-
- slice_record = self.find_record_by_type_hrn('slice', slice_hrn)
-
- iotlab_key = person['pkey']
- # new person
- if not user_record:
- (pubkey, pkey) = self.init_person_key(person, iotlab_key)
- if pubkey is not None and pkey is not None:
- person_gid = \
- self.auth_hierarchy.create_gid(person_urn,
- create_uuid(), pkey)
- if person['email']:
- self.logger.debug("IOTLAB IMPORTER \
- PERSON EMAIL OK email %s " % (person['email']))
- person_gid.set_email(person['email'])
- user_record = \
- RegUser(hrn=person_hrn,
- gid=person_gid,
- pointer='-1',
- authority=get_authority(person_hrn),
- email=person['email'])
- else:
- user_record = \
- RegUser(hrn=person_hrn,
- gid=person_gid,
- pointer='-1',
- authority=get_authority(person_hrn))
-
- if pubkey:
- user_record.reg_keys = [RegKey(pubkey)]
- else:
- self.logger.warning("No key found for user %s"
- % (user_record))
-
- try:
- user_record.just_created()
- global_dbsession.add (user_record)
- global_dbsession.commit()
- self.logger.info("IotlabImporter: imported person \
- %s" % (user_record))
- self.update_just_added_records_dict(user_record)
-
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: \
- failed to import person %s" % (person))
- else:
- # update the record ?
- # if user's primary key has changed then we need to update
- # the users gid by forcing an update here
- sfa_keys = user_record.reg_keys
-
- new_key = False
- if iotlab_key is not sfa_keys:
- new_key = True
- if new_key:
- self.logger.info("IotlabImporter: \t \t USER UPDATE \
- person: %s" % (person['hrn']))
- (pubkey, pkey) = self.init_person_key(person, iotlab_key)
- person_gid = \
- self.auth_hierarchy.create_gid(person_urn,
- create_uuid(), pkey)
- if not pubkey:
- user_record.reg_keys = []
- else:
- user_record.reg_keys = [RegKey(pubkey)]
- self.logger.info("IotlabImporter: updated person: %s"
- % (user_record))
-
- if person['email']:
- user_record.email = person['email']
-
- try:
- global_dbsession.commit()
- user_record.stale = False
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: \
- failed to update person %s"% (person))
-
- self.import_slice(slice_hrn, slice_record, user_record)
-
-
- def import_slice(self, slice_hrn, slice_record, user_record):
- """
-
- Create RegSlice record according to the slice hrn if the slice
- does not exist yet.Creates a relationship with the user record
- associated with the slice.
- Commit the record to the database.
-
-
- :param slice_hrn: Human readable name of the slice.
- :type slice_hrn: string
- :param slice_record: record of the slice found in the DB, if any.
- :type slice_record: RegSlice or None
- :param user_record: user record found in the DB if any.
- :type user_record: RegUser
-
- .. todo::Update the record if a slice record already exists.
- """
- if not slice_record:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(slice_hrn, 'slice')
- slice_gid = \
- self.auth_hierarchy.create_gid(urn,
- create_uuid(), pkey)
- slice_record = RegSlice(hrn=slice_hrn, gid=slice_gid,
- pointer='-1',
- authority=get_authority(slice_hrn))
- try:
- slice_record.just_created()
- global_dbsession.add(slice_record)
- global_dbsession.commit()
-
-
- self.update_just_added_records_dict(slice_record)
-
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: failed to import slice")
-
- #No slice update upon import in iotlab
- else:
- # xxx update the record ...
- self.logger.warning("Iotlab Slice update not implemented")
-
- # record current users affiliated with the slice
- slice_record.reg_researchers = [user_record]
- try:
- global_dbsession.commit()
- slice_record.stale = False
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: failed to update slice")
-
+
def run(self, options):
- """
- Create the special iotlab table, lease_table, in the SFA database.
- Import everything (users, slices, nodes and sites from OAR
- and LDAP) into the SFA database.
- Delete stale records that are no longer in OAR or LDAP.
- :param options:
- :type options:
- """
-
- config = Config ()
- interface_hrn = config.SFA_INTERFACE_HRN
- root_auth = config.SFA_REGISTRY_ROOT_AUTH
-
- testbed_shell = IotlabShell(config)
- # leases_db = TestbedAdditionalSfaDB(config)
- #Create special slice table for iotlab
-
- if not self.exists('lease_table'):
+ """ Run importer"""
+ if not self._exists('lease_table'):
init_tables(engine)
- self.logger.info("IotlabImporter.run: lease_table table created ")
-
- # import site and node records in site into the SFA db.
- self.import_sites_and_nodes(testbed_shell)
- #import users and slice into the SFA DB.
- #self.import_persons_and_slices(testbed_shell)
-
- ### remove stale records
- # special records must be preserved
- system_hrns = [interface_hrn, root_auth,
- interface_hrn + '.slicemanager']
- for record in self.all_records:
- if record.hrn in system_hrns:
- record.stale = False
- if record.peer_authority:
- record.stale = False
-
- for record in self.all_records:
- if record.type == 'user':
- self.logger.info("IotlabImporter: stale records: hrn %s %s"
- % (record.hrn, record.stale))
- try:
- stale = record.stale
- except:
- stale = True
- self.logger.warning("stale not found with %s" % record)
- if stale:
- self.logger.info("IotlabImporter: deleting stale record: %s"
- % (record))
-
- try:
- global_dbsession.delete(record)
- global_dbsession.commit()
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: failed to delete \
- stale record %s" % (record))
+ self.logger.info("iotlabimporter run lease_table created")
def __init__ (self, auth_hierarchy, logger):
self.auth_hierarchy = auth_hierarchy
- self.logger=logger
+ self.logger = logger
def add_options (self, parser):
# we don't have any options for now
def remember_record_by_hrn (self, record):
tuple = (record.type, record.hrn)
if tuple in self.records_by_type_hrn:
- self.logger.warning ("PlImporter.remember_record_by_hrn: duplicate (%s,%s)"%tuple)
+ self.logger.warning ("PlImporter.remember_record_by_hrn: duplicate {}".format(tuple))
return
self.records_by_type_hrn [ tuple ] = record
return
tuple = (record.type, record.pointer)
if tuple in self.records_by_type_pointer:
- self.logger.warning ("PlImporter.remember_record_by_pointer: duplicate (%s,%s)"%tuple)
+ self.logger.warning ("PlImporter.remember_record_by_pointer: duplicate {}".format(tuple))
return
self.records_by_type_pointer [ ( record.type, record.pointer,) ] = record
auth_record.just_created()
global_dbsession.add(auth_record)
global_dbsession.commit()
- self.logger.info("PlImporter: Imported authority (vini site) %s"%auth_record)
+ self.logger.info("PlImporter: Imported authority (vini site) {}".format(auth_record))
self.remember_record ( site_record )
def run (self, options):
if record.pointer != -1] )
# initialize record.stale to True by default, then mark stale=False on the ones that are in use
- for record in all_records: record.stale=True
+ for record in all_records:
+ record.stale = True
######## retrieve PLC data
# Get all plc sites
key = keys_by_id[key_id]
pubkeys.append(key)
except:
- self.logger.warning("Could not spot key %d - probably non-ssh"%key_id)
+ self.logger.warning("Could not spot key {} - probably non-ssh".format(key_id))
keys_by_person_id[person['person_id']] = pubkeys
# Get all plc nodes
nodes = shell.GetNodes( {'peer_id': None}, ['node_id', 'hostname', 'site_id'])
self.create_special_vini_record (interface_hrn)
# Get top authority record
- top_auth_record=self.locate_by_type_hrn ('authority', root_auth)
+ top_auth_record = self.locate_by_type_hrn ('authority', root_auth)
admins = []
# start importing
site_hrn = site['hrn']
# import if hrn is not in list of existing hrns or if the hrn exists
# but its not a site record
- site_record=self.locate_by_type_hrn ('authority', site_hrn)
+ site_record = self.locate_by_type_hrn ('authority', site_hrn)
if not site_record:
try:
urn = hrn_to_urn(site_hrn, 'authority')
auth_info = self.auth_hierarchy.get_auth_info(urn)
site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
pointer=site['site_id'],
- authority=get_authority(site_hrn))
+ authority=get_authority(site_hrn),
+ name=site['name'])
site_record.just_created()
global_dbsession.add(site_record)
global_dbsession.commit()
- self.logger.info("PlImporter: imported authority (site) : %s" % site_record)
- self.remember_record (site_record)
+ self.logger.info("PlImporter: imported authority (site) : {}".format(site_record))
+ self.remember_record(site_record)
except:
# if the site import fails then there is no point in trying to import the
# site's child records (node, slices, persons), so skip them.
- self.logger.log_exc("PlImporter: failed to import site %s. Skipping child records"%site_hrn)
+ self.logger.log_exc("PlImporter: failed to import site {}. Skipping child records"\
+ .format(site_hrn))
continue
else:
# xxx update the record ...
+ site_record.name = site['name']
pass
- site_record.stale=False
+ site_record.stale = False
# import node records
for node_id in site['node_ids']:
try:
node = nodes_by_id[node_id]
except:
- self.logger.warning ("PlImporter: cannot find node_id %s - ignored"%node_id)
+ self.logger.warning ("PlImporter: cannot find node_id {} - ignored"
+ .format(node_id))
continue
site_auth = get_authority(site_hrn)
site_name = site['login_base']
node_record.just_created()
global_dbsession.add(node_record)
global_dbsession.commit()
- self.logger.info("PlImporter: imported node: %s" % node_record)
+ self.logger.info("PlImporter: imported node: {}".format(node_record))
self.remember_record (node_record)
except:
- self.logger.log_exc("PlImporter: failed to import node %s"%node_hrn)
+ self.logger.log_exc("PlImporter: failed to import node {}".format(node_hrn))
continue
else:
# xxx update the record ...
pass
- node_record.stale=False
+ node_record.stale = False
- site_pis=[]
+ site_pis = []
# import persons
for person_id in site['person_ids']:
- proceed=False
+ proceed = False
if person_id in persons_by_id:
- person=persons_by_id[person_id]
- proceed=True
+ person = persons_by_id[person_id]
+ proceed = True
elif person_id in disabled_person_ids:
pass
else:
- self.logger.warning ("PlImporter: cannot locate person_id %s in site %s - ignored"%(person_id,site_hrn))
+ self.logger.warning ("PlImporter: cannot locate person_id {} in site {} - ignored"\
+ .format(person_id, site_hrn))
# make sure to NOT run this if anything is wrong
if not proceed: continue
#person_hrn = email_to_hrn(site_hrn, person['email'])
person_hrn = person['hrn']
if person_hrn is None:
- self.logger.warn("Person %s has no hrn - skipped"%person['email'])
+ self.logger.warn("Person {} has no hrn - skipped".format(person['email']))
continue
# xxx suspicious again
- if len(person_hrn) > 64: person_hrn = person_hrn[:64]
+ if len(person_hrn) > 64:
+ person_hrn = person_hrn[:64]
person_urn = hrn_to_urn(person_hrn, 'user')
user_record = self.locate_by_type_hrn ( 'user', person_hrn)
# return a tuple pubkey (a plc key object) and pkey (a Keypair object)
def init_person_key (person, plc_keys):
- pubkey=None
+ pubkey = None
if person['key_ids']:
# randomly pick first key in set
pubkey = plc_keys[0]
try:
pkey = convert_public_key(pubkey['key'])
except:
- self.logger.warn('PlImporter: unable to convert public key for %s' % person_hrn)
+ self.logger.warn('PlImporter: unable to convert public key for {}'
+ .format(person_hrn))
pkey = Keypair(create=True)
else:
# the user has no keys. Creating a random keypair for the user's gid
- self.logger.warn("PlImporter: person %s does not have a PL public key"%person_hrn)
+ self.logger.warn("PlImporter: person {} does not have a PL public key"
+ .format(person_hrn))
pkey = Keypair(create=True)
return (pubkey, pkey)
try:
plc_keys = keys_by_person_id.get(person['person_id'],[])
if not user_record:
- (pubkey,pkey) = init_person_key (person, plc_keys )
- person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey, email=person['email'])
+ (pubkey, pkey) = init_person_key (person, plc_keys )
+ person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey,
+ email=person['email'])
user_record = RegUser (hrn=person_hrn, gid=person_gid,
pointer=person['person_id'],
authority=get_authority(person_hrn),
if pubkey:
user_record.reg_keys=[RegKey (pubkey['key'], pubkey['key_id'])]
else:
- self.logger.warning("No key found for user %s"%user_record)
+ self.logger.warning("No key found for user {}".format(user_record))
user_record.just_created()
global_dbsession.add (user_record)
global_dbsession.commit()
- self.logger.info("PlImporter: imported person: %s" % user_record)
+ self.logger.info("PlImporter: imported person: {}".format(user_record))
self.remember_record ( user_record )
else:
# update the record ?
sfa_keys = user_record.reg_keys
def sfa_key_in_list (sfa_key,plc_keys):
for plc_key in plc_keys:
- if plc_key['key']==sfa_key.key:
+ if plc_key['key'] == sfa_key.key:
return True
return False
# are all the SFA keys known to PLC ?
- new_keys=False
+ new_keys = False
if not sfa_keys and plc_keys:
- new_keys=True
+ new_keys = True
else:
for sfa_key in sfa_keys:
if not sfa_key_in_list (sfa_key,plc_keys):
person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
person_gid.set_email(person['email'])
if not pubkey:
- user_record.reg_keys=[]
+ user_record.reg_keys = []
else:
- user_record.reg_keys=[ RegKey (pubkey['key'], pubkey['key_id'])]
+ user_record.reg_keys = [ RegKey (pubkey['key'], pubkey['key_id'])]
user_record.gid = person_gid
user_record.just_updated()
- self.logger.info("PlImporter: updated person: %s" % user_record)
+ self.logger.info("PlImporter: updated person: {}".format(user_record))
user_record.email = person['email']
global_dbsession.commit()
- user_record.stale=False
+ user_record.stale = False
# accumulate PIs - PLCAPI has a limitation that when someone has PI role
# this is valid for all sites she is in..
- # PI is coded with role_id==20
+ # PI is coded with role_id == 20
if 20 in person['role_ids']:
site_pis.append (user_record)
admins.append(user_record)
except:
- self.logger.log_exc("PlImporter: failed to import person %d %s"%(person['person_id'],person['email']))
+ self.logger.log_exc("PlImporter: failed to import person {} {}"
+ .format(person['person_id'], person['email']))
# maintain the list of PIs for a given site
# for the record, Jordan had proposed the following addition as a welcome hotfix to a previous version:
try:
slice = slices_by_id[slice_id]
except:
- self.logger.warning ("PlImporter: cannot locate slice_id %s - ignored"%slice_id)
+ self.logger.warning ("PlImporter: cannot locate slice_id {} - ignored"
+ .format(slice_id))
continue
#slice_hrn = slicename_to_hrn(interface_hrn, slice['name'])
slice_hrn = slice['hrn']
if slice_hrn is None:
- self.logger.warning("Slice %s has no hrn - skipped"%slice['name'])
+ self.logger.warning("Slice {} has no hrn - skipped"
+ .format(slice['name']))
continue
slice_record = self.locate_by_type_hrn ('slice', slice_hrn)
if not slice_record:
slice_record.just_created()
global_dbsession.add(slice_record)
global_dbsession.commit()
- self.logger.info("PlImporter: imported slice: %s" % slice_record)
+ self.logger.info("PlImporter: imported slice: {}".format(slice_record))
self.remember_record ( slice_record )
except:
- self.logger.log_exc("PlImporter: failed to import slice %s (%s)"%(slice_hrn,slice['name']))
+ self.logger.log_exc("PlImporter: failed to import slice {} ({})"
+ .format(slice_hrn, slice['name']))
else:
# xxx update the record ...
# given that we record the current set of users anyways, there does not seem to be much left to do here
- # self.logger.warning ("Slice update not yet implemented on slice %s (%s)"%(slice_hrn,slice['name']))
+ # self.logger.warning ("Slice update not yet implemented on slice {} ({})"
+ # .format(slice_hrn, slice['name']))
pass
# record current users affiliated with the slice
slice_record.reg_researchers = \
- [ self.locate_by_type_pointer ('user',user_id) for user_id in slice['person_ids'] ]
+ [ self.locate_by_type_pointer ('user', user_id) for user_id in slice['person_ids'] ]
+        # remove any weird value (it looks like we can get 'None' here)
+ slice_record.reg_researchers = [ x for x in slice_record.reg_researchers if x ]
global_dbsession.commit()
- slice_record.stale=False
+ slice_record.stale = False
# Set PL Admins as PI's of the top authority
if admins:
top_auth_record.reg_pis = list(set(admins))
global_dbsession.commit()
- self.logger.info('PlImporter: set PL admins %s as PIs of %s'%(admins,top_auth_record.hrn))
+ self.logger.info('PlImporter: set PL admins {} as PIs of {}'
+ .format(admins, top_auth_record.hrn))
### remove stale records
# special records must be preserved
system_hrns = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
for record in all_records:
if record.hrn in system_hrns:
- record.stale=False
+ record.stale = False
if record.peer_authority:
- record.stale=False
+ record.stale = False
if ".vini" in interface_hrn and interface_hrn.endswith('vini') and \
record.hrn.endswith("internet2"):
- record.stale=False
+ record.stale = False
for record in all_records:
- try: stale=record.stale
+ try: stale = record.stale
except:
- stale=True
- self.logger.warning("stale not found with %s"%record)
+ stale = True
+ self.logger.warning("stale not found with {}".format(record))
if stale:
- self.logger.info("PlImporter: deleting stale record: %s" % record)
+ self.logger.info("PlImporter: deleting stale record: {}".format(record))
global_dbsession.delete(record)
global_dbsession.commit()
+++ /dev/null
-"""
-This API is adapted for OpenLDAP. The file contains all LDAP classes and methods
-needed to:
-- Load the LDAP connection configuration file (login, address..) with LdapConfig
-- Connect to LDAP with ldap_co
-- Create a unique LDAP login and password for a user based on his email or last
-name and first name with LoginPassword.
-- Manage entries in LDAP using SFA records with LDAPapi (Search, Add, Delete,
-Modify)
-
-"""
-import random
-from passlib.hash import ldap_salted_sha1 as lssha
-
-from sfa.util.xrn import get_authority
-from sfa.util.sfalogging import logger
-from sfa.util.config import Config
-
-import ldap
-import ldap.modlist as modlist
-
-import os.path
-
-
-class LdapConfig():
- """
- Ldap configuration class loads the configuration file and sets the
- ldap IP address, password, people dn, web dn, group dn. All these settings
- were defined in a separate file ldap_config.py to avoid sharing them in
- the SFA git as it contains sensible information.
-
- """
- def __init__(self, config_file='/etc/sfa/ldap_config.py'):
- """Loads configuration from file /etc/sfa/ldap_config.py and set the
- parameters for connection to LDAP.
-
- """
-
- try:
- execfile(config_file, self.__dict__)
-
- self.config_file = config_file
- # path to configuration data
- self.config_path = os.path.dirname(config_file)
- except IOError:
- raise IOError, "Could not find or load the configuration file: %s" \
- % config_file
-
-
-class ldap_co:
- """ Set admin login and server configuration variables."""
-
- def __init__(self):
- """Fetch LdapConfig attributes (Ldap server connection parameters and
- defines port , version and subtree scope.
-
- """
- #Iotlab PROD LDAP parameters
- self.ldapserv = None
- ldap_config = LdapConfig()
- self.config = ldap_config
- self.ldapHost = ldap_config.LDAP_IP_ADDRESS
- self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN
- self.ldapGroupDN = ldap_config.LDAP_GROUP_DN
- self.ldapAdminDN = ldap_config.LDAP_WEB_DN
- self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD
- self.ldapPort = ldap.PORT
- self.ldapVersion = ldap.VERSION3
- self.ldapSearchScope = ldap.SCOPE_SUBTREE
-
- def connect(self, bind=True):
- """Enables connection to the LDAP server.
-
- :param bind: Set the bind parameter to True if a bind is needed
- (for add/modify/delete operations). Set to False otherwise.
- :type bind: boolean
- :returns: dictionary with status of the connection. True if Successful,
- False if not and in this case the error
- message( {'bool', 'message'} ).
- :rtype: dict
-
- """
- try:
- self.ldapserv = ldap.open(self.ldapHost)
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
- # Bind with authentification
- if(bind):
- return self.bind()
-
- else:
- return {'bool': True}
-
- def bind(self):
- """ Binding method.
-
- :returns: dictionary with the bind status. True if Successful,
- False if not and in this case the error message({'bool','message'})
- :rtype: dict
-
- """
- try:
- # Opens a connection after a call to ldap.open in connect:
- self.ldapserv = ldap.initialize("ldap://" + self.ldapHost)
-
- # Bind/authenticate with a user with apropriate
- #rights to add objects
- self.ldapserv.simple_bind_s(self.ldapAdminDN,
- self.ldapAdminPassword)
-
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
- return {'bool': True}
-
- def close(self):
- """Close the LDAP connection.
-
- Can throw an exception if the unbinding fails.
-
- :returns: dictionary with the bind status if the unbinding failed and
- in this case the dict contains an error message. The dictionary keys
- are : ({'bool','message'})
- :rtype: dict or None
-
- """
- try:
- self.ldapserv.unbind_s()
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
-
-class LoginPassword():
- """
-
- Class to handle login and password generation, using custom login generation
- algorithm.
-
- """
- def __init__(self):
- """
-
- Sets password and login maximum length, and defines the characters that
- can be found in a random generated password.
-
- """
- self.login_max_length = 8
- self.length_password = 8
- self.chars_password = ['!', '$', '(',')', '*', '+', ',', '-', '.',
- '0', '1', '2', '3', '4', '5', '6', '7', '8',
- '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
- 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
- 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
- '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- '\'']
-
- @staticmethod
- def clean_user_names(record):
- """
-
- Removes special characters such as '-', '_' , '[', ']' and ' ' from the
- first name and last name.
-
- :param record: user's record
- :type record: dict
- :returns: lower_first_name and lower_last_name if they were found
- in the user's record. Return None, none otherwise.
- :rtype: string, string or None, None.
-
- """
- if 'first_name' in record and 'last_name' in record:
- #Remove all special characters from first_name/last name
- lower_first_name = record['first_name'].replace('-', '')\
- .replace('_', '').replace('[', '')\
- .replace(']', '').replace(' ', '')\
- .lower()
- lower_last_name = record['last_name'].replace('-', '')\
- .replace('_', '').replace('[', '')\
- .replace(']', '').replace(' ', '')\
- .lower()
- return lower_first_name, lower_last_name
- else:
- return None, None
-
- @staticmethod
- def extract_name_from_email(record):
- """
-
- When there is no valid first name and last name in the record,
- the email is used to generate the login. Here, we assume the email
- is firstname.lastname@something.smthg. The first name and last names
- are extracted from the email, special charcaters are removed and
- they are changed into lower case.
-
- :param record: user's data
- :type record: dict
- :returns: the first name and last name taken from the user's email.
- lower_first_name, lower_last_name.
- :rtype: string, string
-
- """
-
- email = record['email']
- email = email.split('@')[0].lower()
- lower_first_name = None
- lower_last_name = None
- #Assume there is first name and last name in email
- #if there is a separator
- separator_list = ['.', '_', '-']
- for sep in separator_list:
- if sep in email:
- mail = email.split(sep)
- lower_first_name = mail[0]
- lower_last_name = mail[1]
- break
-
- #Otherwise just take the part before the @ as the
- #lower_first_name and lower_last_name
- if lower_first_name is None:
- lower_first_name = email
- lower_last_name = email
-
- return lower_first_name, lower_last_name
-
- def get_user_firstname_lastname(self, record):
- """
-
- Get the user first name and last name from the information we have in
- the record.
-
- :param record: user's information
- :type record: dict
- :returns: the user's first name and last name.
-
- .. seealso:: clean_user_names
- .. seealso:: extract_name_from_email
-
- """
- lower_first_name, lower_last_name = self.clean_user_names(record)
-
- #No first name and last name check email
- if lower_first_name is None and lower_last_name is None:
-
- lower_first_name, lower_last_name = \
- self.extract_name_from_email(record)
-
- return lower_first_name, lower_last_name
-
- # XXX JORDAN: This function writes an error in the log but returns normally :))
- def choose_sets_chars_for_login(self, lower_first_name, lower_last_name):
- """
-
- Algorithm to select sets of characters from the first name and last
- name, depending on the lenght of the last name and the maximum login
- length which in our case is set to 8 characters.
-
- :param lower_first_name: user's first name in lower case.
- :param lower_last_name: usr's last name in lower case.
- :returns: user's login
- :rtype: string
-
- """
- length_last_name = len(lower_last_name)
- self.login_max_length = 8
-
- #Try generating a unique login based on first name and last name
-
- if length_last_name >= self.login_max_length:
- login = lower_last_name[0:self.login_max_length]
- index = 0
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name >= 4:
- login = lower_last_name
- index = 0
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name == 3:
- login = lower_first_name[0:1] + lower_last_name
- index = 1
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name == 2:
- if len(lower_first_name) >= 2:
- login = lower_first_name[0:2] + lower_last_name
- index = 2
- logger.debug("login : %s index : %s" % (login, index))
- else:
- logger.error("LoginException : \
- Generation login error with \
- minimum four characters")
-
- else:
- logger.error("LDAP LdapGenerateUniqueLogin failed : \
- impossible to generate unique login for %s %s"
- % (lower_first_name, lower_last_name))
- logger.debug("JORDAN choose_sets_chars_for_login %d %s" % (index, login))
- return index, login
-
- def generate_password(self):
- """
-
- Generate a password upon adding a new user in LDAP Directory
- (8 characters length). The generated password is composed of characters
- from the chars_password list.
-
- :returns: the randomly generated password
- :rtype: string
-
- """
- password = str()
-
- length = len(self.chars_password)
- for index in range(self.length_password):
- char_index = random.randint(0, length - 1)
- password += self.chars_password[char_index]
-
- return password
-
- @staticmethod
- def encrypt_password(password):
- """
-
- Use passlib library to make a RFC2307 LDAP encrypted password salt size
- is 8, use sha-1 algorithm.
-
- :param password: password not encrypted.
- :type password: string
- :returns: Returns encrypted password.
- :rtype: string
-
- """
- #Keep consistency with Java Iotlab's LDAP API
- #RFC2307SSHAPasswordEncryptor so set the salt size to 8 bytes
- return lssha.encrypt(password, salt_size=8)
-
-
-class LDAPapi:
- """Defines functions to insert and search entries in the LDAP.
-
- .. note:: class supposes the unix schema is used
-
- """
- def __init__(self):
- logger.setLevelDebug()
-
- #SFA related config
-
- config = Config()
- self.login_pwd = LoginPassword()
- self.authname = config.SFA_REGISTRY_ROOT_AUTH
- self.conn = ldap_co()
- self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS
- self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN
- self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER
- self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH
- self.baseDN = self.conn.ldapPeopleDN
- self.ldapShell = '/bin/bash'
-
-
- def LdapGenerateUniqueLogin(self, record):
- """
-
- Generate login for adding a new user in LDAP Directory
- (four characters minimum length). Get proper last name and
- first name so that the user's login can be generated.
-
- :param record: Record must contain first_name and last_name.
- :type record: dict
- :returns: the generated login for the user described with record if the
- login generation is successful, None if it fails.
- :rtype: string or None
-
- """
- #For compatibility with other ldap func
- if 'mail' in record and 'email' not in record:
- record['email'] = record['mail']
-
- lower_first_name, lower_last_name = \
- self.login_pwd.get_user_firstname_lastname(record)
-
- index, login = self.login_pwd.choose_sets_chars_for_login(
- lower_first_name, lower_last_name)
-
- login_filter = '(uid=' + login + ')'
- get_attrs = ['uid']
- try:
- #Check if login already in use
-
- while (len(self.LdapSearch(login_filter, get_attrs)) is not 0):
-
- index += 1
- if index >= 9:
- logger.error("LoginException : Generation login error \
- with minimum four characters")
- break
- else:
- try:
- login = \
- lower_first_name[0:index] + \
- lower_last_name[0:
- self.login_pwd.login_max_length
- - index]
- logger.debug("JORDAN trying login: %r" % login)
- login_filter = '(uid=' + login + ')'
- except KeyError:
- print "lower_first_name - lower_last_name too short"
-
- logger.debug("LDAP.API \t LdapGenerateUniqueLogin login %s"
- % (login))
- return login
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapGenerateUniqueLogin Error %s" % (error))
- return None
-
- def find_max_uidNumber(self):
- """Find the LDAP max uidNumber (POSIX uid attribute).
-
- Used when adding a new user in LDAP Directory
-
- :returns: max uidNumber + 1
- :rtype: string
-
- """
- #First, get all the users in the LDAP
- get_attrs = "(uidNumber=*)"
- login_filter = ['uidNumber']
-
- result_data = self.LdapSearch(get_attrs, login_filter)
- #It there is no user in LDAP yet, First LDAP user
- if result_data == []:
- max_uidnumber = self.ldapUserUidNumberMin
- #Otherwise, get the highest uidNumber
- else:
- uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data]
- logger.debug("LDAPapi.py \tfind_max_uidNumber \
- uidNumberList %s " % (uidNumberList))
- max_uidnumber = max(uidNumberList) + 1
-
- return str(max_uidnumber)
-
-
- def get_ssh_pkey(self, record):
- """TODO ; Get ssh public key from sfa record
- To be filled by N. Turro ? or using GID pl way?
-
- """
- return 'A REMPLIR '
-
- @staticmethod
- #TODO Handle OR filtering in the ldap query when
- #dealing with a list of records instead of doing a for loop in GetPersons
- def make_ldap_filters_from_record(record=None):
- """Helper function to make LDAP filter requests out of SFA records.
-
- :param record: user's sfa record. Should contain first_name,last_name,
- email or mail, and if the record is enabled or not. If the dict
- record does not have all of these, must at least contain the user's
- email.
- :type record: dict
- :returns: LDAP request
- :rtype: string
-
- """
- logger.debug("JORDAN make_ldap_filters_from_record: %r" % record)
- req_ldap = ''
- req_ldapdict = {}
- if record :
- if 'first_name' in record and 'last_name' in record:
- if record['first_name'] != record['last_name']:
- req_ldapdict['cn'] = str(record['first_name'])+" "\
- + str(record['last_name'])
- if 'uid' in record:
- req_ldapdict['uid'] = record['uid']
- if 'email' in record:
- req_ldapdict['mail'] = record['email']
- if 'mail' in record:
- req_ldapdict['mail'] = record['mail']
- if 'enabled' in record:
- if record['enabled'] is True:
- req_ldapdict['shadowExpire'] = '-1'
- else:
- req_ldapdict['shadowExpire'] = '0'
-
- #Hrn should not be part of the filter because the hrn
- #presented by a certificate of a SFA user not imported in
- #Iotlab does not include the iotlab login in it
- #Plus, the SFA user may already have an account with iotlab
- #using another login.
-
- logger.debug("\r\n \t LDAP.PY make_ldap_filters_from_record \
- record %s req_ldapdict %s"
- % (record, req_ldapdict))
-
- for k in req_ldapdict:
- req_ldap += '(' + str(k) + '=' + str(req_ldapdict[k]) + ')'
- if len(req_ldapdict.keys()) >1 :
- req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
- size = len(req_ldap)
- req_ldap = req_ldap[:(size-1)] + ')' + req_ldap[(size-1):]
- else:
- req_ldap = "(cn=*)"
-
- return req_ldap
-
- def make_ldap_attributes_from_record(self, record):
- """
-
- When adding a new user to Iotlab's LDAP, creates an attributes
- dictionnary from the SFA record understandable by LDAP. Generates the
- user's LDAP login.User is automatically validated (account enabled)
- and described as a SFA USER FROM OUTSIDE IOTLAB.
-
- :param record: must contain the following keys and values:
- first_name, last_name, mail, pkey (ssh key).
- :type record: dict
- :returns: dictionary of attributes using LDAP data structure model.
- :rtype: dict
-
- """
- logger.debug("JORDAN make_ldap_attributes_from_record: %r" % record)
-
- attrs = {}
- attrs['objectClass'] = ["top", "person", "inetOrgPerson",
- "organizationalPerson", "posixAccount",
- "shadowAccount", "systemQuotas",
- "ldapPublicKey"]
-
- attrs['uid'] = self.LdapGenerateUniqueLogin(record)
- try:
- attrs['givenName'] = str(record['first_name']).lower().capitalize()
- attrs['sn'] = str(record['last_name']).lower().capitalize()
- attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']
- attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']
-
- except KeyError:
- attrs['givenName'] = attrs['uid']
- attrs['sn'] = attrs['uid']
- attrs['cn'] = attrs['uid']
- attrs['gecos'] = attrs['uid']
-
- attrs['quota'] = self.ldapUserQuotaNFS
- attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']
- attrs['loginShell'] = self.ldapShell
- attrs['gidNumber'] = self.ldapUserGidNumber
- attrs['uidNumber'] = self.find_max_uidNumber()
- attrs['mail'] = record['mail'].lower()
- try:
- attrs['sshPublicKey'] = record['pkey']
- except KeyError:
- attrs['sshPublicKey'] = self.get_ssh_pkey(record)
-
-
- #Password is automatically generated because SFA user don't go
- #through the Iotlab website used to register new users,
- #There is no place in SFA where users can enter such information
- #yet.
- #If the user wants to set his own password , he must go to the Iotlab
- #website.
- password = self.login_pwd.generate_password()
- attrs['userPassword'] = self.login_pwd.encrypt_password(password)
-
- #Account automatically validated (no mail request to admins)
- #Set to 0 to disable the account, -1 to enable it,
- attrs['shadowExpire'] = '-1'
-
- #Motivation field in Iotlab
- attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'
-
- attrs['ou'] = 'SFA' #Optional: organizational unit
- #No info about those here:
- attrs['l'] = 'To be defined'#Optional: Locality.
- attrs['st'] = 'To be defined' #Optional: state or province (country).
-
- return attrs
-
-
- def LdapAddUser(self, record) :
- """Add SFA user to LDAP if it is not in LDAP yet.
-
- :param record: dictionnary with the user's data.
- :returns: a dictionary with the status (Fail= False, Success= True)
- and the uid of the newly added user if successful, or the error
- message it is not. Dict has keys bool and message in case of
- failure, and bool uid in case of success.
- :rtype: dict
-
- .. seealso:: make_ldap_filters_from_record
-
- """
- filter_by = self.make_ldap_filters_from_record({'email' : record['email']})
- user = self.LdapSearch(filter_by)
- if user:
- logger.debug("LDAPapi.py user ldap exist \t%s" % user)
- # user = [('uid=saint,ou=People,dc=senslab,dc=info', {'uid': ['saint'], 'givenName': ['Fred'], ...})]
- return {'bool': True, 'uid': user[0][1]['uid'][0]}
- else:
- self.conn.connect()
- user_ldap_attrs = self.make_ldap_attributes_from_record(record)
- logger.debug("LDAPapi.py user ldap doesn't exist \t%s" % user_ldap_attrs)
- # The dn of our new entry/object
- dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN
- try:
- ldif = modlist.addModlist(user_ldap_attrs)
- self.conn.ldapserv.add_s(dn, ldif)
- except ldap.LDAPError, error:
- logger.log_exc("LDAP Add Error %s" % error)
- return {'bool': False, 'message': error}
- self.conn.close()
- return {'bool': True, 'uid': user_ldap_attrs['uid']}
-
-
- def LdapDelete(self, person_dn):
- """Deletes a person in LDAP. Uses the dn of the user.
-
- :param person_dn: user's ldap dn.
- :type person_dn: string
- :returns: dictionary with bool True if successful, bool False
- and the error if not.
- :rtype: dict
-
- """
- #Connect and bind
- result = self.conn.connect()
- if(result['bool']):
- try:
- self.conn.ldapserv.delete_s(person_dn)
- self.conn.close()
- return {'bool': True}
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP Delete Error %s" % error)
- return {'bool': False, 'message': error}
-
- def LdapDeleteUser(self, record_filter):
- """Deletes a SFA person in LDAP, based on the user's hrn.
-
- :param record_filter: Filter to find the user to be deleted. Must
- contain at least the user's email.
- :type record_filter: dict
- :returns: dict with bool True if successful, bool False and error
- message otherwise.
- :rtype: dict
-
- .. seealso:: LdapFindUser docstring for more info on record filter.
- .. seealso:: LdapDelete for user deletion
-
- """
- #Find uid of the person
- person = self.LdapFindUser(record_filter, [])
- logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s"
- % (record_filter, person))
-
- if person:
- dn = 'uid=' + person['uid'] + "," + self.baseDN
- else:
- return {'bool': False}
-
- result = self.LdapDelete(dn)
- return result
-
- def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
- """ Modifies a LDAP entry, replaces user's old attributes with
- the new ones given.
-
- :param dn: user's absolute name in the LDAP hierarchy.
- :param old_attributes_dict: old user's attributes. Keys must match
- the ones used in the LDAP model.
- :param new_attributes_dict: new user's attributes. Keys must match
- the ones used in the LDAP model.
- :type dn: string
- :type old_attributes_dict: dict
- :type new_attributes_dict: dict
- :returns: dict bool True if Successful, bool False if not.
- :rtype: dict
-
- """
-
- ldif = modlist.modifyModlist(old_attributes_dict, new_attributes_dict)
- # Connect and bind/authenticate
- result = self.conn.connect()
- if (result['bool']):
- try:
- self.conn.ldapserv.modify_s(dn, ldif)
- self.conn.close()
- return {'bool': True}
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapModify Error %s" % error)
- return {'bool': False}
-
-
- def LdapModifyUser(self, user_record, new_attributes_dict):
- """
-
- Gets the record from one user based on the user sfa recordand changes
- the attributes according to the specified new_attributes. Do not use
- this if we need to modify the uid. Use a ModRDN operation instead
- ( modify relative DN ).
-
- :param user_record: sfa user record.
- :param new_attributes_dict: new user attributes, keys must be the
- same as the LDAP model.
- :type user_record: dict
- :type new_attributes_dict: dict
- :returns: bool True if successful, bool False if not.
- :rtype: dict
-
- .. seealso:: make_ldap_filters_from_record for info on what is mandatory
- in the user_record.
- .. seealso:: make_ldap_attributes_from_record for the LDAP objectclass.
-
- """
- if user_record is None:
- logger.error("LDAP \t LdapModifyUser Need user record ")
- return {'bool': False}
-
- #Get all the attributes of the user_uid_login
- #person = self.LdapFindUser(record_filter,[])
- req_ldap = self.make_ldap_filters_from_record(user_record)
- person_list = self.LdapSearch(req_ldap, [])
- logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s"
- % (person_list))
-
- if person_list and len(person_list) > 1:
- logger.error("LDAP \t LdapModifyUser Too many users returned")
- return {'bool': False}
- if person_list is None:
- logger.error("LDAP \t LdapModifyUser User %s doesn't exist "
- % (user_record))
- return {'bool': False}
-
- # The dn of our existing entry/object
- #One result only from ldapSearch
- person = person_list[0][1]
- dn = 'uid=' + person['uid'][0] + "," + self.baseDN
-
- if new_attributes_dict:
- old = {}
- for k in new_attributes_dict:
- if k not in person:
- old[k] = ''
- else:
- old[k] = person[k]
- logger.debug(" LDAPapi.py \t LdapModifyUser new_attributes %s"
- % (new_attributes_dict))
- result = self.LdapModify(dn, old, new_attributes_dict)
- return result
- else:
- logger.error("LDAP \t LdapModifyUser No new attributes given. ")
- return {'bool': False}
-
-
- def LdapMarkUserAsDeleted(self, record):
- """
-
- Sets shadowExpire to 0, disabling the user in LDAP. Calls LdapModifyUser
- to change the shadowExpire of the user.
-
- :param record: the record of the user who has to be disabled.
- Should contain first_name,last_name, email or mail, and if the
- record is enabled or not. If the dict record does not have all of
- these, must at least contain the user's email.
- :type record: dict
- :returns: {bool: True} if successful or {bool: False} if not
- :rtype: dict
-
- .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
- """
-
- new_attrs = {}
- #Disable account
- new_attrs['shadowExpire'] = '0'
- logger.debug(" LDAPapi.py \t LdapMarkUserAsDeleted ")
- ret = self.LdapModifyUser(record, new_attrs)
- return ret
-
- def LdapResetPassword(self, record):
- """Resets password for the user whose record is the parameter and
- changes the corresponding entry in the LDAP.
-
- :param record: user's sfa record whose Ldap password must be reset.
- Should contain first_name,last_name,
- email or mail, and if the record is enabled or not. If the dict
- record does not have all of these, must at least contain the user's
- email.
- :type record: dict
- :returns: return value of LdapModifyUser. True if successful, False
- otherwise.
-
- .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
-
- """
- password = self.login_pwd.generate_password()
- attrs = {}
- attrs['userPassword'] = self.login_pwd.encrypt_password(password)
- logger.debug("LDAP LdapResetPassword encrypt_password %s"
- % (attrs['userPassword']))
- result = self.LdapModifyUser(record, attrs)
- return result
-
-
- def LdapSearch(self, req_ldap=None, expected_fields=None):
- """
- Used to search directly in LDAP, by using ldap filters and return
- fields. When req_ldap is None, returns all the entries in the LDAP.
-
- :param req_ldap: ldap style request, with appropriate filters,
- example: (cn=*).
- :param expected_fields: Fields in the user ldap entry that has to be
- returned. If None is provided, will return 'mail', 'givenName',
- 'sn', 'uid', 'sshPublicKey', 'shadowExpire'.
- :type req_ldap: string
- :type expected_fields: list
-
- .. seealso:: make_ldap_filters_from_record for req_ldap format.
-
- """
- logger.debug("JORDAN LdapSearch, req_ldap=%r, expected_fields=%r" % (req_ldap, expected_fields))
- result = self.conn.connect(bind=False)
- if (result['bool']):
-
- return_fields_list = []
- if expected_fields is None:
- return_fields_list = ['mail', 'givenName', 'sn', 'uid',
- 'sshPublicKey', 'shadowExpire']
- else:
- return_fields_list = expected_fields
- #No specifc request specified, get the whole LDAP
- if req_ldap is None:
- req_ldap = '(cn=*)'
-
- logger.debug("LDAP.PY \t LdapSearch req_ldap %s \
- return_fields_list %s" \
- %(req_ldap, return_fields_list))
-
- try:
- msg_id = self.conn.ldapserv.search(
- self.baseDN, ldap.SCOPE_SUBTREE,
- req_ldap, return_fields_list)
- #Get all the results matching the search from ldap in one
- #shot (1 value)
- result_type, result_data = \
- self.conn.ldapserv.result(msg_id, 1)
-
- self.conn.close()
-
- logger.debug("LDAP.PY \t LdapSearch result_data %s"
- % (result_data))
-
- return result_data
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapSearch Error %s" % error)
- return []
-
- else:
- logger.error("LDAP.PY \t Connection Failed")
- return
-
- def _process_ldap_info_for_all_users(self, result_data):
- """Process the data of all enabled users in LDAP.
-
- :param result_data: Contains information of all enabled users in LDAP
- and is coming from LdapSearch.
- :param result_data: list
-
- .. seealso:: LdapSearch
-
- """
- results = []
- logger.debug(" LDAP.py _process_ldap_info_for_all_users result_data %s "
- % (result_data))
- for ldapentry in result_data:
- logger.debug(" LDAP.py _process_ldap_info_for_all_users \
- ldapentry name : %s " % (ldapentry[1]['uid'][0]))
- tmpname = ldapentry[1]['uid'][0]
- hrn = self.authname + "." + tmpname
-
- tmpemail = ldapentry[1]['mail'][0]
- if ldapentry[1]['mail'][0] == "unknown":
- tmpemail = None
-
- try:
- results.append({
- 'type': 'user',
- 'pkey': ldapentry[1]['sshPublicKey'][0],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname ,
- 'email':tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry[1]['givenName'][0],
- 'last_name': ldapentry[1]['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': self.authname,
- 'peer_authority': '',
- 'pointer': -1,
- 'hrn': hrn,
- })
- except KeyError, error:
- logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s"
- % (error))
- return
-
- return results
-
- def _process_ldap_info_for_one_user(self, record, result_data):
- """
-
- Put the user's ldap data into shape. Only deals with one user
- record and one user data from ldap.
-
- :param record: user record
- :param result_data: Raw ldap data coming from LdapSearch
- :returns: user's data dict with 'type','pkey','uid', 'email',
- 'first_name' 'last_name''serial''authority''peer_authority'
- 'pointer''hrn'
- :type record: dict
- :type result_data: list
- :rtype :dict
-
- """
- #One entry only in the ldap data because we used a filter
- #to find one user only
- ldapentry = result_data[0][1]
- logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" % (ldapentry))
- tmpname = ldapentry['uid'][0]
-
- tmpemail = ldapentry['mail'][0]
- if ldapentry['mail'][0] == "unknown":
- tmpemail = None
-
- parent_hrn = None
- peer_authority = None
- # If the user is coming from External authority (e.g. OneLab)
- # Then hrn is None, it should be filled in by the creation of Ldap User
- # XXX LOIC !!! What if a user email is in 2 authorities?
- if 'hrn' in record and record['hrn'] is not None:
- hrn = record['hrn']
- parent_hrn = get_authority(hrn)
- if parent_hrn != self.authname:
- peer_authority = parent_hrn
- #In case the user was not imported from Iotlab LDAP
- #but from another federated site, has an account in
- #iotlab but currently using his hrn from federated site
- #then the login is different from the one found in its hrn
- if tmpname != hrn.split('.')[1]:
- hrn = None
- else:
- hrn = None
-
- if hrn is None:
- results = {
- 'type': 'user',
- 'pkey': ldapentry['sshPublicKey'],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname,
- 'email': tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry['givenName'][0],
- 'last_name': ldapentry['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': parent_hrn,
- 'peer_authority': peer_authority,
- 'pointer': -1,
- }
- else:
- #hrn = None
- results = {
- 'type': 'user',
- 'pkey': ldapentry['sshPublicKey'],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname,
- 'email': tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry['givenName'][0],
- 'last_name': ldapentry['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': parent_hrn,
- 'peer_authority': peer_authority,
- 'pointer': -1,
- 'hrn': hrn,
- }
- return results
-
- def LdapFindUser(self, record=None, is_user_enabled=None,
- expected_fields=None):
- """
-
- Search a SFA user with a hrn. User should be already registered
- in Iotlab LDAP.
-
- :param record: sfa user's record. Should contain first_name,last_name,
- email or mail. If no record is provided, returns all the users found
- in LDAP.
- :type record: dict
- :param is_user_enabled: is the user's iotlab account already valid.
- :type is_user_enabled: Boolean.
- :returns: LDAP entries from ldap matching the filter provided. Returns
- a single entry if one filter has been given and a list of
- entries otherwise.
- :rtype: dict or list
-
- """
- logger.debug("JORDAN LdapFindUser record=%r, is_user_enabled=%r, expected_fields=%r" % (record, is_user_enabled, expected_fields))
-
- custom_record = {}
- if is_user_enabled:
- custom_record['enabled'] = is_user_enabled
- if record:
- custom_record.update(record)
-
- req_ldap = self.make_ldap_filters_from_record(custom_record)
- return_fields_list = []
- if expected_fields is None:
- return_fields_list = ['mail', 'givenName', 'sn', 'uid',
- 'sshPublicKey']
- else:
- return_fields_list = expected_fields
-
- result_data = self.LdapSearch(req_ldap, return_fields_list)
- logger.debug("LDAP.PY \t LdapFindUser result_data %s" % (result_data))
-
- if len(result_data) == 0:
- return None
- #Asked for a specific user
- if record is not None:
- logger.debug("LOIC - record = %s" % record)
- results = self._process_ldap_info_for_one_user(record, result_data)
-
- else:
- #Asked for all users in ldap
- results = self._process_ldap_info_for_all_users(result_data)
- return results
+++ /dev/null
-"""
-File used to handle issuing request to OAR and parse OAR's JSON responses.
-Contains the following classes:
-- JsonPage : handles multiple pages OAR answers.
-- OARRestapi : handles issuing POST or GET requests to OAR.
-- ParsingResourcesFull : dedicated to parsing OAR's answer to a get resources
-full request.
-- OARGETParser : handles parsing the Json answers to different GET requests.
-
-"""
-from httplib import HTTPConnection, HTTPException, NotConnected
-import json
-from sfa.util.config import Config
-from sfa.util.sfalogging import logger
-import os.path
-
-
-class JsonPage:
-
- """Class used to manipulate json pages given by OAR.
-
- In case the json answer from a GET request is too big to fit in one json
- page, this class provides helper methods to retrieve all the pages and
- store them in a list before putting them into one single json dictionary,
- facilitating the parsing.
-
- """
-
- def __init__(self):
- """Defines attributes to manipulate and parse the json pages.
-
- """
- #All are boolean variables
- self.concatenate = False
- #Indicates end of data, no more pages to be loaded.
- self.end = False
- self.next_page = False
- #Next query address
- self.next_offset = None
- #Json page
- self.raw_json = None
-
- def FindNextPage(self):
- """
- Gets next data page from OAR when the query's results are too big to
- be transmitted in a single page. Uses the "links' item in the json
- returned to check if an additionnal page has to be loaded. Updates
- object attributes next_page, next_offset, and end.
-
- """
- if "links" in self.raw_json:
- for page in self.raw_json['links']:
- if page['rel'] == 'next':
- self.concatenate = True
- self.next_page = True
- self.next_offset = "?" + page['href'].split("?")[1]
- return
-
- if self.concatenate:
- self.end = True
- self.next_page = False
- self.next_offset = None
-
- return
-
- #Otherwise, no next page and no concatenate, must be a single page
- #Concatenate the single page and get out of here.
- else:
- self.next_page = False
- self.concatenate = True
- self.next_offset = None
- return
-
- @staticmethod
- def ConcatenateJsonPages(saved_json_list):
- """
- If the json answer is too big to be contained in a single page,
- all the pages have to be loaded and saved before being appended to the
- first page.
-
- :param saved_json_list: list of all the stored pages, including the
- first page.
- :type saved_json_list: list
- :returns: Returns a dictionary with all the pages saved in the
- saved_json_list. The key of the dictionary is 'items'.
- :rtype: dict
-
-
- .. seealso:: SendRequest
- .. warning:: Assumes the apilib is 0.2.10 (with the 'items' key in the
- raw json dictionary)
-
- """
- #reset items list
-
- tmp = {}
- tmp['items'] = []
-
- for page in saved_json_list:
- tmp['items'].extend(page['items'])
- return tmp
-
- def ResetNextPage(self):
- """
- Resets all the Json page attributes (next_page, next_offset,
- concatenate, end). Has to be done before getting another json answer
- so that the previous page status does not affect the new json load.
-
- """
- self.next_page = True
- self.next_offset = None
- self.concatenate = False
- self.end = False
-
-
-class OARrestapi:
- """Class used to connect to the OAR server and to send GET and POST
- requests.
-
- """
-
- # classes attributes
-
- OAR_REQUEST_POST_URI_DICT = {'POST_job': {'uri': '/oarapi/jobs.json'},
- 'DELETE_jobs_id':
- {'uri': '/oarapi/jobs/id.json'},
- }
-
- POST_FORMAT = {'json': {'content': "application/json", 'object': json}}
-
- #OARpostdatareqfields = {'resource' :"/nodes=", 'command':"sleep", \
- #'workdir':"/home/", 'walltime':""}
-
- def __init__(self, config_file='/etc/sfa/oar_config.py'):
- self.oarserver = {}
- self.oarserver['uri'] = None
- self.oarserver['postformat'] = 'json'
-
- try:
- execfile(config_file, self.__dict__)
-
- self.config_file = config_file
- # path to configuration data
- self.config_path = os.path.dirname(config_file)
-
- except IOError:
- raise IOError, "Could not find or load the configuration file: %s" \
- % config_file
- #logger.setLevelDebug()
- self.oarserver['ip'] = self.OAR_IP
- self.oarserver['port'] = self.OAR_PORT
- self.jobstates = ['Terminated', 'Hold', 'Waiting', 'toLaunch',
- 'toError', 'toAckReservation', 'Launching',
- 'Finishing', 'Running', 'Suspended', 'Resuming',
- 'Error']
-
- self.parser = OARGETParser(self)
-
-
- def GETRequestToOARRestAPI(self, request, strval=None,
- next_page=None, username=None):
-
- """Makes a GET request to OAR.
-
- Fetch the uri associated with the resquest stored in
- OARrequests_uri_dict, adds the username if needed and if available, adds
- strval to the request uri if needed, connects to OAR and issues the GET
- request. Gets the json reply.
-
- :param request: One of the known get requests that are keys in the
- OARrequests_uri_dict.
- :param strval: used when a job id has to be specified.
- :param next_page: used to tell OAR to send the next page for this
- Get request. Is appended to the GET uri.
- :param username: used when a username has to be specified, when looking
- for jobs scheduled by a particular user for instance.
-
- :type request: string
- :type strval: integer
- :type next_page: boolean
- :type username: string
- :returns: a json dictionary if OAR successfully processed the GET
- request.
-
- .. seealso:: OARrequests_uri_dict
- """
- self.oarserver['uri'] = \
- OARGETParser.OARrequests_uri_dict[request]['uri']
- #Get job details with username
- if 'owner' in OARGETParser.OARrequests_uri_dict[request] and username:
- self.oarserver['uri'] += \
- OARGETParser.OARrequests_uri_dict[request]['owner'] + username
- headers = {}
- data = json.dumps({})
- logger.debug("OARrestapi \tGETRequestToOARRestAPI %s" % (request))
- if strval:
- self.oarserver['uri'] = self.oarserver['uri'].\
- replace("id", str(strval))
-
- if next_page:
- self.oarserver['uri'] += next_page
-
- if username:
- headers['X-REMOTE_IDENT'] = username
-
- logger.debug("OARrestapi: \t GETRequestToOARRestAPI \
- self.oarserver['uri'] %s strval %s"
- % (self.oarserver['uri'], strval))
- try:
- #seems that it does not work if we don't add this
- headers['content-length'] = '0'
-
- conn = HTTPConnection(self.oarserver['ip'],
- self.oarserver['port'])
- conn.request("GET", self.oarserver['uri'], data, headers)
- resp = conn.getresponse()
- body = resp.read()
- except Exception as error:
- logger.log_exc("GET_OAR_SRVR : Connection error: %s "
- % (error))
- raise Exception ("GET_OAR_SRVR : Connection error %s " %(error))
-
- finally:
- conn.close()
-
- # except HTTPException, error:
- # logger.log_exc("GET_OAR_SRVR : Problem with OAR server : %s "
- # % (error))
- #raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
- if resp.status >= 400:
- raise ValueError ("Response Error %s, %s, %s" %(resp.status,
- resp.reason, resp.read()))
- try:
- js_dict = json.loads(body)
- #print "\r\n \t\t\t js_dict keys" , js_dict.keys(), " \r\n", js_dict
- return js_dict
-
- except ValueError, error:
- logger.log_exc("Failed to parse Server Response: %s ERROR %s"
- % (body, error))
- #raise ServerError("Failed to parse Server Response:" + js)
-
-
- def POSTRequestToOARRestAPI(self, request, datadict, username=None):
- """ Used to post a job on OAR , along with data associated
- with the job.
-
- """
-
- #first check that all params for are OK
- try:
- self.oarserver['uri'] = \
- self.OAR_REQUEST_POST_URI_DICT[request]['uri']
-
- except KeyError:
- logger.log_exc("OARrestapi \tPOSTRequestToOARRestAPI request not \
- valid")
- return
- if datadict and 'strval' in datadict:
- self.oarserver['uri'] = self.oarserver['uri'].replace("id", \
- str(datadict['strval']))
- del datadict['strval']
-
- data = json.dumps(datadict)
- headers = {'X-REMOTE_IDENT':username, \
- 'content-type': self.POST_FORMAT['json']['content'], \
- 'content-length':str(len(data))}
- try :
-
- conn = HTTPConnection(self.oarserver['ip'], \
- self.oarserver['port'])
- conn.request("POST", self.oarserver['uri'], data, headers)
- resp = conn.getresponse()
- body = resp.read()
-
- except NotConnected:
- logger.log_exc("POSTRequestToOARRestAPI NotConnected ERROR: \
- data %s \r\n \t\n \t\t headers %s uri %s" \
- %(data,headers,self.oarserver['uri']))
- except Exception as error:
- logger.log_exc("POST_OAR_SERVER : Connection error: %s "
- % (error))
- raise Exception ("POST_OAR_SERVER : Connection error %s " %(error))
-
- finally:
- conn.close()
-
- if resp.status >= 400:
- raise ValueError ("Response Error %s, %s, %s" %(resp.status,
- resp.reason, body))
-
-
- try:
- answer = json.loads(body)
- logger.debug("POSTRequestToOARRestAPI : answer %s" % (answer))
- return answer
-
- except ValueError, error:
- logger.log_exc("Failed to parse Server Response: error %s \
- %s" %(error))
- #raise ServerError("Failed to parse Server Response:" + answer)
-
-
-class ParsingResourcesFull():
- """
- Class dedicated to parse the json response from a GET_resources_full from
- OAR.
-
- """
- def __init__(self):
- """
- Set the parsing dictionary. Works like a switch case, if the key is
- found in the dictionary, then the associated function is called.
- This is used in ParseNodes to create an usable dictionary from
- the Json returned by OAR when issuing a GET resources full request.
-
- .. seealso:: ParseNodes
-
- """
- self.resources_fulljson_dict = {
- 'network_address': self.AddNodeNetworkAddr,
- 'site': self.AddNodeSite,
- # 'radio': self.AddNodeRadio,
- 'mobile': self.AddMobility,
- 'x': self.AddPosX,
- 'y': self.AddPosY,
- 'z': self.AddPosZ,
- 'archi': self.AddHardwareType,
- 'state': self.AddBootState,
- 'id': self.AddOarNodeId,
- 'mobility_type': self.AddMobilityType,
- }
-
-
-
- def AddOarNodeId(self, tuplelist, value):
- """Adds Oar internal node id to the nodes' attributes.
-
- Appends tuple ('oar_id', node_id) to the tuplelist. Used by ParseNodes.
-
- .. seealso:: ParseNodes
-
- """
-
- tuplelist.append(('oar_id', int(value)))
-
-
- def AddNodeNetworkAddr(self, dictnode, value):
- """First parsing function to be called to parse the json returned by OAR
- answering a GET_resources (/oarapi/resources.json) request.
-
- When a new node is found in the json, this function is responsible for
- creating a new entry in the dictionary for storing information on this
- specific node. The key is the node network address, which is also the
- node's hostname.
- The value associated with the key is a tuple list.It contains all
- the nodes attributes. The tuplelist will later be turned into a dict.
-
- :param dictnode: should be set to the OARGETParser atribute
- node_dictlist. It will store the information on the nodes.
- :param value: the node_id is the network_address in the raw json.
- :type value: string
- :type dictnode: dictionary
-
- .. seealso: ParseResources, ParseNodes
- """
-
- node_id = value
- dictnode[node_id] = [('node_id', node_id),('hostname', node_id) ]
-
- return node_id
-
- def AddNodeSite(self, tuplelist, value):
- """Add the site's node to the dictionary.
-
-
- :param tuplelist: tuple list on which to add the node's site.
- Contains the other node attributes as well.
- :param value: value to add to the tuple list, in this case the node's
- site.
- :type tuplelist: list
- :type value: string
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- tuplelist.append(('site', str(value)))
-
- # def AddNodeRadio(tuplelist, value):
- # """Add thenode's radio chipset type to the tuple list.
-
- # :param tuplelist: tuple list on which to add the node's mobility
- # status. The tuplelist is the value associated with the node's
- # id in the OARGETParser
- # 's dictionary node_dictlist.
- # :param value: name of the radio chipset on the node.
- # :type tuplelist: list
- # :type value: string
-
- # .. seealso:: AddNodeNetworkAddr
-
- # """
- # tuplelist.append(('radio', str(value)))
-
- def AddMobilityType(self, tuplelist, value):
- """Adds which kind of mobility it is, train or roomba robot.
-
- :param tuplelist: tuple list on which to add the node's mobility status.
- The tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: tells if a node is a mobile node or not. The value is
- found in the json.
-
- :type tuplelist: list
- :type value: integer
-
- """
- tuplelist.append(('mobility_type', str(value)))
-
-
- def AddMobility(self, tuplelist, value):
- """Add if the node is a mobile node or not to the tuple list.
-
- :param tuplelist: tuple list on which to add the node's mobility status.
- The tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: tells if a node is a mobile node or not. The value is found
- in the json.
-
- :type tuplelist: list
- :type value: integer
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- if value is 0:
- tuplelist.append(('mobile', 'False'))
- else:
- tuplelist.append(('mobile', 'True'))
-
-
- def AddPosX(self, tuplelist, value):
- """Add the node's position on the x axis.
-
- :param tuplelist: tuple list on which to add the node's position . The
- tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: the position x.
-
- :type tuplelist: list
- :type value: integer
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- tuplelist.append(('posx', value ))
-
-
-
- def AddPosY(self, tuplelist, value):
- """Add the node's position on the y axis.
-
- :param tuplelist: tuple list on which to add the node's position . The
- tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: the position y.
-
- :type tuplelist: list
- :type value: integer
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- tuplelist.append(('posy', value))
-
-
-
- def AddPosZ(self, tuplelist, value):
- """Add the node's position on the z axis.
-
- :param tuplelist: tuple list on which to add the node's position . The
- tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: the position z.
-
- :type tuplelist: list
- :type value: integer
-
- .. seealso:: AddNodeNetworkAddr
-
- """
-
- tuplelist.append(('posz', value))
-
-
-
- def AddBootState(tself, tuplelist, value):
- """Add the node's state, Alive or Suspected.
-
- :param tuplelist: tuple list on which to add the node's state . The
- tuplelist is the value associated with the node's id in the
- OARGETParser 's dictionary node_dictlist.
- :param value: node's state.
-
- :type tuplelist: list
- :type value: string
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- tuplelist.append(('boot_state', str(value)))
-
-
- def AddHardwareType(self, tuplelist, value):
- """Add the node's hardware model and radio chipset type to the tuple
- list.
-
- :param tuplelist: tuple list on which to add the node's architecture
- and radio chipset type.
- :param value: hardware type: radio chipset. The value contains both the
- architecture and the radio chipset, separated by a colon.
- :type tuplelist: list
- :type value: string
-
- .. seealso:: AddNodeNetworkAddr
-
- """
-
- value_list = value.split(':')
- tuplelist.append(('archi', value_list[0]))
- tuplelist.append(('radio', value_list[1]))
-
-
-class OARGETParser:
- """Class providing parsing methods associated to specific GET requests.
-
- """
-
- def __init__(self, srv):
- self.version_json_dict = {
- 'api_version': None, 'apilib_version': None,
- 'api_timezone': None, 'api_timestamp': None, 'oar_version': None}
- self.config = Config()
- self.interface_hrn = self.config.SFA_INTERFACE_HRN
- self.timezone_json_dict = {
- 'timezone': None, 'api_timestamp': None, }
- #self.jobs_json_dict = {
- #'total' : None, 'links' : [],\
- #'offset':None , 'items' : [], }
- #self.jobs_table_json_dict = self.jobs_json_dict
- #self.jobs_details_json_dict = self.jobs_json_dict
- self.server = srv
- self.node_dictlist = {}
-
- self.json_page = JsonPage()
- self.parsing_resourcesfull = ParsingResourcesFull()
- self.site_dict = {}
- self.jobs_list = []
- self.SendRequest("GET_version")
-
-
- def ParseVersion(self):
- """Parses the OAR answer to the GET_version ( /oarapi/version.json.)
-
- Finds the OAR apilib version currently used. Has an impact on the json
- structure returned by OAR, so the version has to be known before trying
- to parse the jsons returned after a get request has been issued.
- Updates the attribute version_json_dict.
-
- """
-
- if 'oar_version' in self.json_page.raw_json:
- self.version_json_dict.update(
- api_version=self.json_page.raw_json['api_version'],
- apilib_version=self.json_page.raw_json['apilib_version'],
- api_timezone=self.json_page.raw_json['api_timezone'],
- api_timestamp=self.json_page.raw_json['api_timestamp'],
- oar_version=self.json_page.raw_json['oar_version'])
- else:
- self.version_json_dict.update(
- api_version=self.json_page.raw_json['api'],
- apilib_version=self.json_page.raw_json['apilib'],
- api_timezone=self.json_page.raw_json['api_timezone'],
- api_timestamp=self.json_page.raw_json['api_timestamp'],
- oar_version=self.json_page.raw_json['oar'])
-
- print self.version_json_dict['apilib_version']
-
-
- def ParseTimezone(self):
- """Get the timezone used by OAR.
-
- Get the timezone from the answer to the GET_timezone request.
- :return: api_timestamp and api timezone.
- :rype: integer, integer
-
- .. warning:: unused.
- """
- api_timestamp = self.json_page.raw_json['api_timestamp']
- api_tz = self.json_page.raw_json['timezone']
- return api_timestamp, api_tz
-
- def ParseJobs(self):
- """Called when a GET_jobs request has been issued to OAR.
-
- Corresponds to /oarapi/jobs.json uri. Currently returns the raw json
- information dict.
- :returns: json_page.raw_json
- :rtype: dictionary
-
- .. warning:: Does not actually parse the information in the json. SA
- 15/07/13.
-
- """
- self.jobs_list = []
- print " ParseJobs "
- return self.json_page.raw_json
-
- def ParseJobsTable(self):
- """In case we need to use the job table in the future.
-
- Associated with the GET_jobs_table : '/oarapi/jobs/table.json uri.
- .. warning:: NOT USED. DOES NOTHING.
- """
- print "ParseJobsTable"
-
- def ParseJobsDetails(self):
- """Currently only returns the same json in self.json_page.raw_json.
-
- .. todo:: actually parse the json
- .. warning:: currently, this function is not used a lot, so I have no
- idea what could be useful to parse, returning the full json. NT
- """
-
- #logger.debug("ParseJobsDetails %s " %(self.json_page.raw_json))
- return self.json_page.raw_json
-
-
- def ParseJobsIds(self):
- """Associated with the GET_jobs_id OAR request.
-
- Parses the json dict (OAR answer) to the GET_jobs_id request
- /oarapi/jobs/id.json.
-
-
- :returns: dictionary whose keys are listed in the local variable
- job_resources and values that are in the json dictionary returned
- by OAR with the job information.
- :rtype: dict
-
- """
- job_resources = ['wanted_resources', 'name', 'id', 'start_time',
- 'state', 'owner', 'walltime', 'message']
-
- # Unused variable providing the contents of the json dict returned from
- # get job resources full request
- job_resources_full = [
- 'launching_directory', 'links',
- 'resubmit_job_id', 'owner', 'events', 'message',
- 'scheduled_start', 'id', 'array_id', 'exit_code',
- 'properties', 'state', 'array_index', 'walltime',
- 'type', 'initial_request', 'stop_time', 'project',
- 'start_time', 'dependencies', 'api_timestamp', 'submission_time',
- 'reservation', 'stdout_file', 'types', 'cpuset_name',
- 'name', 'wanted_resources', 'queue', 'stderr_file', 'command']
-
-
- job_info = self.json_page.raw_json
- #logger.debug("OARESTAPI ParseJobsIds %s" %(self.json_page.raw_json))
- values = []
- try:
- for k in job_resources:
- values.append(job_info[k])
- return dict(zip(job_resources, values))
-
- except KeyError:
- logger.log_exc("ParseJobsIds KeyError ")
-
-
- def ParseJobsIdResources(self):
- """ Parses the json produced by the request
- /oarapi/jobs/id/resources.json.
- Returns a list of oar node ids that are scheduled for the
- given job id.
-
- """
- job_resources = []
- for resource in self.json_page.raw_json['items']:
- job_resources.append(resource['id'])
-
- return job_resources
-
- def ParseResources(self):
- """ Parses the json produced by a get_resources request on oar."""
-
- #logger.debug("OARESTAPI \tParseResources " )
- #resources are listed inside the 'items' list from the json
- self.json_page.raw_json = self.json_page.raw_json['items']
- self.ParseNodes()
-
- def ParseReservedNodes(self):
- """ Returns an array containing the list of the jobs scheduled
- with the reserved nodes if available.
-
- :returns: list of job dicts, each dict containing the following keys:
- t_from, t_until, resources_ids (of the reserved nodes for this job).
- If the information is not available, default values will be set for
- these keys. The other keys are : state, lease_id and user.
- :rtype: list
-
- """
-
- #resources are listed inside the 'items' list from the json
- reservation_list = []
- job = {}
- #Parse resources info
- for json_element in self.json_page.raw_json['items']:
- #In case it is a real reservation (not asap case)
- if json_element['scheduled_start']:
- job['t_from'] = json_element['scheduled_start']
- job['t_until'] = int(json_element['scheduled_start']) + \
- int(json_element['walltime'])
- #Get resources id list for the job
- job['resource_ids'] = [node_dict['id'] for node_dict
- in json_element['resources']]
- else:
- job['t_from'] = "As soon as possible"
- job['t_until'] = "As soon as possible"
- job['resource_ids'] = ["Undefined"]
-
- job['state'] = json_element['state']
- job['lease_id'] = json_element['id']
-
- job['user'] = json_element['owner']
- #logger.debug("OARRestapi \tParseReservedNodes job %s" %(job))
- reservation_list.append(job)
- #reset dict
- job = {}
- return reservation_list
-
- def ParseRunningJobs(self):
- """ Gets the list of nodes currently in use from the attributes of the
- running jobs.
-
- :returns: list of hostnames, the nodes that are currently involved in
- running jobs.
- :rtype: list
-
-
- """
- logger.debug("OARESTAPI \tParseRunningJobs_________________ ")
- #resources are listed inside the 'items' list from the json
- nodes = []
- for job in self.json_page.raw_json['items']:
- for node in job['nodes']:
- nodes.append(node['network_address'])
- return nodes
-
- def ChangeRawJsonDependingOnApilibVersion(self):
- """
- Check if the OAR apilib version is different from 0.2.10, in which case
- the Json answer is also dict instead as a plain list.
-
- .. warning:: the whole code is assuming the json contains a 'items' key
- .. seealso:: ConcatenateJsonPages, ParseJobs, ParseReservedNodes,
- ParseJobsIdResources, ParseResources, ParseRunningJobs
- .. todo:: Clean the whole code. Either suppose the apilib will always
- provide the 'items' key, or handle different options.
- """
-
- if self.version_json_dict['apilib_version'] != "0.2.10":
- self.json_page.raw_json = self.json_page.raw_json['items']
-
- def ParseDeleteJobs(self):
- """ No need to parse anything in this function.A POST
- is done to delete the job.
-
- """
- return
-
- def ParseResourcesFull(self):
- """ This method is responsible for parsing all the attributes
- of all the nodes returned by OAR when issuing a get resources full.
- The information from the nodes and the sites are separated.
- Updates the node_dictlist so that the dictionnary of the platform's
- nodes is available afterwards.
-
- :returns: node_dictlist, a list of dictionaries about the nodes and
- their properties.
- :rtype: list
-
- """
- logger.debug("OARRESTAPI ParseResourcesFull___________ ")
- #print self.json_page.raw_json[1]
- #resources are listed inside the 'items' list from the json
- self.ChangeRawJsonDependingOnApilibVersion()
- self.ParseNodes()
- self.ParseSites()
- return self.node_dictlist
-
- def ParseResourcesFullSites(self):
- """ Called by GetSites which is unused.
- Originally used to get information from the sites, with for each site
- the list of nodes it has, along with their properties.
-
- :return: site_dict, dictionary of sites
- :rtype: dict
-
- .. warning:: unused
- .. seealso:: GetSites (IotlabShell)
-
- """
- self.ChangeRawJsonDependingOnApilibVersion()
- self.ParseNodes()
- self.ParseSites()
- return self.site_dict
-
-
- def ParseNodes(self):
- """ Parse nodes properties from OAR
- Put them into a dictionary with key = node id and value is a dictionary
- of the node properties and properties'values.
-
- """
- node_id = None
- _resources_fulljson_dict = \
- self.parsing_resourcesfull.resources_fulljson_dict
- keys = _resources_fulljson_dict.keys()
- keys.sort()
-
- for dictline in self.json_page.raw_json:
- node_id = None
- # dictionary is empty and/or a new node has to be inserted
- node_id = _resources_fulljson_dict['network_address'](
- self.node_dictlist, dictline['network_address'])
- for k in keys:
- if k in dictline:
- if k == 'network_address':
- continue
-
- _resources_fulljson_dict[k](
- self.node_dictlist[node_id], dictline[k])
-
- #The last property has been inserted in the property tuple list,
- #reset node_id
- #Turn the property tuple list (=dict value) into a dictionary
- self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
- node_id = None
-
- @staticmethod
- def iotlab_hostname_to_hrn(root_auth, hostname):
- """
- Transforms a node hostname into a SFA hrn.
-
- :param root_auth: Name of the root authority of the SFA server. In
- our case, it is set to iotlab.
- :param hostname: node's hotname, given by OAR.
- :type root_auth: string
- :type hostname: string
- :returns: inserts the root_auth and '.' before the hostname.
- :rtype: string
-
- """
- return root_auth + '.' + hostname
-
- def ParseSites(self):
- """ Returns a list of dictionnaries containing the sites' attributes."""
-
- nodes_per_site = {}
- config = Config()
- #logger.debug(" OARrestapi.py \tParseSites self.node_dictlist %s"\
- #%(self.node_dictlist))
- # Create a list of nodes per site_id
- for node_id in self.node_dictlist:
- node = self.node_dictlist[node_id]
-
- if node['site'] not in nodes_per_site:
- nodes_per_site[node['site']] = []
- nodes_per_site[node['site']].append(node['node_id'])
- else:
- if node['node_id'] not in nodes_per_site[node['site']]:
- nodes_per_site[node['site']].append(node['node_id'])
-
- #Create a site dictionary whose key is site_login_base
- # (name of the site) and value is a dictionary of properties,
- # including the list of the node_ids
- for node_id in self.node_dictlist:
- node = self.node_dictlist[node_id]
- node.update({'hrn': self.iotlab_hostname_to_hrn(self.interface_hrn,
- node['hostname'])})
- self.node_dictlist.update({node_id: node})
-
- if node['site'] not in self.site_dict:
- self.site_dict[node['site']] = {
- 'site': node['site'],
- 'node_ids': nodes_per_site[node['site']],
- 'latitude': "48.83726",
- 'longitude': "- 2.10336",
- 'name': config.SFA_REGISTRY_ROOT_AUTH,
- 'pcu_ids': [], 'max_slices': None,
- 'ext_consortium_id': None,
- 'max_slivers': None, 'is_public': True,
- 'peer_site_id': None,
- 'abbreviated_name': "iotlab", 'address_ids': [],
- 'url': "https://portal.senslab.info", 'person_ids': [],
- 'site_tag_ids': [], 'enabled': True, 'slice_ids': [],
- 'date_created': None, 'peer_id': None
- }
-
- OARrequests_uri_dict = {
- 'GET_version':
- {'uri': '/oarapi/version.json', 'parse_func': ParseVersion},
-
- 'GET_timezone':
- {'uri': '/oarapi/timezone.json', 'parse_func': ParseTimezone},
-
- 'GET_jobs':
- {'uri': '/oarapi/jobs.json', 'parse_func': ParseJobs},
-
- 'GET_jobs_id':
- {'uri': '/oarapi/jobs/id.json', 'parse_func': ParseJobsIds},
-
- 'GET_jobs_id_resources':
- {'uri': '/oarapi/jobs/id/resources.json',
- 'parse_func': ParseJobsIdResources},
-
- 'GET_jobs_table':
- {'uri': '/oarapi/jobs/table.json', 'parse_func': ParseJobsTable},
-
- 'GET_jobs_details':
- {'uri': '/oarapi/jobs/details.json', 'parse_func': ParseJobsDetails},
-
- 'GET_reserved_nodes':
- {'uri':
- '/oarapi/jobs/details.json?state=Running,Waiting,Launching',
- 'owner': '&user=', 'parse_func': ParseReservedNodes},
-
- 'GET_running_jobs':
- {'uri': '/oarapi/jobs/details.json?state=Running',
- 'parse_func': ParseRunningJobs},
-
- 'GET_resources_full':
- {'uri': '/oarapi/resources/full.json',
- 'parse_func': ParseResourcesFull},
-
- 'GET_sites':
- {'uri': '/oarapi/resources/full.json',
- 'parse_func': ParseResourcesFullSites},
-
- 'GET_resources':
- {'uri': '/oarapi/resources.json', 'parse_func': ParseResources},
-
- 'DELETE_jobs_id':
- {'uri': '/oarapi/jobs/id.json', 'parse_func': ParseDeleteJobs}}
-
-
- def SendRequest(self, request, strval=None, username=None):
- """ Connects to OAR , sends the valid GET requests and uses
- the appropriate json parsing functions.
-
- :returns: calls to the appropriate parsing function, associated with the
- GET request
- :rtype: depends on the parsing function called.
-
- .. seealso:: OARrequests_uri_dict
- """
- save_json = None
-
- self.json_page.ResetNextPage()
- save_json = []
-
- if request in self.OARrequests_uri_dict:
- while self.json_page.next_page:
- self.json_page.raw_json = self.server.GETRequestToOARRestAPI(
- request,
- strval,
- self.json_page.next_offset,
- username)
- self.json_page.FindNextPage()
- if self.json_page.concatenate:
- save_json.append(self.json_page.raw_json)
-
- if self.json_page.concatenate and self.json_page.end:
- self.json_page.raw_json = \
- self.json_page.ConcatenateJsonPages(save_json)
-
- return self.OARrequests_uri_dict[request]['parse_func'](self)
- else:
- logger.error("OARRESTAPI OARGetParse __init__ : ERROR_REQUEST "
- % (request))
+++ /dev/null
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
-BUILDDIR = build
-
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
- @echo "Please use \`make <target>' where <target> is one of"
- @echo " html to make standalone HTML files"
- @echo " dirhtml to make HTML files named index.html in directories"
- @echo " singlehtml to make a single large HTML file"
- @echo " pickle to make pickle files"
- @echo " json to make JSON files"
- @echo " htmlhelp to make HTML files and a HTML help project"
- @echo " qthelp to make HTML files and a qthelp project"
- @echo " devhelp to make HTML files and a Devhelp project"
- @echo " epub to make an epub"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
- @echo " latexpdf to make LaTeX files and run them through pdflatex"
- @echo " text to make text files"
- @echo " man to make manual pages"
- @echo " texinfo to make Texinfo files"
- @echo " info to make Texinfo files and run them through makeinfo"
- @echo " gettext to make PO message catalogs"
- @echo " changes to make an overview of all changed/added/deprecated items"
- @echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
-
-clean:
- -rm -rf $(BUILDDIR)/*
-
-html:
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
- $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
- @echo
- @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
- @echo
- @echo "Build finished; now you can process the pickle files."
-
-json:
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
- @echo
- @echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
- @echo
- @echo "Build finished; now you can run HTML Help Workshop with the" \
- ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
- $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
- @echo
- @echo "Build finished; now you can run "qcollectiongenerator" with the" \
- ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/IotlabSFAdriver.qhcp"
- @echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/IotlabSFAdriver.qhc"
-
-devhelp:
- $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
- @echo
- @echo "Build finished."
- @echo "To view the help file:"
- @echo "# mkdir -p $$HOME/.local/share/devhelp/IotlabSFAdriver"
- @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/IotlabSFAdriver"
- @echo "# devhelp"
-
-epub:
- $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
- @echo
- @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo
- @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
- @echo "Run \`make' in that directory to run these through (pdf)latex" \
- "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo "Running LaTeX files through pdflatex..."
- $(MAKE) -C $(BUILDDIR)/latex all-pdf
- @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
- $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
- @echo
- @echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
- $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
- @echo
- @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo
- @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
- @echo "Run \`make' in that directory to run these through makeinfo" \
- "(use \`make info' here to do that automatically)."
-
-info:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo "Running Texinfo files through makeinfo..."
- make -C $(BUILDDIR)/texinfo info
- @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
- $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
- @echo
- @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
- @echo
- @echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
- @echo
- @echo "Link check complete; look for any errors in the above output " \
- "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
- @echo "Testing of doctests in the sources finished, look at the " \
- "results in $(BUILDDIR)/doctest/output.txt."
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# Iotlab SFA driver documentation build configuration file, created by
-# sphinx-quickstart on Tue Jul 2 11:53:15 2013.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-sys.path.insert(0, os.path.abspath('../../'))
-sys.path.insert(0, os.path.abspath('../../../'))
-sys.path.insert(0, os.path.abspath('../../../storage/'))
-sys.path.insert(0, os.path.abspath('../../../../'))
-sys.path.insert(0, os.path.abspath('../../../rspecs/elements/versions/'))
-sys.path.insert(0, os.path.abspath('../../../rspecs/elements/'))
-sys.path.insert(0, os.path.abspath('../../../importer/'))
-print sys.path
-
-# -- General configuration -----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Iotlab SFA driver'
-copyright = u'2013, Sandrine Avakian '
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '1.0'
-# The full version, including alpha/beta/rc tags.
-release = '1.0'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'IotlabSFAdriverdoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('index', 'IotlabSFAdriver.tex', u'Iotlab SFA driver Documentation',
- u'Sandrine Avakian ', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output --------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'iotlabsfadriver', u'Iotlab SFA driver Documentation',
- [u'Sandrine Avakian '], 1)
-]
-
-# If true, show URL addresses after external links.
-#man_show_urls = False
-
-
-# -- Options for Texinfo output ------------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- ('index', 'IotlabSFAdriver', u'Iotlab SFA driver Documentation',
- u'Sandrine Avakian ', 'IotlabSFAdriver', 'One line description of project.',
- 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
-
-# If false, no module index is generated.
-#texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
-
-
-# -- Options for Epub output ---------------------------------------------------
-
-# Bibliographic Dublin Core info.
-epub_title = u'Iotlab SFA driver'
-epub_author = u'Sandrine Avakian '
-epub_publisher = u'Sandrine Avakian '
-epub_copyright = u'2013, Sandrine Avakian '
-
-# The language of the text. It defaults to the language option
-# or en if the language is not set.
-#epub_language = ''
-
-# The scheme of the identifier. Typical schemes are ISBN or URL.
-#epub_scheme = ''
-
-# The unique identifier of the text. This can be a ISBN number
-# or the project homepage.
-#epub_identifier = ''
-
-# A unique identification for the text.
-#epub_uid = ''
-
-# A tuple containing the cover image and cover page html template filenames.
-#epub_cover = ()
-
-# HTML files that should be inserted before the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_pre_files = []
-
-# HTML files shat should be inserted after the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_post_files = []
-
-# A list of files that should not be packed into the epub file.
-#epub_exclude_files = []
-
-# The depth of the table of contents in toc.ncx.
-#epub_tocdepth = 3
-
-# Allow duplicate toc entries.
-#epub_tocdup = True
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
+++ /dev/null
-importer Package
-=================
-
-:mod:`iotlabimporter` Module
-------------------------------
-
-.. automodule:: importer.iotlabimporter
- :members:
- :undoc-members:
- :show-inheritance:
-
-
+++ /dev/null
-.. Iotlab SFA driver documentation master file, created by
- sphinx-quickstart on Tue Jul 2 11:53:15 2013.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
-Welcome to Iotlab SFA driver's documentation!
-=============================================
-
-===================
-Code tree overview
-===================
-
-------------
-Installation
-------------
-**Using git**
-
-git clone git://git.onelab.eu/sfa.git
-cd sfa
-git checkout <version>
-make version
-python setup.py install
-
-<version> can be either geni-v2 or geni-v3.
-------
-Driver
-------
-**Folder**:/sfa/sfa/iotlab/
-
-The Iotlab driver source code is under the folder /sfa, along with the other
-testbeds driver folders. The /iotlab directory contains the necessary files
-defining API for OAR, LDAP, the postgresql table which is hosted in the SFA
-database as well as for the SFA managers.
-
-The OAR API enables the user to get information regarding nodes and jobs:
-nodes properties (hostnames, radio, mobility type, position with GPS
-coordinates and so on), jobs and the associated username and nodes.
-These are used when querying the testbed about resources
-and leases. In order to add a new node property in the iotlab Rspec format,
-the new property must be defined and parsed beforehand from OAR in the OAR
-API file.
-
-In the LDAP file, the LDAPapi class supposes the unix schema is used.
-If this class is reused in another context, it might not work without some bit
-of customization. The naming (turning a hostname into a sfa hrn, a LDAP login
-into a hrn ) is also done in this class.
-
-The iotlabpostgres file defines a dedicated lease table, hosted in the SFA
-database (in SFA version geni-v3) or in a separated and dedicated Iotlab
-database(in SFA geni-v2). Its purpose is to hold information that we
-can't store anywhere given the Iotlab architecture with OAR and LDAP, namely the
-association of a job and the slice hrn for which the job is supposed to run.
-Indeed, one user may register on another federated testbed then use his
-federated slice to book iotlab nodes. In this case, an Iotlab LDAP account will
-be created. Later on, when new users will be imported from the LDAP to the SFA
-database, an Iotlab slice will be created for each new user found in the LDAP.
-Thus leading us to the situation where one user may have the possibility to use
-different slices to book Iotlab nodes.
-
-----------------------------
-RSpec specific Iotlab format
-----------------------------
-**Folder**:/sfa/rspecs/versions , /sfa/rpecs/elements
-
-There is a specific Iotlab Rspec format. It aims at displaying information that
-is not hadled in the SFA Rspec format. Typically, it adds the nodes' mobility
-and its mobility type, the hardware architecture as well as the radio
-chipset used. This is done by defining a iotlabv1 rspec version format file
-under /rspecs/versions. Definitions of an iotlab rspec lease, node and sliver
-are done in the associated files under /rspecs/elements/versions.
-If a property has to be added to the nodes in the Iotlab Rspec format, it
-should be added in the iotlabv1Node file, using the other properties as example.
-
-Future work:
-The Rspec format has to be validated and stored on a website, as the header
-of the return Rspec defines it, which is not the case with the Iotlab rspec
-format. It has been discussed with Mohamed Larabi (Inria Sophia) once, never to
-be mentionned again. Although this does not prevent the SFA server from working,
-maybe it would be nice to be completely compliantand clean in this aspect also.
--SA Dec 2013-
-
---------
-Importer
---------
-**Folder**: /sfa/importer/
-
-The iotlab importer task is to populate the SFA database with records created
-from the information given by OAR and LDAP. Running the importer periodically
-enables the SFA database to be in sync with the LDAP by deleting/ adding records
-in the database corresponding to newly deleted/ added users in LDAP.
-
---------------
-Documentation
---------------
-**Folder** : /sfa/sfa/iotlab/docs
-
-Thsi folder contains the sphinx files needed to generate this documentation.
-As of Dec 2013, and because of the SFA database connexion methods, generating
-the documentation fails if the database is not accessible. In this case,
-Iotlabimporter will not be documented.
-A possible workaround is to build the documentation on the SFA server hosting
-the SFA database (which is not a really clean way to this...).
-To ngenerate the documentation, do "make html" in the /docs folder, where the
-Makefile is located. The package python-sphinx must be installed in order
-for this command to work.
-
-
---------
-Testing
---------
-Two scripts have been written to help with the testing. One is dedicated for
-testing the Iotlab driver, OAR and LDAP classes. The second is to check if the
-client commands work well.
-
-**Folder** : /sfa/testbeds/iotlab/tests
-
-* driver_tests.py : python script to test OAR, LDAP and Iotlabdriver/ IotlabShell
- methods. Modify the script to add more tests if needed.
-
- **starting the script** :python ./driver_tests <-option value> <option>
- example : python ./driver_tests -10 OAR (10 is the job_id in this case)
- option can be : OAR, sql, shell, LDAP , driver, all.
-
-* sfi_client_tests.py : python script to test all the sfi client commands :
- resources, list, allocate (geni-v3), provision(geni-v3), resources, show, status
- and delete. In the geni-v2 branch, this script uses create_sliver instead.
-
- **starting the script** : python ./sfi_client_tests.py <absolute path to the
- rspecs>.
- The Rspecs are included in the git repository under ./sfa/testbeds/iotlab/tests/tests_rspecs.
-
-
-
-.. toctree::
- :maxdepth: 2
-
-Code Documentation
-==================
-
-.. toctree::
- :maxdepth: 2
-
- iotlab.rst
- versions.rst
- importer.rst
-
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
+++ /dev/null
-iotlab Package
-==============
-
-:mod:`LDAPapi` Module
----------------------
-
-.. automodule:: iotlab.LDAPapi
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`OARrestapi` Module
-------------------------
-
-.. automodule:: iotlab.OARrestapi
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabaggregate` Module
------------------------------
-
-.. automodule:: iotlab.iotlabaggregate
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabshell` Module
---------------------------
-
-.. automodule:: iotlab.iotlabshell
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabdriver` Module
---------------------------
-
-.. automodule:: iotlab.iotlabdriver
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabpostgres` Module
-----------------------------
-
-.. automodule:: iotlab.iotlabpostgres
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabslices` Module
---------------------------
-
-.. automodule:: iotlab.iotlabslices
- :members:
- :undoc-members:
- :show-inheritance:
-
+++ /dev/null
-iotlab
-======
-
-.. toctree::
- :maxdepth: 4
-
- iotlab
-
-
-versions
-========
-
-.. toctree::
- :maxdepth: 4
-
- versions
+++ /dev/null
-versions Package
-================
-
-:mod:`iotlabv1Lease` Module
----------------------------
-
-.. automodule:: versions.iotlabv1Lease
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabv1Node` Module
---------------------------
-
-.. automodule:: versions.iotlabv1Node
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabv1Sliver` Module
-----------------------------
-
-.. automodule:: versions.iotlabv1Sliver
- :members:
- :undoc-members:
- :show-inheritance:
-
-"""
-File providing methods to generate valid RSpecs for the Iotlab testbed.
-Contains methods to get information on slice, slivers, nodes and leases,
-formatting them and turn it into a RSpec.
-"""
+# -*- coding:utf-8 -*-
+""" aggregate class management """
+
+from sfa.util.xrn import Xrn, hrn_to_urn
from sfa.util.sfatime import utcparse, datetime_to_string
-from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn
-from sfa.iotlab.iotlabxrn import IotlabXrn
+from sfa.util.sfalogging import logger
from sfa.rspecs.rspec import RSpec
-#from sfa.rspecs.elements.location import Location
from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.login import Login
-# from sfa.rspecs.elements.services import ServicesElement
-from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.lease import Lease
from sfa.rspecs.elements.granularity import Granularity
from sfa.rspecs.version_manager import VersionManager
-from sfa.storage.model import SliverAllocation
-from sfa.rspecs.elements.versions.iotlabv1Node import IotlabPosition, \
- IotlabNode, IotlabLocation
-from sfa.iotlab.iotlabxrn import xrn_object
-from sfa.util.sfalogging import logger
+from sfa.rspecs.elements.services import ServicesElement
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.versions.iotlabv1Node import IotlabPosition
+from sfa.rspecs.elements.versions.iotlabv1Node import IotlabNode
+from sfa.rspecs.elements.versions.iotlabv1Node import IotlabLocation
+from sfa.iotlab.iotlablease import LeaseTable
import time
+import datetime
-class IotlabAggregate:
- """Aggregate manager class for Iotlab. """
- sites = {}
- nodes = {}
- api = None
- interfaces = {}
- links = {}
- node_tags = {}
-
- prepared = False
-
- user_options = {}
+class IotLABAggregate(object):
+ """
+ SFA aggregate for Iot-LAB testbed
+ """
def __init__(self, driver):
self.driver = driver
- def get_slice_and_slivers(self, slice_xrn, login=None):
- """
- Get the slices and the associated leases if any from the iotlab
- testbed. One slice can have mutliple leases.
- For each slice, get the nodes in the associated lease
- and create a sliver with the necessary info and insert it into the
- sliver dictionary, keyed on the node hostnames.
- Returns a dict of slivers based on the sliver's node_id.
- Called by get_rspec.
-
-
- :param slice_xrn: xrn of the slice
- :param login: user's login on iotlab ldap
-
- :type slice_xrn: string
- :type login: string
- :returns: a list of slices dict and a list of Sliver object
- :rtype: (list, list)
-
- .. note: There is no real slivers in iotlab, only leases. The goal
- is to be consistent with the SFA standard.
-
- """
- slivers = {}
- sfa_slice = None
- if slice_xrn is None:
- return (sfa_slice, slivers)
- slice_urn = hrn_to_urn(slice_xrn, 'slice')
- slice_hrn, _ = urn_to_hrn(slice_xrn)
-
- # GetSlices always returns a list, even if there is only one element
- slices = self.driver.GetSlices(slice_filter=str(slice_hrn),
- slice_filter_type='slice_hrn',
- login=login)
-
- logger.debug("IotlabAggregate api \tget_slice_and_slivers \
- slice_hrn %s \r\n slices %s self.driver.hrn %s"
- % (slice_hrn, slices, self.driver.hrn))
- if slices == []:
- return (sfa_slice, slivers)
-
- # sort slivers by node id , if there is a job
- #and therefore, node allocated to this slice
- # for sfa_slice in slices:
- sfa_slice = slices[0]
- try:
- node_ids_list = sfa_slice['node_ids']
- except KeyError:
- logger.log_exc("IOTLABAGGREGATE \t \
- get_slice_and_slivers No nodes in the slice \
- - KeyError ")
- node_ids_list = []
- # continue
-
- for node in node_ids_list:
- sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
- sliver_xrn.set_authority(self.driver.hrn)
- sliver = Sliver({'sliver_id': sliver_xrn.urn,
- 'name': sfa_slice['hrn'],
- 'type': 'iotlab-node',
- 'tags': []})
-
- slivers[node] = sliver
-
- #Add default sliver attribute :
- #connection information for iotlab
- # if get_authority(sfa_slice['hrn']) == \
- # self.driver.testbed_shell.root_auth:
- # tmp = sfa_slice['hrn'].split('.')
- # ldap_username = tmp[1].split('_')[0]
- # ssh_access = None
- # slivers['default_sliver'] = {'ssh': ssh_access,
- # 'login': ldap_username}
- # look in ldap:
- ldap_username = self.find_ldap_username_from_slice(sfa_slice)
-
- if ldap_username is not None:
- ssh_access = None
- slivers['default_sliver'] = {'ssh': ssh_access,
- 'login': ldap_username}
-
-
- logger.debug("IOTLABAGGREGATE api get_slice_and_slivers slivers %s "
- % (slivers))
- return (slices, slivers)
-
- def find_ldap_username_from_slice(self, sfa_slice):
- """
- Gets the ldap username of the user based on the information contained
- in ist sfa_slice record.
-
- :param sfa_slice: the user's slice record. Must contain the
- reg_researchers key.
- :type sfa_slice: dictionary
- :returns: ldap_username, the ldap user's login.
- :rtype: string
-
- """
- researchers = [sfa_slice['reg_researchers'][0].__dict__]
- # look in ldap:
- ldap_username = None
- ret = self.driver.testbed_shell.GetPersons(researchers)
- if len(ret) != 0:
- ldap_username = ret[0]['uid']
-
- return ldap_username
-
-
-
- def get_nodes(self, options=None):
- # def node_to_rspec_node(self, node, sites, node_tags,
- # grain=None, options={}):
- """Returns the nodes in the slice using the rspec format, with all the
- nodes' properties.
-
- Fetch the nodes ids in the slices dictionary and get all the nodes
- properties from OAR. Makes a rspec dicitonary out of this and returns
- it. If the slice does not have any job running or scheduled, that is
- it has no reserved nodes, then returns an empty list.
-
- :returns: An empty list if the slice has no reserved nodes, a rspec
- list with all the nodes and their properties (a dict per node)
- otherwise.
- :rtype: list
-
- .. seealso:: get_slice_and_slivers
-
- """
-
- # NT: the semantic of this function is not clear to me :
- # if slice is not defined, then all the nodes should be returned
- # if slice is defined, we should return only the nodes that
- # are part of this slice
- # but what is the role of the slivers parameter ?
- # So i assume that slice['node_ids'] will be the same as slivers for us
- filter_nodes = None
- if options:
- geni_available = options.get('geni_available')
- if geni_available == True:
- filter_nodes['boot_state'] = ['Alive']
-
- # slice_nodes_list = []
- # if slices is not None:
- # for one_slice in slices:
- # try:
- # slice_nodes_list = one_slice['node_ids']
- # # if we are dealing with a slice that has no node just
- # # return an empty list. In iotlab a slice can have multiple
- # # jobs scheduled, so it either has at least one lease or
- # # not at all.
- # except KeyError:
- # return []
-
- # get the granularity in second for the reservation system
- # grain = self.driver.testbed_shell.GetLeaseGranularity()
-
- nodes = self.driver.testbed_shell.GetNodes(node_filter_dict =
- filter_nodes)
-
- nodes_dict = {}
-
- #if slices, this means we got to list all the nodes given to this slice
- # Make a list of all the nodes in the slice before getting their
- #attributes
- # rspec_nodes = []
-
- # logger.debug("IOTLABAGGREGATE api get_nodes slices %s "
- # % (slices))
-
- # reserved_nodes = self.driver.testbed_shell.GetNodesCurrentlyInUse()
- # logger.debug("IOTLABAGGREGATE api get_nodes slice_nodes_list %s "
- # % (slice_nodes_list))
- for node in nodes:
- nodes_dict[node['node_id']] = node
-
- return nodes_dict
+ def leases_to_rspec_leases(self, leases):
+ """ Get leases attributes list"""
+ rspec_leases = []
+ for lease in leases:
+ for node in lease['resources']:
+ rspec_lease = Lease()
+ rspec_lease['lease_id'] = lease['id']
+ iotlab_xrn = Xrn('.'.join([self.driver.root_auth,
+ Xrn.escape(node)]),
+ type='node')
+ rspec_lease['component_id'] = iotlab_xrn.urn
+ rspec_lease['start_time'] = str(lease['date'])
+ # duration in minutes
+ duration = int(lease['duration'])/60
+ rspec_lease['duration'] = duration
+ rspec_lease['slice_id'] = lease['slice_id']
+ rspec_leases.append(rspec_lease)
+ return rspec_leases
def node_to_rspec_node(self, node):
- """ Creates a rspec node structure with the appropriate information
- based on the node information that can be found in the node dictionary.
-
- :param node: node data. this dict contains information about the node
- and must have the following keys : mobile, radio, archi, hostname,
- boot_state, site, x, y ,z (position).
- :type node: dictionary.
-
- :returns: node dictionary containing the following keys : mobile, archi,
- radio, component_id, component_name, component_manager_id,
- authority_id, boot_state, exclusive, hardware_types, location,
- position, granularity, tags.
- :rtype: dict
-
- """
-
- grain = self.driver.testbed_shell.GetLeaseGranularity()
-
+ """ Get node attributes """
rspec_node = IotlabNode()
- # xxx how to retrieve site['login_base']
- #site_id=node['site_id']
- #site=sites_dict[site_id]
-
rspec_node['mobile'] = node['mobile']
rspec_node['archi'] = node['archi']
- rspec_node['radio'] = node['radio']
-
- iotlab_xrn = xrn_object(self.driver.testbed_shell.root_auth,
- node['hostname'])
+ rspec_node['radio'] = (node['archi'].split(':'))[1]
+ iotlab_xrn = Xrn('.'.join([self.driver.root_auth,
+ Xrn.escape(node['network_address'])]),
+ type='node')
+ # rspec_node['boot_state'] = 'true'
+ if node['state'] == 'Absent' or \
+ node['state'] == 'Suspected' or \
+ node['state'] == 'Busy':
+ rspec_node['available'] = 'false'
+ else:
+ rspec_node['available'] = 'true'
rspec_node['component_id'] = iotlab_xrn.urn
- rspec_node['component_name'] = node['hostname']
- rspec_node['component_manager_id'] = \
- hrn_to_urn(self.driver.testbed_shell.root_auth,
- 'authority+sa')
-
- # Iotlab's nodes are federated : there is only one authority
- # for all Iotlab sites, registered in SFA.
- # Removing the part including the site
- # in authority_id SA 27/07/12
+ rspec_node['component_name'] = node['network_address']
+ rspec_node['component_manager_id'] = hrn_to_urn(self.driver.root_auth,
+ 'authority+sa')
rspec_node['authority_id'] = rspec_node['component_manager_id']
-
- # do not include boot state (<available> element)
- #in the manifest rspec
-
-
- rspec_node['boot_state'] = node['boot_state']
- # if node['hostname'] in reserved_nodes:
- # rspec_node['boot_state'] = "Reserved"
rspec_node['exclusive'] = 'true'
- rspec_node['hardware_types'] = [HardwareType({'name': \
- 'iotlab-node'})]
-
- location = IotlabLocation({'country':'France', 'site': \
- node['site']})
+ rspec_node['hardware_types'] = \
+ [HardwareType({'name': node['archi']})]
+ location = IotlabLocation({'country': 'France',
+ 'site': node['site']})
rspec_node['location'] = location
-
-
position = IotlabPosition()
- for field in position :
- try:
- position[field] = node[field]
- except KeyError, error :
- logger.log_exc("IOTLABAGGREGATE\t get_nodes \
- position %s "% (error))
-
- rspec_node['position'] = position
-
-
- # Granularity
- granularity = Granularity({'grain': grain})
+ for field in position:
+ position[field] = node[field]
+ granularity = Granularity({'grain': 30})
rspec_node['granularity'] = granularity
rspec_node['tags'] = []
- # if node['hostname'] in slivers:
- # # add sliver info
- # sliver = slivers[node['hostname']]
- # rspec_node['sliver_id'] = sliver['sliver_id']
- # rspec_node['client_id'] = node['hostname']
- # rspec_node['slivers'] = [sliver]
-
- # # slivers always provide the ssh service
- # login = Login({'authentication': 'ssh-keys', \
- # 'hostname': node['hostname'], 'port':'22', \
- # 'username': sliver['name']})
- # service = Services({'login': login})
- # rspec_node['services'] = [service]
-
return rspec_node
-
- def rspec_node_to_geni_sliver(self, rspec_node, sliver_allocations = None):
- """Makes a geni sliver structure from all the nodes allocated
- to slivers in the sliver_allocations dictionary. Returns the states
- of the sliver.
-
- :param rspec_node: Node information contained in a rspec data structure
- fashion.
- :type rspec_node: dictionary
- :param sliver_allocations:
- :type sliver_allocations: dictionary
-
- :returns: Dictionary with the following keys: geni_sliver_urn,
- geni_expires, geni_allocation_status, geni_operational_status,
- geni_error.
-
- :rtype: dictionary
-
- .. seealso:: node_to_rspec_node
-
- """
- if sliver_allocations is None: sliver_allocations={}
- if rspec_node['sliver_id'] in sliver_allocations:
- # set sliver allocation and operational status
- sliver_allocation = sliver_allocations[rspec_node['sliver_id']]
- if sliver_allocation:
- allocation_status = sliver_allocation.allocation_state
- if allocation_status == 'geni_allocated':
- op_status = 'geni_pending_allocation'
- elif allocation_status == 'geni_provisioned':
- op_status = 'geni_ready'
- else:
- op_status = 'geni_unknown'
- else:
- allocation_status = 'geni_unallocated'
- else:
- allocation_status = 'geni_unallocated'
- op_status = 'geni_failed'
- # required fields
- geni_sliver = {'geni_sliver_urn': rspec_node['sliver_id'],
- 'geni_expires': rspec_node['expires'],
- 'geni_allocation_status' : allocation_status,
- 'geni_operational_status': op_status,
- 'geni_error': '',
- }
- return geni_sliver
-
-
- def sliver_to_rspec_node(self, sliver, sliver_allocations):
- """Used by describe to format node information into a rspec compliant
- structure.
-
- Creates a node rspec compliant structure by calling node_to_rspec_node.
- Adds slivers, if any, to rspec node structure. Returns the updated
- rspec node struct.
-
- :param sliver: sliver dictionary. Contains keys: urn, slice_id, hostname
- and slice_name.
- :type sliver: dictionary
- :param sliver_allocations: dictionary of slivers
- :type sliver_allocations: dict
-
- :returns: Node dictionary with all necessary data.
-
- .. seealso:: node_to_rspec_node
- """
+ def sliver_to_rspec_node(self, sliver):
+ """ Get node and sliver attributes """
rspec_node = self.node_to_rspec_node(sliver)
rspec_node['expires'] = datetime_to_string(utcparse(sliver['expires']))
- # add sliver info
- logger.debug("IOTLABAGGREGATE api \t sliver_to_rspec_node sliver \
- %s \r\nsliver_allocations %s" % (sliver,
- sliver_allocations))
- rspec_sliver = Sliver({'sliver_id': sliver['urn'],
- 'name': sliver['slice_id'],
- 'type': 'iotlab-exclusive',
- 'tags': []})
- rspec_node['sliver_id'] = rspec_sliver['sliver_id']
-
- if sliver['urn'] in sliver_allocations:
- rspec_node['client_id'] = sliver_allocations[
- sliver['urn']].client_id
- if sliver_allocations[sliver['urn']].component_id:
- rspec_node['component_id'] = sliver_allocations[
- sliver['urn']].component_id
+ rspec_node['sliver_id'] = sliver['sliver_id']
+ rspec_sliver = Sliver({'sliver_id': sliver['sliver_id'],
+ 'name': sliver['name'],
+ 'type': sliver['archi'],
+ 'tags': []})
rspec_node['slivers'] = [rspec_sliver]
-
# slivers always provide the ssh service
login = Login({'authentication': 'ssh-keys',
'hostname': sliver['hostname'],
- 'port':'22',
- 'username': sliver['slice_name'],
- 'login': sliver['slice_name']
- })
+ 'port': '22',
+ 'username': sliver['name'],
+ 'login': sliver['name']
+ })
+ service = ServicesElement({'login': login})
+ rspec_node['services'] = [service]
return rspec_node
+ @classmethod
+ def rspec_node_to_geni_sliver(cls, rspec_node):
+ """ Get sliver status """
+ geni_sliver = {'geni_sliver_urn': rspec_node['sliver_id'],
+ 'geni_expires': rspec_node['expires'],
+ 'geni_allocation_status': 'geni_allocated',
+ 'geni_operational_status': 'geni_pending_allocation',
+ 'geni_error': '',
+ }
+ return geni_sliver
- def get_leases(self, slice=None, options=None):
- if options is None: options={}
- filter={}
- if slice:
- filter.update({'slice_hrn':slice['slice_hrn']}) # JORDAN: this is = "upmc" !!!
- #filter.update({'name':slice['slice_name']})
- #return_fields = ['lease_id', 'hostname', 'site_id', 'name', 't_from', 't_until']
- leases = self.driver.GetLeases(lease_filter_dict=filter)
- grain = self.driver.testbed_shell.GetLeaseGranularity()
-
- rspec_leases = []
- for lease in leases:
- #as many leases as there are nodes in the job
- for node in lease['reserved_nodes']:
- rspec_lease = Lease()
- rspec_lease['lease_id'] = lease['lease_id']
- #site = node['site_id']
- iotlab_xrn = xrn_object(self.driver.testbed_shell.root_auth,
- node)
- rspec_lease['component_id'] = iotlab_xrn.urn
- #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn,\
- #site, node['hostname'])
- try:
- rspec_lease['slice_id'] = lease['slice_id']
- except KeyError:
- #No info on the slice used in testbed_xp table
- pass
- rspec_lease['start_time'] = lease['t_from']
- rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
- / grain
- rspec_leases.append(rspec_lease)
- return rspec_leases
-
-
- def get_all_leases(self, ldap_username):
- """
- Get list of lease dictionaries which all have the mandatory keys
- ('lease_id', 'hostname', 'site_id', 'name', 'start_time', 'duration').
- All the leases running or scheduled are returned.
-
- :param ldap_username: if ldap uid is not None, looks for the leases
- belonging to this user.
- :type ldap_username: string
- :returns: rspec lease dictionary with keys lease_id, component_id,
- slice_id, start_time, duration where the lease_id is the oar job id,
- component_id is the node's urn, slice_id is the slice urn,
- start_time is the timestamp starting time and duration is expressed
- in terms of the testbed's granularity.
- :rtype: dict
-
- .. note::There is no filtering of leases within a given time frame.
- All the running or scheduled leases are returned. options
- removed SA 15/05/2013
-
-
- """
-
- logger.debug("IOTLABAGGREGATE get_all_leases ldap_username %s "
- % (ldap_username))
- leases = self.driver.GetLeases(login=ldap_username)
- grain = self.driver.testbed_shell.GetLeaseGranularity()
-
- rspec_leases = []
- for lease in leases:
- #as many leases as there are nodes in the job
- for node in lease['reserved_nodes']:
- rspec_lease = Lease()
- rspec_lease['lease_id'] = lease['lease_id']
- #site = node['site_id']
- iotlab_xrn = xrn_object(self.driver.testbed_shell.root_auth,
- node)
- rspec_lease['component_id'] = iotlab_xrn.urn
- #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn,\
- #site, node['hostname'])
- try:
- rspec_lease['slice_id'] = lease['slice_id']
- except KeyError:
- #No info on the slice used in testbed_xp table
- pass
- rspec_lease['start_time'] = lease['t_from']
- rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
- / grain
- rspec_leases.append(rspec_lease)
- return rspec_leases
-
- def get_rspec(self, slice_xrn=None, login=None, version=None,
- options=None):
- """
- Returns xml rspec:
- - a full advertisement rspec with the testbed resources if slice_xrn is
- not specified.If a lease option is given, also returns the leases
- scheduled on the testbed.
- - a manifest Rspec with the leases and nodes in slice's leases if
- slice_xrn is not None.
-
- :param slice_xrn: srn of the slice
- :type slice_xrn: string
- :param login: user'uid (ldap login) on iotlab
- :type login: string
- :param version: can be set to sfa or iotlab
- :type version: RSpecVersion
- :param options: used to specify if the leases should also be included in
- the returned rspec.
- :type options: dict
-
- :returns: Xml Rspec.
- :rtype: XML
-
-
- """
-
- ldap_username = None
- rspec = None
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- logger.debug("IotlabAggregate \t get_rspec ***version %s \
- version.type %s version.version %s options %s \r\n"
- % (version, version.type, version.version, options))
-
- if slice_xrn is None:
- rspec_version = version_manager._get_version(version.type,
- version.version, 'ad')
-
- else:
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
-
- slices, slivers = self.get_slice_and_slivers(slice_xrn, login)
- if slice_xrn and slices is not None:
- #Get user associated with this slice
- #for one_slice in slices :
- ldap_username = self.find_ldap_username_from_slice(slices[0])
- # ldap_username = slices[0]['reg_researchers'][0].__dict__['hrn']
- # # ldap_username = slices[0]['user']
- # tmp = ldap_username.split('.')
- # ldap_username = tmp[1]
- logger.debug("IotlabAggregate \tget_rspec **** \
- LDAP USERNAME %s \r\n" \
- % (ldap_username))
- #at this point sliver may be empty if no iotlab job
- #is running for this user/slice.
- rspec = RSpec(version=rspec_version, user_options=options)
-
- logger.debug("\r\n \r\n IotlabAggregate \tget_rspec *** \
- slice_xrn %s slices %s\r\n \r\n"
- % (slice_xrn, slices))
-
- if options is not None:
- lease_option = options['list_leases']
- else:
- #If no options are specified, at least print the resources
- lease_option = 'all'
- #if slice_xrn :
- #lease_option = 'all'
-
- if lease_option in ['all', 'resources']:
- #if not options.get('list_leases') or options.get('list_leases')
- #and options['list_leases'] != 'leases':
- nodes = self.get_nodes()
- logger.debug("\r\n")
- logger.debug("IotlabAggregate \t lease_option %s \
- get rspec ******* nodes %s"
- % (lease_option, nodes))
-
- sites_set = set([node['location']['site'] for node in nodes])
-
- #In case creating a job, slice_xrn is not set to None
- rspec.version.add_nodes(nodes)
- if slice_xrn and slices is not None:
- # #Get user associated with this slice
- # #for one_slice in slices :
- # ldap_username = slices[0]['reg_researchers']
- # # ldap_username = slices[0]['user']
- # tmp = ldap_username.split('.')
- # ldap_username = tmp[1]
- # # ldap_username = tmp[1].split('_')[0]
-
- logger.debug("IotlabAggregate \tget_rspec **** \
- version type %s ldap_ user %s \r\n" \
- % (version.type, ldap_username))
- if version.type == "Iotlab":
- rspec.version.add_connection_information(
- ldap_username, sites_set)
-
- default_sliver = slivers.get('default_sliver', [])
- if default_sliver and len(nodes) is not 0:
- #default_sliver_attribs = default_sliver.get('tags', [])
- logger.debug("IotlabAggregate \tget_rspec **** \
- default_sliver%s \r\n" % (default_sliver))
- for attrib in default_sliver:
- rspec.version.add_default_sliver_attribute(
- attrib, default_sliver[attrib])
-
- if lease_option in ['all','leases']:
- leases = self.get_all_leases(ldap_username)
- rspec.version.add_leases(leases)
- logger.debug("IotlabAggregate \tget_rspec **** \
- FINAL RSPEC %s \r\n" % (rspec.toxml()))
- return rspec.toxml()
-
- def get_slivers(self, urns, options=None):
- """Get slivers of the given slice urns. Slivers contains slice, node and
- user information.
-
- For Iotlab, returns the leases with sliver ids and their allocation
- status.
-
- :param urns: list of slice urns.
- :type urns: list of strings
- :param options: unused
- :type options: unused
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
+ def list_resources(self, version=None, options=None):
"""
- # JORDAN using SLICE_KEY for slice_hrn
- SLICE_KEY = 'slice_hrn' # slice_hrn
- if options is None: options={}
- slice_ids = set()
- node_ids = []
- for urn in urns:
- xrn = IotlabXrn(xrn=urn)
- if xrn.type == 'sliver':
- # id: slice_id-node_id
- try:
- sliver_id_parts = xrn.get_sliver_id_parts()
- slice_id = int(sliver_id_parts[0])
- node_id = int(sliver_id_parts[1])
- slice_ids.add(slice_id)
- node_ids.append(node_id)
- except ValueError:
- pass
- else:
- slice_names = set()
- slice_names.add(xrn.hrn)
-
-
- logger.debug("IotlabAggregate \t get_slivers urns %s slice_ids %s \
- node_ids %s\r\n" % (urns, slice_ids, node_ids))
- logger.debug("IotlabAggregate \t get_slivers xrn %s slice_names %s \
- \r\n" % (xrn, slice_names))
- filter_sliver = {}
- if slice_names:
- filter_sliver[SLICE_KEY] = list(slice_names)
- slice_hrn = filter_sliver[SLICE_KEY][0]
-
- slice_filter_type = SLICE_KEY
-
- # if slice_ids:
- # filter['slice_id'] = list(slice_ids)
- # # get slices
- if slice_hrn:
- #logger.debug("JORDAN SLICE_HRN=%r" % slice_hrn)
- slices = self.driver.GetSlices(slice_hrn,
- slice_filter_type)
- leases = self.driver.GetLeases({SLICE_KEY:slice_hrn})
- logger.debug("IotlabAggregate \t get_slivers \
- slices %s leases %s\r\n" % (slices, leases ))
- if not slices:
- return []
-
- single_slice = slices[0]
- # get sliver users
- # XXX LOIC !!! XXX QUICK AND DIRTY - Let's try...
- logger.debug("LOIC Number of reg_researchers = %s" % len(single_slice['reg_researchers']))
- if 'reg_researchers' in single_slice and len(single_slice['reg_researchers'])==0:
- user = {'uid':single_slice['user']}
- else:
- user = single_slice['reg_researchers'][0].__dict__
-
- user = single_slice['reg_researchers'][0].__dict__
- logger.debug("IotlabAggregate \t get_slivers user %s \
- \r\n" % (user))
-
- # construct user key info
- person = self.driver.testbed_shell.ldap.LdapFindUser(record=user)
- logger.debug("IotlabAggregate \t get_slivers person %s \
- \r\n" % (person))
- # name = person['last_name']
- user['login'] = person['uid']
- # XXX LOIC !!! if we have more info, let's fill user
- if 'hrn' in user:
- user['user_urn'] = hrn_to_urn(user['hrn'], 'user')
- if 'keys' in user:
- user['keys'] = person['pkey']
-
-
- try:
- logger.debug("############################################ iotlab AM : single_slice = %s" % single_slice)
- node_ids = single_slice['node_ids']
- node_list = self.driver.testbed_shell.GetNodes()
- logger.debug("############################################ iotlab AM : node_list = %s" % node_list)
-# JORDAN REMOVED FILTER so that next check always succeed
-# {'hostname':single_slice['node_ids']})
- node_by_hostname = dict([(node['hostname'], node)
- for node in node_list])
- except KeyError:
- logger.warning("\t get_slivers No slivers in slice")
- # slice['node_ids'] = node_ids
- # nodes_dict = self.get_slice_nodes(slice, options)
-
- slivers = []
- for current_lease in leases:
- for hostname in current_lease['reserved_nodes']:
- node = {}
- node['slice_id'] = current_lease['slice_id']
- node['slice_hrn'] = current_lease['slice_hrn']
- slice_name = current_lease['slice_hrn'].split(".")[1]
- node['slice_name'] = slice_name
- index = current_lease['reserved_nodes'].index(hostname)
- node_id = current_lease['resource_ids'][index]
- # node['slice_name'] = user['login']
- # node.update(single_slice)
- # JORDAN XXX This fails sometimes when hostname not in the list
- #if hostname in node_by_hostname:
- more_info = node_by_hostname[hostname]
- node.update(more_info)
- #else:
- # # This can happen when specifying a lease without the resource, then all subsequent calls will fail
- # logger.debug("Ignored missing hostname for now one")
- # oar_job_id is the slice_id (lease_id)
- sliver_hrn = '%s.%s-%s' % (self.driver.hrn,
- current_lease['lease_id'], node_id)
- node['node_id'] = node_id
- node['expires'] = current_lease['t_until']
- node['sliver_id'] = Xrn(sliver_hrn, type='sliver').urn
- node['urn'] = node['sliver_id']
- node['services_user'] = [user]
-
- slivers.append(node)
- return slivers
-
- def list_resources(self, version = None, options=None):
+ list_resources method sends a RSpec with all Iot-LAB testbed nodes
+ and leases (OAR job submission). For leases we get all OAR jobs with
+ state Waiting or Running. If we have an entry in SFA database
+ (lease table) with OAR job id this submission was launched by SFA
+ driver, otherwise it was launched by Iot-LAB Webportal or CLI-tools
+
+ :Example:
+ <rspec>
+ ...
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa"
+ component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.devgrenoble.iot-lab.info"
+ exclusive="true" component_name="m3-10.devgrenoble.iot-lab.info">
+ <hardware_type name="iotlab-node"/>
+ <location country="France"/>
+ <granularity grain="60"/>
+ ...
+ </node>
+ ...
+ <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab"
+ start_time="1427792400" duration="30">
+ <node component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info"/>
+ </lease>
+ ...
+ </rspec>
"""
- Returns an advertisement Rspec of available resources at this
- aggregate. This Rspec contains a resource listing along with their
- description, providing sufficient information for clients to be able to
- select among available resources.
-
- :param options: various options. The valid options are: {boolean
- geni_compressed <optional>; struct geni_rspec_version { string type;
- #case insensitive , string version; # case insensitive}} . The only
- mandatory options if options is specified is geni_rspec_version.
- :type options: dictionary
+ # pylint:disable=R0914,W0212
+ logger.warning("iotlabaggregate list_resources")
+ logger.warning("iotlabaggregate list_resources options %s" % options)
+ if not options:
+ options = {}
- :returns: On success, the value field of the return struct will contain
- a geni.rspec advertisment RSpec
- :rtype: Rspec advertisement in xml.
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#RSpecdatatype
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#ListResources
- """
-
- if options is None: options={}
version_manager = VersionManager()
version = version_manager.get_version(version)
rspec_version = version_manager._get_version(version.type,
- version.version, 'ad')
+ version.version,
+ 'ad')
rspec = RSpec(version=rspec_version, user_options=options)
- # variable ldap_username to be compliant with get_all_leases
- # prototype. Now unused in geni-v3 since we are getting all the leases
- # here
- ldap_username = None
- if not options.get('list_leases') or options['list_leases'] != 'leases':
- # get nodes
- nodes_dict = self.get_nodes(options)
- # no interfaces on iotlab nodes
+ nodes = self.driver.shell.get_nodes()
+ reserved_nodes = self.driver.shell.get_reserved_nodes()
+ if 'error' not in nodes and 'error' not in reserved_nodes:
# convert nodes to rspec nodes
rspec_nodes = []
- for node_id in nodes_dict:
- node = nodes_dict[node_id]
- rspec_node = self.node_to_rspec_node(node)
+ for node in nodes:
+ rspec_node = self.node_to_rspec_node(nodes[node])
rspec_nodes.append(rspec_node)
rspec.version.add_nodes(rspec_nodes)
- # add links
- # links = self.get_links(sites, nodes_dict, interfaces)
- # rspec.version.add_links(links)
-
- if not options.get('list_leases') or options.get('list_leases') \
- and options['list_leases'] != 'resources':
- leases = self.get_all_leases(ldap_username)
- rspec.version.add_leases(leases)
-
+ leases = []
+ db_leases = {}
+ # find OAR jobs id for all slices in SFA database
+ for lease in self.driver.api.dbsession().query(LeaseTable).all():
+ db_leases[lease.job_id] = lease.slice_hrn
+
+ for lease_id in reserved_nodes:
+ # onelab slice = job submission from OneLAB
+ if lease_id in db_leases:
+ reserved_nodes[lease_id]['slice_id'] = \
+ hrn_to_urn(db_leases[lease_id],
+ 'slice')
+ # iotlab slice = job submission from Iot-LAB
+ else:
+ reserved_nodes[lease_id]['slice_id'] = \
+ hrn_to_urn(self.driver.root_auth + '.' +
+ reserved_nodes[lease_id]['owner']+"_slice",
+ 'slice')
+ leases.append(reserved_nodes[lease_id])
+
+ rspec_leases = self.leases_to_rspec_leases(leases)
+ rspec.version.add_leases(rspec_leases)
return rspec.toxml()
+ def get_slivers(self, urns, leases, nodes):
+ """ Get slivers attributes list """
+ logger.warning("iotlabaggregate get_slivers")
+ logger.warning("iotlabaggregate get_slivers urns %s" % urns)
+ slivers = []
+ for lease in leases:
+ for node in lease['resources']:
+ network_address = node.split(".")
+ sliver_node = nodes[node]
+ sliver_hrn = '%s.%s-%s' % (self.driver.hrn,
+ lease['id'],
+ network_address[0])
+ start_time = datetime.datetime.fromtimestamp(lease['date'])
+ duration = datetime.timedelta(seconds=int(lease['duration']))
+ sliver_node['expires'] = start_time + duration
+ sliver_node['sliver_id'] = Xrn(sliver_hrn,
+ type='sliver').urn
+ # frontend SSH hostname
+ sliver_node['hostname'] = '.'.join(network_address[1:])
+ # user login
+ sliver_node['name'] = lease['owner']
+ slivers.append(sliver_node)
+ return slivers
+
+ def _delete_db_lease(self, job_id):
+ """ Delete lease table row in SFA database """
+ logger.warning("iotlabdriver _delete_db_lease lease job_id : %s"
+ % job_id)
+ self.driver.api.dbsession().query(LeaseTable).filter(
+ LeaseTable.job_id == job_id).delete()
+ self.driver.api.dbsession().commit()
def describe(self, urns, version=None, options=None):
"""
- Retrieve a manifest RSpec describing the resources contained by the
- named entities, e.g. a single slice or a set of the slivers in a slice.
- This listing and description should be sufficiently descriptive to allow
- experimenters to use the resources.
-
- :param urns: If a slice urn is supplied and there are no slivers in the
- given slice at this aggregate, then geni_rspec shall be a valid
- manifest RSpec, containing no node elements - no resources.
- :type urns: list or strings
- :param options: various options. the valid options are: {boolean
- geni_compressed <optional>; struct geni_rspec_version { string type;
- #case insensitive , string version; # case insensitive}}
- :type options: dictionary
+ describe method returns slice slivers (allocated resources) and leases
+ (OAR job submission). We search in lease table of SFA database all OAR
+ jobs id for this slice and match OAR jobs with state Waiting or
+ Running. If OAR job id doesn't exist the experiment is terminated and
+ we delete the database table entry. Otherwise we add slivers and leases
+ in the response
+
+ :returns:
+ geni_slivers : a list of allocated slivers with information about
+ their allocation and operational state
+ geni_urn : the URN of the slice in which the sliver has been
+ allocated
+ geni_rspec: a RSpec describing the allocated slivers and leases
+ :rtype: dict
- :returns: On success returns the following dictionary {geni_rspec:
- <geni.rspec, a Manifest RSpec>, geni_urn: <string slice urn of the
- containing slice>, geni_slivers:{ geni_sliver_urn:
- <string sliver urn>, geni_expires: <dateTime.rfc3339 allocation
- expiration string, as in geni_expires from SliversStatus>,
- geni_allocation_status: <string sliver state - e.g. geni_allocated
- or geni_provisioned >, geni_operational_status:
- <string sliver operational state>, geni_error: <optional string.
- The field may be omitted entirely but may not be null/None,
- explaining any failure for a sliver.>}
+ :Example:
+ <rspec>
+ ...
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa"
+ component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info"
+ client_id="m3-10.grenoble.iot-lab.info"
+ sliver_id="urn:publicid:IDN+iotlab+sliver+9953-m3-10"
+ exclusive="true" component_name="m3-10.grenoble.iot-lab.info">
+ <hardware_type name="iotlab-node"/>
+ <location country="France"/>
+ <granularity grain="30"/>
+ <sliver_type name="iotlab-exclusive"/>
+ </node>
+ <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab"
+ start_time="1427792428" duration="29">
+ <node component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info"/>
+ </lease>
+ ...
+ </rspec>
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Describe
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
"""
- if options is None: options={}
+ # pylint:disable=R0914,W0212
+ logger.warning("iotlabaggregate describe")
+ logger.warning("iotlabaggregate describe urns : %s" % urns)
+ if not options:
+ options = {}
version_manager = VersionManager()
version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
+ rspec_version = version_manager._get_version(version.type,
+ version.version,
+ 'manifest')
rspec = RSpec(version=rspec_version, user_options=options)
-
- # get slivers
+ xrn = Xrn(urns[0])
geni_slivers = []
- slivers = self.get_slivers(urns, options)
- if slivers:
- rspec_expires = datetime_to_string(utcparse(slivers[0]['expires']))
- else:
- rspec_expires = datetime_to_string(utcparse(time.time()))
- rspec.xml.set('expires', rspec_expires)
- # lookup the sliver allocations
- geni_urn = urns[0]
- sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- constraint = SliverAllocation.sliver_id.in_(sliver_ids)
- query = self.driver.api.dbsession().query(SliverAllocation)
- sliver_allocations = query.filter((constraint)).all()
- sliver_allocation_dict = {}
- for sliver_allocation in sliver_allocations:
- geni_urn = sliver_allocation.slice_urn
- sliver_allocation_dict[sliver_allocation.sliver_id] = \
- sliver_allocation
- # JORDAN get the option list_leases if we want to have the leases in describe
- show_leases = options.get('list_leases')
- if show_leases in ['resources', 'all']:
- #if not options.get('list_leases') or options['list_leases'] != 'leases':
- # add slivers
- nodes_dict = {}
- for sliver in slivers:
- nodes_dict[sliver['node_id']] = sliver
+ nodes = self.driver.shell.get_nodes()
+ reserved_nodes = self.driver.shell.get_reserved_nodes()
+ if 'error' not in nodes and 'error' not in reserved_nodes:
+ # find OAR jobs id for one slice in SFA database
+ db_leases = [(lease.job_id, lease.slice_hrn)
+ for lease in self.driver.api.dbsession()
+ .query(LeaseTable)
+ .filter(LeaseTable.slice_hrn == xrn.hrn).all()]
+
+ leases = []
+ for job_id, slice_hrn in db_leases:
+ # OAR job terminated, we delete entry in database
+ if job_id not in reserved_nodes:
+ self._delete_db_lease(job_id)
+ else:
+ # onelab slice = job submission from OneLAB
+ lease = reserved_nodes[job_id]
+ lease['slice_id'] = hrn_to_urn(slice_hrn, 'slice')
+ leases.append(lease)
+
+ # get slivers
+ slivers = self.get_slivers(urns, leases, nodes)
+ if slivers:
+ date = utcparse(slivers[0]['expires'])
+ rspec_expires = datetime_to_string(date)
+ else:
+ rspec_expires = datetime_to_string(utcparse(time.time()))
+ rspec.xml.set('expires', rspec_expires)
+
rspec_nodes = []
+
for sliver in slivers:
- rspec_node = self.sliver_to_rspec_node(sliver,
- sliver_allocation_dict)
+ rspec_node = self.sliver_to_rspec_node(sliver)
rspec_nodes.append(rspec_node)
- geni_sliver = self.rspec_node_to_geni_sliver(rspec_node,
- sliver_allocation_dict)
+ geni_sliver = self.rspec_node_to_geni_sliver(rspec_node)
geni_slivers.append(geni_sliver)
+ logger.warning("iotlabaggregate describe geni_slivers %s" %
+ geni_slivers)
rspec.version.add_nodes(rspec_nodes)
- if show_leases in ['leases', 'all']:
- #if not options.get('list_leases') or options['list_leases'] == 'resources':
- if slivers:
- leases = self.get_leases(slice=slivers[0])
- logger.debug("JORDAN: getting leases from slice: %r" % slivers[0])
- rspec.version.add_leases(leases)
+ rspec_leases = self.leases_to_rspec_leases(leases)
+ logger.warning("iotlabaggregate describe rspec_leases %s" %
+ rspec_leases)
+ rspec.version.add_leases(rspec_leases)
- return {'geni_urn': geni_urn,
+ return {'geni_urn': urns[0],
'geni_rspec': rspec.toxml(),
'geni_slivers': geni_slivers}
-"""
-Implements what a driver should provide for SFA to work.
-"""
-from datetime import datetime
-from sfa.util.faults import SliverDoesNotExist, Forbidden
-from sfa.util.sfalogging import logger
-
-from sfa.storage.model import RegRecord, RegUser, RegSlice, RegKey
-from sfa.util.sfatime import utcparse, datetime_to_string
-from sfa.trust.certificate import Keypair, convert_public_key
-
-from sfa.trust.hierarchy import Hierarchy
-from sfa.trust.gid import create_uuid
+# -*- coding:utf-8 -*-
+""" driver class management """
-from sfa.managers.driver import Driver
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import Xrn, urn_to_hrn
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec
+from sfa.managers.driver import Driver
+from sfa.iotlab.iotlabshell import IotLABShell
+from sfa.iotlab.iotlabaggregate import IotLABAggregate
+from sfa.iotlab.iotlablease import LeaseTable
-from sfa.iotlab.iotlabxrn import IotlabXrn, xrn_object, xrn_to_hostname
-from sfa.util.xrn import Xrn, hrn_to_urn, get_authority, urn_to_hrn
-from sfa.iotlab.iotlabaggregate import IotlabAggregate
-
-from sfa.iotlab.iotlabslices import IotlabSlices
-
-from sfa.trust.credential import Credential
-from sfa.storage.model import SliverAllocation
-
-from sfa.iotlab.iotlabshell import IotlabShell
-from sqlalchemy.orm import joinedload
-from sfa.iotlab.iotlabpostgres import LeaseTableXP
-
-class IotlabDriver(Driver):
- """ Iotlab Driver class inherited from Driver generic class.
-
- Contains methods compliant with the SFA standard and the testbed
- infrastructure (calls to LDAP and OAR).
-
- .. seealso::: Driver class
+class IotLabDriver(Driver):
+ """
+ SFA driver for Iot-LAB testbed
"""
- def __init__(self, api):
- """
-
- Sets the iotlab SFA config parameters,
- instanciates the testbed api .
-
- :param api: SfaApi configuration object. Holds reference to the
- database.
- :type api: SfaApi object
- """
+ def __init__(self, api):
Driver.__init__(self, api)
- self.api = api
config = api.config
- self.testbed_shell = IotlabShell(config)
+ self.api = api
+ self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ self.shell = IotLABShell()
+ # need by sfa driver
self.cache = None
- def GetPeers(self, peer_filter=None ):
- """ Gathers registered authorities in SFA DB and looks for specific peer
- if peer_filter is specified.
- :param peer_filter: name of the site authority looked for.
- :type peer_filter: string
- :returns: list of records.
-
- """
-
- existing_records = {}
- existing_hrns_by_types = {}
- logger.debug("IOTLAB_API \tGetPeers peer_filter %s " % (peer_filter))
- query = self.api.dbsession().query(RegRecord)
- all_records = query.filter(RegRecord.type.like('%authority%')).all()
-
- for record in all_records:
- existing_records[(record.hrn, record.type)] = record
- if record.type not in existing_hrns_by_types:
- existing_hrns_by_types[record.type] = [record.hrn]
- else:
- existing_hrns_by_types[record.type].append(record.hrn)
-
- logger.debug("IOTLAB_API \tGetPeer\texisting_hrns_by_types %s "
- % (existing_hrns_by_types))
- records_list = []
-
- try:
- if peer_filter:
- records_list.append(existing_records[(peer_filter,
- 'authority')])
- else:
- for hrn in existing_hrns_by_types['authority']:
- records_list.append(existing_records[(hrn, 'authority')])
-
- logger.debug("IOTLAB_API \tGetPeer \trecords_list %s "
- % (records_list))
-
- except KeyError:
- pass
-
- return_records = records_list
- logger.debug("IOTLAB_API \tGetPeer return_records %s "
- % (return_records))
- return return_records
-
- def GetKeys(self, key_filter=None):
- """Returns a dict of dict based on the key string. Each dict entry
- contains the key id, the ssh key, the user's email and the
- user's hrn.
- If key_filter is specified and is an array of key identifiers,
- only keys matching the filter will be returned.
-
- Admin may query all keys. Non-admins may only query their own keys.
- FROM PLC API DOC
-
- :returns: dict with ssh key as key and dicts as value.
- :rtype: dict
- """
- query = self.api.dbsession().query(RegKey)
- if key_filter is None:
- keys = query.options(joinedload('reg_user')).all()
- else:
- constraint = RegKey.key.in_(key_filter)
- keys = query.options(joinedload('reg_user')).filter(constraint).all()
-
- key_dict = {}
- for key in keys:
- key_dict[key.key] = {'key_id': key.key_id, 'key': key.key,
- 'email': key.reg_user.email,
- 'hrn': key.reg_user.hrn}
-
- #ldap_rslt = self.ldap.LdapSearch({'enabled']=True})
- #user_by_email = dict((user[1]['mail'][0], user[1]['sshPublicKey']) \
- #for user in ldap_rslt)
-
- logger.debug("IOTLAB_API GetKeys -key_dict %s \r\n " % (key_dict))
- return key_dict
-
-
-
- def AddPerson(self, record):
- """
-
- Adds a new account. Any fields specified in records are used,
- otherwise defaults are used. Creates an appropriate login by calling
- LdapAddUser.
-
- :param record: dictionary with the sfa user's properties.
- :returns: a dicitonary with the status. If successful, the dictionary
- boolean is set to True and there is a 'uid' key with the new login
- added to LDAP, otherwise the bool is set to False and a key
- 'message' is in the dictionary, with the error message.
- :rtype: dict
-
- """
- ret = self.testbed_shell.ldap.LdapAddUser(record)
-
- if ret['bool'] is True:
- #record['hrn'] = self.testbed_shell.root_auth + '.' + ret['uid']
- logger.debug("IOTLAB_API AddPerson return code %s record %s "
- % (ret, record))
- #self.__add_person_to_db(record)
- return ret
-
- def add_person_to_db(self, user_dict):
- """
- Add a federated user straight to db when the user issues a lease
- request with iotlab nodes and that he has not registered with iotlab
- yet (that is he does not have a LDAP entry yet).
- Uses parts of the routines in IotlabImport when importing user from
- LDAP. Called by AddPerson, right after LdapAddUser.
- :param user_dict: Must contain email, hrn and pkey to get a GID
- and be added to the SFA db.
- :type user_dict: dict
-
- """
- query = self.api.dbsession().query(RegUser)
- check_if_exists = query.filter_by(email = user_dict['email']).first()
- #user doesn't exists
- if not check_if_exists:
- logger.debug("__add_person_to_db \t Adding %s \r\n \r\n \
- " %(user_dict))
- hrn = user_dict['hrn']
- person_urn = hrn_to_urn(hrn, 'user')
- try:
- pubkey = user_dict['pkey']
- pkey = convert_public_key(pubkey)
- except TypeError:
- #key not good. create another pkey
- logger.warn('__add_person_to_db: no public key or unable to convert public \
- key for %s' %(hrn ))
- pkey = Keypair(create=True)
-
-
- if pubkey is not None and pkey is not None :
- hierarchy = Hierarchy()
- # We fake the parent in order to be able to create a valid GID
- person_gid = hierarchy.create_gid(person_urn, create_uuid(), \
- pkey, force_parent='iotlab')
- if user_dict['email']:
- logger.debug("__add_person_to_db \r\n \r\n \
- IOTLAB IMPORTER PERSON EMAIL OK email %s "\
- %(user_dict['email']))
- person_gid.set_email(user_dict['email'])
-
- user_record = RegUser(hrn=hrn , pointer= '-1', \
- authority=get_authority(hrn), \
- email=user_dict['email'], gid = person_gid)
- #user_record.reg_keys = [RegKey(user_dict['pkey'])]
- user_record.just_created()
- self.api.dbsession().add (user_record)
- self.api.dbsession().commit()
- return
-
-
-
- def _sql_get_slice_info(self, slice_filter):
- """
- Get the slice record based on the slice hrn. Fetch the record of the
- user associated with the slice by using joinedload based on the
- reg_researchers relationship.
-
- :param slice_filter: the slice hrn we are looking for
- :type slice_filter: string
- :returns: the slice record enhanced with the user's information if the
- slice was found, None it wasn't.
-
- :rtype: dict or None.
- """
- #DO NOT USE RegSlice - reg_researchers to get the hrn
- #of the user otherwise will mess up the RegRecord in
- #Resolve, don't know why - SA 08/08/2012
-
- #Only one entry for one user = one slice in testbed_xp table
- #slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
-
- raw_slicerec = self.api.dbsession().query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn=slice_filter).first()
- #raw_slicerec = self.api.dbsession().query(RegRecord).filter_by(hrn = slice_filter).first()
- if raw_slicerec:
- #load_reg_researchers
- #raw_slicerec.reg_researchers
- raw_slicerec = raw_slicerec.__dict__
- logger.debug(" IOTLAB_API \t _sql_get_slice_info slice_filter %s \
- raw_slicerec %s" % (slice_filter, raw_slicerec))
- slicerec = raw_slicerec
- #only one researcher per slice so take the first one
- #slicerec['reg_researchers'] = raw_slicerec['reg_researchers']
- #del slicerec['reg_researchers']['_sa_instance_state']
- return slicerec
-
- else:
- return None
-
- def _sql_get_slice_info_from_user(self, slice_filter):
- """
- Get the slice record based on the user recordid by using a joinedload
- on the relationship reg_slices_as_researcher. Format the sql record
- into a dict with the mandatory fields for user and slice.
- :returns: dict with slice record and user record if the record was found
- based on the user's id, None if not..
- :rtype:dict or None..
- """
- #slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
- raw_slicerec = self.api.dbsession().query(RegUser).options(joinedload('reg_slices_as_researcher')).filter_by(record_id=slice_filter).first()
- #raw_slicerec = self.api.dbsession().query(RegRecord).filter_by(record_id = slice_filter).first()
- #Put it in correct order
- user_needed_fields = ['peer_authority', 'hrn', 'last_updated',
- 'classtype', 'authority', 'gid', 'record_id',
- 'date_created', 'type', 'email', 'pointer']
- slice_needed_fields = ['peer_authority', 'hrn', 'last_updated',
- 'classtype', 'authority', 'gid', 'record_id',
- 'date_created', 'type', 'pointer']
- if raw_slicerec:
- #raw_slicerec.reg_slices_as_researcher
- raw_slicerec = raw_slicerec.__dict__
- slicerec = {}
- slicerec = \
- dict([(k, raw_slicerec[
- 'reg_slices_as_researcher'][0].__dict__[k])
- for k in slice_needed_fields])
- slicerec['reg_researchers'] = dict([(k, raw_slicerec[k])
- for k in user_needed_fields])
- #TODO Handle multiple slices for one user SA 10/12/12
- #for now only take the first slice record associated to the rec user
- ##slicerec = raw_slicerec['reg_slices_as_researcher'][0].__dict__
- #del raw_slicerec['reg_slices_as_researcher']
- #slicerec['reg_researchers'] = raw_slicerec
- ##del slicerec['_sa_instance_state']
-
- return slicerec
-
- else:
- return None
-
-
-
- def _get_slice_records(self, slice_filter=None,
- slice_filter_type=None):
- """
- Get the slice record depending on the slice filter and its type.
- :param slice_filter: Can be either the slice hrn or the user's record
- id.
- :type slice_filter: string
- :param slice_filter_type: describes the slice filter type used, can be
- slice_hrn or record_id_user
- :type: string
- :returns: the slice record
- :rtype:dict
- .. seealso::_sql_get_slice_info_from_user
- .. seealso:: _sql_get_slice_info
- """
-
- #Get list of slices based on the slice hrn
- if slice_filter_type == 'slice_hrn':
-
- #if get_authority(slice_filter) == self.root_auth:
- #login = slice_filter.split(".")[1].split("_")[0]
-
- slicerec = self._sql_get_slice_info(slice_filter)
-
- if slicerec is None:
- return None
- #return login, None
-
- #Get slice based on user id
- if slice_filter_type == 'record_id_user':
-
- slicerec = self._sql_get_slice_info_from_user(slice_filter)
-
- if slicerec:
- fixed_slicerec_dict = slicerec
- #At this point if there is no login it means
- #record_id_user filter has been used for filtering
- #if login is None :
- ##If theslice record is from iotlab
- #if fixed_slicerec_dict['peer_authority'] is None:
- #login = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
- #return login, fixed_slicerec_dict
- return fixed_slicerec_dict
- else:
- return None
-
-
-
- def GetSlices(self, slice_filter=None, slice_filter_type=None,
- login=None):
- """Get the slice records from the iotlab db and add lease information
- if any.
-
- :param slice_filter: can be the slice hrn or slice record id in the db
- depending on the slice_filter_type.
- :param slice_filter_type: defines the type of the filtering used, Can be
- either 'slice_hrn' or "record_id'.
- :type slice_filter: string
- :type slice_filter_type: string
- :returns: a slice dict if slice_filter and slice_filter_type
- are specified and a matching entry is found in the db. The result
- is put into a list.Or a list of slice dictionnaries if no filters
- arespecified.
-
- :rtype: list
-
- """
- #login = None
- authorized_filter_types_list = ['slice_hrn', 'record_id_user']
- return_slicerec_dictlist = []
-
- #First try to get information on the slice based on the filter provided
- if slice_filter_type in authorized_filter_types_list:
- fixed_slicerec_dict = self._get_slice_records(slice_filter,
- slice_filter_type)
- # if the slice was not found in the sfa db
- if fixed_slicerec_dict is None:
- return return_slicerec_dictlist
-
- slice_hrn = fixed_slicerec_dict['hrn']
-
- logger.debug(" IOTLAB_API \tGetSlices login %s \
- slice record %s slice_filter %s \
- slice_filter_type %s " % (login,
- fixed_slicerec_dict, slice_filter,
- slice_filter_type))
-
-
- #Now we have the slice record fixed_slicerec_dict, get the
- #jobs associated to this slice
- leases_list = []
-
- leases_list = self.GetLeases(login=login)
- #If no job is running or no job scheduled
- #return only the slice record
- if leases_list == [] and fixed_slicerec_dict:
- return_slicerec_dictlist.append(fixed_slicerec_dict)
-
- # if the jobs running don't belong to the user/slice we are looking
- # for
- leases_hrn = [lease['slice_hrn'] for lease in leases_list]
- if slice_hrn not in leases_hrn:
- return_slicerec_dictlist.append(fixed_slicerec_dict)
- #If several jobs for one slice , put the slice record into
- # each lease information dict
- for lease in leases_list:
- slicerec_dict = {}
- logger.debug("IOTLAB_API.PY \tGetSlices slice_filter %s \
- \t lease['slice_hrn'] %s"
- % (slice_filter, lease['slice_hrn']))
- if lease['slice_hrn'] == slice_hrn:
- slicerec_dict['oar_job_id'] = lease['lease_id']
- #Update lease dict with the slice record
- if fixed_slicerec_dict:
- fixed_slicerec_dict['oar_job_id'] = []
- fixed_slicerec_dict['oar_job_id'].append(
- slicerec_dict['oar_job_id'])
- slicerec_dict.update(fixed_slicerec_dict)
- #slicerec_dict.update({'hrn':\
- #str(fixed_slicerec_dict['slice_hrn'])})
- slicerec_dict['slice_hrn'] = lease['slice_hrn']
- slicerec_dict['hrn'] = lease['slice_hrn']
- slicerec_dict['user'] = lease['user']
- slicerec_dict.update(
- {'list_node_ids':
- {'hostname': lease['reserved_nodes']}})
- slicerec_dict.update({'node_ids': lease['reserved_nodes']})
-
-
-
- return_slicerec_dictlist.append(slicerec_dict)
-
- logger.debug("IOTLAB_API.PY \tGetSlices \
- slicerec_dict %s return_slicerec_dictlist %s \
- lease['reserved_nodes'] \
- %s" % (slicerec_dict, return_slicerec_dictlist,
- lease['reserved_nodes']))
-
- logger.debug("IOTLAB_API.PY \tGetSlices RETURN \
- return_slicerec_dictlist %s"
- % (return_slicerec_dictlist))
-
- return return_slicerec_dictlist
-
-
- else:
- #Get all slices from the iotlab sfa database ,
- #put them in dict format
- #query_slice_list = dbsession.query(RegRecord).all()
- query_slice_list = \
- self.api.dbsession().query(RegSlice).options(joinedload('reg_researchers')).all()
-
- for record in query_slice_list:
- tmp = record.__dict__
- tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
- #del tmp['reg_researchers']['_sa_instance_state']
- return_slicerec_dictlist.append(tmp)
- #return_slicerec_dictlist.append(record.__dict__)
-
- #Get all the jobs reserved nodes
- leases_list = self.testbed_shell.GetReservedNodes()
-
- for fixed_slicerec_dict in return_slicerec_dictlist:
- slicerec_dict = {}
- #Check if the slice belongs to a iotlab user
- if fixed_slicerec_dict['peer_authority'] is None:
- owner = fixed_slicerec_dict['hrn'].split(
- ".")[1].split("_")[0]
- else:
- owner = None
- for lease in leases_list:
- if owner == lease['user']:
- slicerec_dict['oar_job_id'] = lease['lease_id']
-
- #for reserved_node in lease['reserved_nodes']:
- logger.debug("IOTLAB_API.PY \tGetSlices lease %s "
- % (lease))
- slicerec_dict.update(fixed_slicerec_dict)
- slicerec_dict.update({'node_ids':
- lease['reserved_nodes']})
- slicerec_dict.update({'list_node_ids':
- {'hostname':
- lease['reserved_nodes']}})
-
- #slicerec_dict.update({'hrn':\
- #str(fixed_slicerec_dict['slice_hrn'])})
- #return_slicerec_dictlist.append(slicerec_dict)
- fixed_slicerec_dict.update(slicerec_dict)
-
- logger.debug("IOTLAB_API.PY \tGetSlices RETURN \
- return_slicerec_dictlist %s \t slice_filter %s " \
- %(return_slicerec_dictlist, slice_filter))
-
- return return_slicerec_dictlist
-
- def AddLeases(self, hostname_list, slice_record,
- lease_start_time, lease_duration):
-
- """Creates a job in OAR corresponding to the information provided
- as parameters. Adds the job id and the slice hrn in the iotlab
- database so that we are able to know which slice has which nodes.
-
- :param hostname_list: list of nodes' OAR hostnames.
- :param slice_record: sfa slice record, must contain login and hrn.
- :param lease_start_time: starting time , unix timestamp format
- :param lease_duration: duration in minutes
-
- :type hostname_list: list
- :type slice_record: dict
- :type lease_start_time: integer
- :type lease_duration: integer
- :returns: job_id, can be None if the job request failed.
-
- """
- logger.debug("IOTLAB_API \r\n \r\n \t AddLeases hostname_list %s \
- slice_record %s lease_start_time %s lease_duration %s "\
- %( hostname_list, slice_record , lease_start_time, \
- lease_duration))
-
- #tmp = slice_record['reg-researchers'][0].split(".")
- username = slice_record['login']
- #username = tmp[(len(tmp)-1)]
- job_id = self.testbed_shell.LaunchExperimentOnOAR(hostname_list, \
- slice_record['hrn'], \
- lease_start_time, lease_duration, \
- username)
- if job_id is not None:
- start_time = \
- datetime.fromtimestamp(int(lease_start_time)).\
- strftime(self.testbed_shell.time_format)
- end_time = lease_start_time + lease_duration
-
-
- logger.debug("IOTLAB_API \r\n \r\n \t AddLeases TURN ON LOGGING SQL \
- %s %s %s "%(slice_record['hrn'], job_id, end_time))
-
-
- logger.debug("IOTLAB_API \r\n \r\n \t AddLeases %s %s %s " \
- %(type(slice_record['hrn']), type(job_id), type(end_time)))
-
- iotlab_ex_row = LeaseTableXP(slice_hrn = slice_record['hrn'],
- experiment_id=job_id,
- end_time= end_time)
-
- logger.debug("IOTLAB_API \r\n \r\n \t AddLeases iotlab_ex_row %s" \
- %(iotlab_ex_row))
- self.api.dbsession().add(iotlab_ex_row)
- self.api.dbsession().commit()
-
- logger.debug("IOTLAB_API \t AddLeases hostname_list start_time %s "
- %(start_time))
-
- return job_id
-
- def GetLeases(self, lease_filter_dict=None, login=None):
- """
-
- Get the list of leases from OAR with complete information
- about which slice owns which jobs and nodes.
- Two purposes:
- -Fetch all the jobs from OAR (running, waiting..)
- complete the reservation information with slice hrn
- found in lease_table . If not available in the table,
- assume it is a iotlab slice.
- -Updates the iotlab table, deleting jobs when necessary.
-
- :returns: reservation_list, list of dictionaries with 'lease_id',
- 'reserved_nodes','slice_id', 'state', 'user', 'component_id_list',
- 'slice_hrn', 'resource_ids', 't_from', 't_until'
- :rtype: list
-
- """
-
- unfiltered_reservation_list = self.testbed_shell.GetReservedNodes(login)
-
- reservation_list = []
- #Find the slice associated with this user iotlab ldap uid
- logger.debug(" IOTLAB_API.PY \tGetLeases login %s\
- unfiltered_reservation_list %s "
- % (login, unfiltered_reservation_list))
- #Create user dict first to avoid looking several times for
- #the same user in LDAP SA 27/07/12
- job_oar_list = []
- jobs_psql_query = self.api.dbsession().query(LeaseTableXP).all()
- jobs_psql_dict = dict([(row.experiment_id, row.__dict__)
- for row in jobs_psql_query])
- #jobs_psql_dict = jobs_psql_dict)
- logger.debug("IOTLAB_API \tGetLeases jobs_psql_dict %s"
- % (jobs_psql_dict))
- jobs_psql_id_list = [row.experiment_id for row in jobs_psql_query]
-
- for resa in unfiltered_reservation_list:
- logger.debug("IOTLAB_API \tGetLeases USER %s"
- % (resa['user']))
- #Construct list of jobs (runing, waiting..) in oar
- job_oar_list.append(resa['lease_id'])
- #If there is information on the job in IOTLAB DB ]
- #(slice used and job id)
- if resa['lease_id'] in jobs_psql_dict:
- job_info = jobs_psql_dict[resa['lease_id']]
- logger.debug("IOTLAB_API \tGetLeases job_info %s"
- % (job_info))
- resa['slice_hrn'] = job_info['slice_hrn']
- resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
-
- #otherwise, assume it is a iotlab slice:
- else:
- resa['slice_id'] = hrn_to_urn(self.testbed_shell.root_auth \
- + '.' + resa['user'] + "_slice",
- 'slice')
- resa['slice_hrn'] = Xrn(resa['slice_id']).get_hrn()
-
- resa['component_id_list'] = []
- #Transform the hostnames into urns (component ids)
- for node in resa['reserved_nodes']:
-
- iotlab_xrn = xrn_object(self.testbed_shell.root_auth, node)
- resa['component_id_list'].append(iotlab_xrn.urn)
-
- if lease_filter_dict:
- logger.debug("IOTLAB_API \tGetLeases \
- \r\n leasefilter %s" % ( lease_filter_dict))
-
- # filter_dict_functions = {
- # 'slice_hrn' : IotlabShell.filter_lease_name,
- # 't_from' : IotlabShell.filter_lease_start_time
- # }
- reservation_list = list(unfiltered_reservation_list)
- for filter_type in lease_filter_dict:
- logger.debug("IOTLAB_API \tGetLeases reservation_list %s" \
- % (reservation_list))
- reservation_list = self.testbed_shell.filter_lease(
- reservation_list,filter_type,
- lease_filter_dict[filter_type] )
-
- # Filter the reservation list with a maximum timespan so that the
- # leases and jobs running after this timestamp do not appear
- # in the result leases.
- # if 'start_time' in :
- # if resa['start_time'] < lease_filter_dict['start_time']:
- # reservation_list.append(resa)
-
-
- # if 'name' in lease_filter_dict and \
- # lease_filter_dict['name'] == resa['slice_hrn']:
- # reservation_list.append(resa)
-
-
- if lease_filter_dict is None:
- reservation_list = unfiltered_reservation_list
-
- self.update_experiments_in_lease_table(job_oar_list, jobs_psql_id_list)
-
- logger.debug(" IOTLAB_API.PY \tGetLeases reservation_list %s"
- % (reservation_list))
- return reservation_list
-
-
-
- def update_experiments_in_lease_table(self,
- experiment_list_from_testbed, experiment_list_in_db):
- """ Cleans the lease_table by deleting expired and cancelled jobs.
-
- Compares the list of experiment ids given by the testbed with the
- experiment ids that are already in the database, deletes the
- experiments that are no longer in the testbed experiment id list.
-
- :param experiment_list_from_testbed: list of experiment ids coming
- from testbed
- :type experiment_list_from_testbed: list
- :param experiment_list_in_db: list of experiment ids from the sfa
- additionnal database.
- :type experiment_list_in_db: list
-
- :returns: None
- """
- #Turn the list into a set
- set_experiment_list_in_db = set(experiment_list_in_db)
-
- kept_experiments = set(experiment_list_from_testbed).intersection(set_experiment_list_in_db)
- logger.debug("\r\n \t update_experiments_in_lease_table \
- experiment_list_in_db %s \r\n \
- experiment_list_from_testbed %s \
- kept_experiments %s "
- % (set_experiment_list_in_db,
- experiment_list_from_testbed, kept_experiments))
- deleted_experiments = set_experiment_list_in_db.difference(
- kept_experiments)
- deleted_experiments = list(deleted_experiments)
- if len(deleted_experiments) > 0:
- request = self.api.dbsession().query(LeaseTableXP)
- request.filter(LeaseTableXP.experiment_id.in_(deleted_experiments)).delete(synchronize_session='fetch')
- self.api.dbsession().commit()
- return
-
-
- def AddSlice(self, slice_record, user_record):
- """
-
- Add slice to the local iotlab sfa tables if the slice comes
- from a federated site and is not yet in the iotlab sfa DB,
- although the user has already a LDAP login.
- Called by verify_slice during lease/sliver creation.
-
- :param slice_record: record of slice, must contain hrn, gid, slice_id
- and authority of the slice.
- :type slice_record: dictionary
- :param user_record: record of the user
- :type user_record: RegUser
-
- """
-
- sfa_record = RegSlice(hrn=slice_record['hrn'],
- gid=slice_record['gid'],
- #pointer=slice_record['slice_id'],
- authority=slice_record['authority'])
- logger.debug("IOTLAB_API.PY AddSlice sfa_record %s user_record %s"
- % (sfa_record, user_record))
- sfa_record.just_created()
- self.api.dbsession().add(sfa_record)
- self.api.dbsession().commit()
- #Update the reg-researchers dependency table
- sfa_record.reg_researchers = [user_record]
- self.api.dbsession().commit()
-
- return
-
- def augment_records_with_testbed_info(self, record_list):
- """
-
- Adds specific testbed info to the records.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :returns: list of records with extended information in each record
- :rtype: list
-
- """
- return self.fill_record_info(record_list)
-
- def fill_record_info(self, record_list):
- """
-
- For each SFA record, fill in the iotlab specific and SFA specific
- fields in the record.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :returns: list of records with extended information in each record
- :rtype: list
-
- .. warning:: Should not be modifying record_list directly because modi
- fication are kept outside the method's scope. Howerver, there is no
- other way to do it given the way it's called in registry manager.
-
- """
-
- logger.debug("IOTLABDRIVER \tfill_record_info records %s "
- % (record_list))
- if not isinstance(record_list, list):
- record_list = [record_list]
-
- try:
- for record in record_list:
-
- if str(record['type']) == 'node':
- # look for node info using GetNodes
- # the record is about one node only
- filter_dict = {'hrn': [record['hrn']]}
- node_info = self.testbed_shell.GetNodes(filter_dict)
- # the node_info is about one node only, but it is formatted
- # as a list
- record.update(node_info[0])
- logger.debug("IOTLABDRIVER.PY \t \
- fill_record_info NODE" % (record))
-
- #If the record is a SFA slice record, then add information
- #about the user of this slice. This kind of
- #information is in the Iotlab's DB.
- if str(record['type']) == 'slice':
- if 'reg_researchers' in record and isinstance(record
- ['reg_researchers'],
- list):
- record['reg_researchers'] = \
- record['reg_researchers'][0].__dict__
- record.update(
- {'PI': [record['reg_researchers']['hrn']],
- 'researcher': [record['reg_researchers']['hrn']],
- 'name': record['hrn'],
- 'oar_job_id': [],
- 'node_ids': [],
- 'person_ids': [record['reg_researchers']
- ['record_id']],
- # For client_helper.py compatibility
- 'geni_urn': '',
- # For client_helper.py compatibility
- 'keys': '',
- # For client_helper.py compatibility
- 'key_ids': ''})
-
- #Get iotlab slice record and oar job id if any.
- recslice_list = self.GetSlices(
- slice_filter=str(record['hrn']),
- slice_filter_type='slice_hrn')
-
- logger.debug("IOTLABDRIVER \tfill_record_info \
- TYPE SLICE RECUSER record['hrn'] %s record['oar_job_id']\
- %s " % (record['hrn'], record['oar_job_id']))
- del record['reg_researchers']
- try:
- for rec in recslice_list:
- logger.debug("IOTLABDRIVER\r\n \t \
- fill_record_info oar_job_id %s "
- % (rec['oar_job_id']))
-
- record['node_ids'] = [self.testbed_shell.root_auth +
- '.' + hostname for hostname
- in rec['node_ids']]
- except KeyError:
- pass
-
- logger.debug("IOTLABDRIVER.PY \t fill_record_info SLICE \
- recslice_list %s \r\n \t RECORD %s \r\n \
- \r\n" % (recslice_list, record))
-
- if str(record['type']) == 'user':
- #The record is a SFA user record.
- #Get the information about his slice from Iotlab's DB
- #and add it to the user record.
- recslice_list = self.GetSlices(
- slice_filter=record['record_id'],
- slice_filter_type='record_id_user')
-
- logger.debug("IOTLABDRIVER.PY \t fill_record_info \
- TYPE USER recslice_list %s \r\n \t RECORD %s \r\n"
- % (recslice_list, record))
- #Append slice record in records list,
- #therefore fetches user and slice info again(one more loop)
- #Will update PIs and researcher for the slice
-
- recuser = recslice_list[0]['reg_researchers']
- logger.debug("IOTLABDRIVER.PY \t fill_record_info USER \
- recuser %s \r\n \r\n" % (recuser))
- recslice = {}
- recslice = recslice_list[0]
- recslice.update(
- {'PI': [recuser['hrn']],
- 'researcher': [recuser['hrn']],
- 'name': recuser['hrn'],
- 'node_ids': [],
- 'oar_job_id': [],
- 'person_ids': [recuser['record_id']]})
- try:
- for rec in recslice_list:
- recslice['oar_job_id'].append(rec['oar_job_id'])
- except KeyError:
- pass
-
- recslice.update({'type': 'slice',
- 'hrn': recslice_list[0]['hrn']})
-
- #GetPersons takes [] as filters
- user_iotlab = self.testbed_shell.GetPersons([record])
-
- record.update(user_iotlab[0])
- #For client_helper.py compatibility
- record.update(
- {'geni_urn': '',
- 'keys': '',
- 'key_ids': ''})
- record_list.append(recslice)
-
- logger.debug("IOTLABDRIVER.PY \t \
- fill_record_info ADDING SLICE\
- INFO TO USER records %s" % (record_list))
-
- except TypeError, error:
- logger.log_exc("IOTLABDRIVER \t fill_record_info EXCEPTION %s"
- % (error))
-
- return record_list
-
- def sliver_status(self, slice_urn, slice_hrn):
- """
- Receive a status request for slice named urn/hrn
- urn:publicid:IDN+iotlab+nturro_slice hrn iotlab.nturro_slice
- shall return a structure as described in
- http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
- NT : not sure if we should implement this or not, but used by sface.
-
- :param slice_urn: slice urn
- :type slice_urn: string
- :param slice_hrn: slice hrn
- :type slice_hrn: string
-
- """
-
- #First get the slice with the slice hrn
- slice_list = self.GetSlices(slice_filter=slice_hrn,
- slice_filter_type='slice_hrn')
-
- if len(slice_list) == 0:
- raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
-
- #Used for fetching the user info witch comes along the slice info
- one_slice = slice_list[0]
-
- #Make a list of all the nodes hostnames in use for this slice
- slice_nodes_list = []
- slice_nodes_list = one_slice['node_ids']
- #Get all the corresponding nodes details
- nodes_all = self.testbed_shell.GetNodes(
- {'hostname': slice_nodes_list},
- ['node_id', 'hostname', 'site', 'boot_state'])
- nodeall_byhostname = dict([(one_node['hostname'], one_node)
- for one_node in nodes_all])
-
- for single_slice in slice_list:
- #For compatibility
- top_level_status = 'empty'
- result = {}
- result.fromkeys(
- ['geni_urn', 'geni_error', 'iotlab_login', 'geni_status',
- 'geni_resources'], None)
- # result.fromkeys(\
- # ['geni_urn','geni_error', 'pl_login','geni_status',
- # 'geni_resources'], None)
- # result['pl_login'] = one_slice['reg_researchers'][0].hrn
- result['iotlab_login'] = one_slice['user']
- logger.debug("Slabdriver - sliver_status Sliver status \
- urn %s hrn %s single_slice %s \r\n "
- % (slice_urn, slice_hrn, single_slice))
-
- if 'node_ids' not in single_slice:
- #No job in the slice
- result['geni_status'] = top_level_status
- result['geni_resources'] = []
- return result
-
- top_level_status = 'ready'
-
- #A job is running on Iotlab for this slice
- # report about the local nodes that are in the slice only
-
- result['geni_urn'] = slice_urn
-
- resources = []
- for node_hostname in single_slice['node_ids']:
- res = {}
- res['iotlab_hostname'] = node_hostname
- res['iotlab_boot_state'] = \
- nodeall_byhostname[node_hostname]['boot_state']
-
- #res['pl_hostname'] = node['hostname']
- #res['pl_boot_state'] = \
- #nodeall_byhostname[node['hostname']]['boot_state']
- #res['pl_last_contact'] = strftime(self.time_format, \
- #gmtime(float(timestamp)))
- sliver_id = Xrn(
- slice_urn, type='slice',
- id=nodeall_byhostname[node_hostname]['node_id']).urn
-
- res['geni_urn'] = sliver_id
- #node_name = node['hostname']
- if nodeall_byhostname[node_hostname]['boot_state'] == 'Alive':
-
- res['geni_status'] = 'ready'
- else:
- res['geni_status'] = 'failed'
- top_level_status = 'failed'
-
- res['geni_error'] = ''
-
- resources.append(res)
-
- result['geni_status'] = top_level_status
- result['geni_resources'] = resources
- logger.debug("IOTLABDRIVER \tsliver_statusresources %s res %s "
- % (resources, res))
- return result
-
- def get_user_record(self, hrn):
- """
-
- Returns the user record based on the hrn from the SFA DB .
-
- :param hrn: user's hrn
- :type hrn: string
- :returns: user record from SFA database
- :rtype: RegUser
-
- """
- return self.api.dbsession().query(RegRecord).filter_by(hrn=hrn).first()
-
- def testbed_name(self):
- """
-
- Returns testbed's name.
- :returns: testbed authority name.
- :rtype: string
-
- """
- return self.hrn
-
-
- def _get_requested_leases_list(self, rspec):
- """
- Process leases in rspec depending on the rspec version (format)
- type. Find the lease requests in the rspec and creates
- a lease request list with the mandatory information ( nodes,
- start time and duration) of the valid leases (duration above or
- equal to the iotlab experiment minimum duration).
-
- :param rspec: rspec request received.
- :type rspec: RSpec
- :returns: list of lease requests found in the rspec
- :rtype: list
- """
- requested_lease_list = []
- for lease in rspec.version.get_leases():
- single_requested_lease = {}
- logger.debug("IOTLABDRIVER.PY \t \
- _get_requested_leases_list lease %s " % (lease))
-
- if not lease.get('lease_id'):
- if get_authority(lease['component_id']) == \
- self.testbed_shell.root_auth:
- single_requested_lease['hostname'] = \
- xrn_to_hostname(\
- lease.get('component_id').strip())
- single_requested_lease['start_time'] = \
- lease.get('start_time')
- single_requested_lease['duration'] = lease.get('duration')
- #Check the experiment's duration is valid before adding
- #the lease to the requested leases list
- duration_in_seconds = \
- int(single_requested_lease['duration'])
- if duration_in_seconds >= self.testbed_shell.GetMinExperimentDurationInGranularity():
- requested_lease_list.append(single_requested_lease)
-
- return requested_lease_list
-
- @staticmethod
- def _group_leases_by_start_time(requested_lease_list):
- """
- Create dict of leases by start_time, regrouping nodes reserved
- at the same time, for the same amount of time so as to
- define one job on OAR.
-
- :param requested_lease_list: list of leases
- :type requested_lease_list: list
- :returns: Dictionary with key = start time, value = list of leases
- with the same start time.
- :rtype: dictionary
-
- """
-
- requested_xp_dict = {}
- for lease in requested_lease_list:
-
- #In case it is an asap experiment start_time is empty
- if lease['start_time'] == '':
- lease['start_time'] = '0'
-
- if lease['start_time'] not in requested_xp_dict:
- if isinstance(lease['hostname'], str):
- lease['hostname'] = [lease['hostname']]
-
- requested_xp_dict[lease['start_time']] = lease
-
- else:
- job_lease = requested_xp_dict[lease['start_time']]
- if lease['duration'] == job_lease['duration']:
- job_lease['hostname'].append(lease['hostname'])
-
- return requested_xp_dict
-
- def _process_requested_xp_dict(self, rspec):
- """
- Turns the requested leases and information into a dictionary
- of requested jobs, grouped by starting time.
-
- :param rspec: RSpec received
- :type rspec : RSpec
- :rtype: dictionary
-
- """
- requested_lease_list = self._get_requested_leases_list(rspec)
- logger.debug("IOTLABDRIVER _process_requested_xp_dict \
- requested_lease_list %s" % (requested_lease_list))
- xp_dict = self._group_leases_by_start_time(requested_lease_list)
- logger.debug("IOTLABDRIVER _process_requested_xp_dict xp_dict\
- %s" % (xp_dict))
-
- return xp_dict
-
-
-
- def delete(self, slice_urns, options=None):
- """
- Deletes the lease associated with the slice hrn and the credentials
- if the slice belongs to iotlab. Answer to DeleteSliver.
-
- :param slice_urn: urn of the slice
- :type slice_urn: string
-
-
- :returns: 1 if the slice to delete was not found on iotlab,
- True if the deletion was successful, False otherwise otherwise.
-
- .. note:: Should really be named delete_leases because iotlab does
- not have any slivers, but only deals with leases. However,
- SFA api only have delete_sliver define so far. SA 13/05/2013
- .. note:: creds are unused, and are not used either in the dummy driver
- delete_sliver .
- """
- if options is None: options={}
- # collect sliver ids so we can update sliver allocation states after
- # we remove the slivers.
- aggregate = IotlabAggregate(self)
- slivers = aggregate.get_slivers(slice_urns)
- if slivers:
- # slice_id = slivers[0]['slice_id']
- node_ids = []
- sliver_ids = []
- sliver_jobs_dict = {}
- for sliver in slivers:
- node_ids.append(sliver['node_id'])
- sliver_ids.append(sliver['sliver_id'])
- job_id = sliver['sliver_id'].split('+')[-1].split('-')[0]
- sliver_jobs_dict[job_id] = sliver['sliver_id']
- logger.debug("IOTLABDRIVER.PY delete_sliver slivers %s slice_urns %s"
- % (slivers, slice_urns))
- slice_hrn = urn_to_hrn(slice_urns[0])[0]
-
- sfa_slice_list = self.GetSlices(slice_filter=slice_hrn,
- slice_filter_type='slice_hrn')
-
- if not sfa_slice_list:
- return 1
-
- #Delete all leases in the slice
- for sfa_slice in sfa_slice_list:
- logger.debug("IOTLABDRIVER.PY delete_sliver slice %s" % (sfa_slice))
- slices = IotlabSlices(self)
- # determine if this is a peer slice
-
- peer = slices.get_peer(slice_hrn)
-
- logger.debug("IOTLABDRIVER.PY delete_sliver peer %s \
- \r\n \t sfa_slice %s " % (peer, sfa_slice))
- oar_bool_ans = self.testbed_shell.DeleteSliceFromNodes(
- sfa_slice)
- for job_id in oar_bool_ans:
- # if the job has not been successfully deleted
- # don't delete the associated sliver
- # remove it from the sliver list
- if oar_bool_ans[job_id] is False:
- sliver = sliver_jobs_dict[job_id]
- sliver_ids.remove(sliver)
- try:
-
- dbsession = self.api.dbsession()
- SliverAllocation.delete_allocations(sliver_ids, dbsession)
- except :
- logger.log_exc("IOTLABDRIVER.PY delete error ")
-
- # prepare return struct
- geni_slivers = []
- for sliver in slivers:
- geni_slivers.append(
- {'geni_sliver_urn': sliver['sliver_id'],
- 'geni_allocation_status': 'geni_unallocated',
- 'geni_expires': datetime_to_string(utcparse(sliver['expires']))})
- return geni_slivers
-
-
-
-
- def list_slices(self, creds, options):
- """Answer to ListSlices.
-
- List slices belonging to iotlab, returns slice urns list.
- No caching used. Options unused but are defined in the SFA method
- api prototype.
-
- :returns: slice urns list
- :rtype: list
-
- .. note:: creds and options are unused - SA 12/12/13
- """
- # look in cache first
- #if self.cache:
- #slices = self.cache.get('slices')
- #if slices:
- #logger.debug("PlDriver.list_slices returns from cache")
- #return slices
-
- # get data from db
-
- slices = self.GetSlices()
- logger.debug("IOTLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n"
- % (slices))
- slice_hrns = [iotlab_slice['hrn'] for iotlab_slice in slices]
-
- slice_urns = [hrn_to_urn(slice_hrn, 'slice')
- for slice_hrn in slice_hrns]
-
- # cache the result
- #if self.cache:
- #logger.debug ("IotlabDriver.list_slices stores value in cache")
- #self.cache.add('slices', slice_urns)
-
- return slice_urns
+ def check_sliver_credentials(self, creds, urns):
+        """ Not used but needed by SFA """
+ pass
+ # #######################################
+ # ######### registry oriented
+ # #######################################
+ ##########
def register(self, sfa_record, hrn, pub_key):
- """
- Adding new user, slice, node or site should not be handled
- by SFA.
-
- ..warnings:: should not be used. Different components are in charge of
- doing this task. Adding nodes = OAR
- Adding users = LDAP Iotlab
- Adding slice = Import from LDAP users
- Adding site = OAR
-
- :param sfa_record: record provided by the client of the
- Register API call.
- :type sfa_record: dict
- :param pub_key: public key of the user
- :type pub_key: string
-
- .. note:: DOES NOTHING. Returns -1.
-
- """
+ logger.warning("iotlabdriver register : not implemented")
return -1
-
def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
- """
- No site or node record update allowed in Iotlab. The only modifications
- authorized here are key deletion/addition on an existing user and
- password change. On an existing user, CAN NOT BE MODIFIED: 'first_name',
- 'last_name', 'email'. DOES NOT EXIST IN SENSLAB: 'phone', 'url', 'bio',
- 'title', 'accepted_aup'. A slice is bound to its user, so modifying the
- user's ssh key should nmodify the slice's GID after an import procedure.
-
- :param old_sfa_record: what is in the db for this hrn
- :param new_sfa_record: what was passed to the update call
- :param new_key: the new user's public key
- :param hrn: the user's sfa hrn
- :type old_sfa_record: dict
- :type new_sfa_record: dict
- :type new_key: string
- :type hrn: string
-
- TODO: needs review
- .. warning:: SA 12/12/13 - Removed. should be done in iotlabimporter
- since users, keys and slice are managed by the LDAP.
-
- """
- # pointer = old_sfa_record['pointer']
- # old_sfa_record_type = old_sfa_record['type']
-
- # # new_key implemented for users only
- # if new_key and old_sfa_record_type not in ['user']:
- # raise UnknownSfaType(old_sfa_record_type)
-
- # if old_sfa_record_type == "user":
- # update_fields = {}
- # all_fields = new_sfa_record
- # for key in all_fields.keys():
- # if key in ['key', 'password']:
- # update_fields[key] = all_fields[key]
-
- # if new_key:
- # # must check this key against the previous one if it exists
- # persons = self.testbed_shell.GetPersons([old_sfa_record])
- # person = persons[0]
- # keys = [person['pkey']]
- # #Get all the person's keys
- # keys_dict = self.GetKeys(keys)
-
- # # Delete all stale keys, meaning the user has only one key
- # #at a time
- # #TODO: do we really want to delete all the other keys?
- # #Is this a problem with the GID generation to have multiple
- # #keys? SA 30/05/13
- # key_exists = False
- # if key in keys_dict:
- # key_exists = True
- # else:
- # #remove all the other keys
- # for key in keys_dict:
- # self.testbed_shell.DeleteKey(person, key)
- # self.testbed_shell.AddPersonKey(
- # person, {'sshPublicKey': person['pkey']},
- # {'sshPublicKey': new_key})
- logger.warning ("UNDEFINED - Update should be done by the \
- iotlabimporter")
+ logger.warning("iotlabdriver update : not implemented")
return True
def remove(self, sfa_record):
- """
-
- Removes users only. Mark the user as disabled in LDAP. The user and his
- slice are then deleted from the db by running an import on the registry.
-
- :param sfa_record: record is the existing sfa record in the db
- :type sfa_record: dict
-
- ..warning::As fas as the slice is concerned, here only the leases are
- removed from the slice. The slice is record itself is not removed
- from the db.
-
- TODO: needs review
-
- TODO : REMOVE SLICE FROM THE DB AS WELL? SA 14/05/2013,
-
- TODO: return boolean for the slice part
- """
- sfa_record_type = sfa_record['type']
- hrn = sfa_record['hrn']
- if sfa_record_type == 'user':
-
- #get user from iotlab ldap
- person = self.testbed_shell.GetPersons(sfa_record)
- #No registering at a given site in Iotlab.
- #Once registered to the LDAP, all iotlab sites are
- #accesible.
- if person:
- #Mark account as disabled in ldap
- return self.testbed_shell.DeletePerson(sfa_record)
-
- elif sfa_record_type == 'slice':
- if self.GetSlices(slice_filter=hrn,
- slice_filter_type='slice_hrn'):
- ret = self.testbed_shell.DeleteSlice(sfa_record)
- return True
-
- def check_sliver_credentials(self, creds, urns):
- """Check that the sliver urns belongs to the slice specified in the
- credentials.
-
- :param urns: list of sliver urns.
- :type urns: list.
- :param creds: slice credentials.
- :type creds: Credential object.
-
-
- """
- # build list of cred object hrns
- slice_cred_names = []
- for cred in creds:
- slice_cred_hrn = Credential(cred=cred).get_gid_object().get_hrn()
- slicename = IotlabXrn(xrn=slice_cred_hrn).iotlab_slicename()
- slice_cred_names.append(slicename)
-
- # look up slice name of slivers listed in urns arg
-
- slice_ids = []
- for urn in urns:
- sliver_id_parts = Xrn(xrn=urn).get_sliver_id_parts()
- try:
- slice_ids.append(int(sliver_id_parts[0]))
- except ValueError:
- pass
-
- if not slice_ids:
- raise Forbidden("sliver urn not provided")
+ logger.warning("iotlabdriver remove : not implemented")
+ return True
- slices = self.GetSlices(slice_ids)
- sliver_names = [single_slice['name'] for single_slice in slices]
+ # #######################################
+ # ######### aggregate oriented
+ # #######################################
- # make sure we have a credential for every specified sliver
- for sliver_name in sliver_names:
- if sliver_name not in slice_cred_names:
- msg = "Valid credential not found for target: %s" % sliver_name
- raise Forbidden(msg)
+ def provision(self, urns, options=None):
+ logger.warning("iotlabdriver provision : not implemented")
+ version_manager = VersionManager()
+ opt = options['geni_rspec_version']
+ rspec_version = version_manager.get_version(opt)
+ return self.describe(urns, rspec_version, options=options)
- ########################################
- ########## aggregate oriented
- ########################################
+ def delete(self, urns, options=None):
+ logger.warning("iotlabdriver delete : not implemented")
+ geni_slivers = []
+ return geni_slivers
- # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
def aggregate_version(self):
- """
-
- Returns the testbed's supported rspec advertisement and request
- versions.
- :returns: rspec versions supported ad a dictionary.
- :rtype: dict
-
- """
version_manager = VersionManager()
ad_rspec_versions = []
request_rspec_versions = []
if rspec_version.content_type in ['*', 'request']:
request_rspec_versions.append(rspec_version.to_dict())
return {
- 'testbed': self.testbed_name(),
+ 'testbed': self.hrn,
'geni_request_rspec_versions': request_rspec_versions,
'geni_ad_rspec_versions': ad_rspec_versions}
- # first 2 args are None in case of resource discovery
- def list_resources (self, version=None, options=None):
- if options is None: options={}
- aggregate = IotlabAggregate(self)
- rspec = aggregate.list_resources(version=version, options=options)
+ def list_resources(self, version=None, options=None):
+ logger.warning("iotlabdriver list_resources")
+ if not options:
+ options = {}
+ aggregate = IotLABAggregate(self)
+ rspec = aggregate.list_resources(version=version, options=options)
return rspec
- def describe(self, urns, version, options={}):
- aggregate = IotlabAggregate(self)
+ def describe(self, urns, version, options=None):
+ logger.warning("iotlabdriver describe")
+ if not options:
+ options = {}
+ aggregate = IotLABAggregate(self)
return aggregate.describe(urns, version=version, options=options)
- def status (self, urns, options=None):
- if options is None: options={}
- aggregate = IotlabAggregate(self)
- desc = aggregate.describe(urns, version='GENI 3')
+ def status(self, urns, options=None):
+ logger.warning("iotlabdriver status")
+ aggregate = IotLABAggregate(self)
+ desc = aggregate.describe(urns, version='GENI 3')
status = {'geni_urn': desc['geni_urn'],
'geni_slivers': desc['geni_slivers']}
return status
+ def _get_users(self):
+ """ Get all users """
+ ret = self.shell.get_users()
+ if 'error' in ret:
+ return None
+ return ret
- def allocate (self, urn, rspec_string, expiration, options=None):
- if options is None: options={}
- xrn = Xrn(urn)
- aggregate = IotlabAggregate(self)
-
- slices = IotlabSlices(self)
- peer = slices.get_peer(xrn.get_hrn())
- sfa_peer = slices.get_sfa_peer(xrn.get_hrn())
-
- caller_hrn = options.get('actual_caller_hrn', [])
- caller_xrn = Xrn(caller_hrn)
- caller_urn = caller_xrn.get_urn()
-
- logger.debug("IOTLABDRIVER.PY :: Allocate caller = %s" % (caller_urn))
+ def _get_user_login(self, caller_user):
+ """ Get user login with email """
+ email = caller_user['email']
+ # ensure user exist in LDAP tree
+ users = self._get_users()
+ if users and email not in users:
+ self.shell.add_user(caller_user)
+ users = self._get_users()
+ if users and email in users:
+ return users[email]['login']
+ else:
+ return None
- slice_record = {}
- users = options.get('geni_users', [])
- sfa_users = options.get('sfa_users', [])
-
- if sfa_users:
- user = None
- # Looking for the user who actually called the Allocate function in the list of users of the slice
- for u in sfa_users:
- if 'urn' in u and u['urn'] == caller_urn:
- user = u
- logger.debug("user = %s" % u)
- # If we find the user in the list we use it, else we take the 1st in the list as before
- if user:
- user_hrn = caller_hrn
- else:
- user = sfa_users[0]
- # XXX Always empty ??? no slice_record in the Allocate call
- #slice_record = sfa_users[0].get('slice_record', [])
- user_xrn = Xrn(sfa_users[0]['urn'])
- user_hrn = user_xrn.get_hrn()
+ @classmethod
+ def _get_experiment(cls, rspec):
+ """
+ Find in RSpec leases the experiment start time, duration and nodes
+ list.
+
+ :Example:
+ <rspec>
+ ...
+ <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab"
+ start_time="1427792400" duration="30">
+ <node component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info"/>
+ </lease>
+ <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab"
+ start_time="1427792600" duration="50">
+ <node component_id=
+ "urn:publicid:IDN+iotlab+node+m3-15.grenoble.iot-lab.info"/>
+ </lease>
+ ...
+ </rspec>
+ """
+ leases = rspec.version.get_leases()
+ start_time = min([int(lease['start_time'])
+ for lease in leases])
+ # ASAP jobs
+ if start_time == 0:
+ start_time = None
+ duration = max([int(lease['duration'])
+ for lease in leases])
+ # schedule jobs
+ else:
+ end_time = max([int(lease['start_time']) +
+ int(lease['duration'])*60
+ for lease in leases])
+ from math import floor
+ # minutes
+ duration = floor((end_time - start_time)/60)
+ nodes_list = [Xrn.unescape(Xrn(lease['component_id'].strip(),
+ type='node').get_leaf())
+ for lease in leases]
+ # uniq hostnames
+ nodes_list = list(set(nodes_list))
+ return nodes_list, start_time, duration
+
+ def _save_db_lease(self, job_id, slice_hrn):
+ """ Save lease table row in SFA database """
+ lease_row = LeaseTable(job_id,
+ slice_hrn)
+ logger.warning("iotlabdriver _save_db_lease lease row : %s" %
+ lease_row)
+ self.api.dbsession().add(lease_row)
+ self.api.dbsession().commit()
- slice_record = user.get('slice_record', {})
- slice_record['user'] = {'keys': user['keys'],
- 'email': user['email'],
- 'hrn': user_hrn}
- slice_record['authority'] = xrn.get_authority_hrn()
+ def allocate(self, urn, rspec_string, expiration, options=None):
+ """
+ Allocate method submit an experiment on Iot-LAB testbed with :
+ * user : get the slice user which launch request (caller_hrn)
+ * reservation : get the start time and duration in RSpec leases
+ * nodes : get the nodes list in RSpec leases
+ If we have a request success on Iot-LAB testbed we store in SFA
+        database the association between the OAR scheduler job id and slice hrn
- logger.debug("IOTLABDRIVER.PY \t urn %s allocate options %s "
- % (urn, options))
+ :param urn : slice urn
+ :param rspec_string : RSpec received
+ :param options : options with slice users (geni_users)
+ """
+ # pylint:disable=R0914
+ logger.warning("iotlabdriver allocate")
+ xrn = Xrn(urn)
+ aggregate = IotLABAggregate(self)
# parse rspec
rspec = RSpec(rspec_string)
- # requested_attributes = rspec.version.get_slice_attributes()
-
- # ensure site record exists
-
- # ensure person records exists
- for user in users:
- # XXX LOIC using hrn is a workaround because the function
- # Xrn.get_urn returns 'urn:publicid:IDN+onelab:upmc+timur_friedman'
- # Instead of this 'urn:publicid:IDN+onelab:upmc+user+timur_friedman'
- user['hrn'] = urn_to_hrn(user['urn'])[0]
- # XXX LOIC adding the users of the slice to reg-researchers
- # reg-researchers is used in iotlabslices.py verify_slice in order to add the slice
- if 'reg-researchers' not in slice_record:
- slice_record['reg-researchers'] = list()
- slice_record['reg-researchers'].append(user['hrn'])
- if caller_hrn == user['hrn']:
- #hierarchical_user = user['hrn'].split(".")
- #user['login'] = hierarchical_user[-1]
- #slice_record['login'] = user['login']
- slice_record['user']=user
-
- # oui c'est degueulasse, le slice_record se retrouve modifie
- # dans la methode avec les infos du user, els infos sont propagees
- # dans verify_slice_leases
- logger.debug("IOTLABDRIVER.PY BEFORE slices.verify_persons")
-
- # XXX JORDAN XXX slice_record devrait recevoir le caller_xrn...
- # LOIC maintenant c'est fait au dessus
- logger.debug("IOTLABDRIVER.PY - LOIC - slice_record[user][hrn] = %s" % slice_record['user']['hrn'])
- logger.debug("IOTLABDRIVER.PY - LOIC - slice_record[reg-researchers] = %s" % slice_record['reg-researchers'])
- persons = slices.verify_persons(xrn.hrn, slice_record, users,
- options=options)
- logger.debug("IOTLABDRIVER.PY AFTER slices.verify_persons")
- logger.debug("LOIC - slice_record[user] = %s" % slice_record['user'])
- logger.debug("IOTLABDRIVER.PY - LOIC - slice_record[reg-researchers] = %s" % slice_record['reg-researchers'])
-
- # ensure slice record exists
- current_slice = slices.verify_slice(xrn.hrn, slice_record, sfa_peer)
- logger.debug("LOIC - AFTER verify_slice - slice_record[user] = %s" % slice_record['user'])
- logger.debug("IOTLABDRIVER.PY - LOIC - slice_record[reg-researchers] = %s" % slice_record['reg-researchers'])
- logger.debug("IOTLABDRIVER.PY \t ===============allocate \t\
- \r\n \r\n current_slice %s" % (current_slice))
-
- # ensure slice attributes exists
- # slices.verify_slice_attributes(slice, requested_attributes,
- # options=options)
- # add/remove slice from nodes
- # XXX JORDAN ensure requested_xp_dict returns a dict with all new leases
- requested_xp_dict = self._process_requested_xp_dict(rspec)
-
- logger.debug("IOTLABDRIVER.PY \tallocate requested_xp_dict %s "
- % (requested_xp_dict))
- request_nodes = rspec.version.get_nodes_with_slivers()
-
-
- # JORDAN: nodes_list will contain a list of newly allocated nodes
- nodes_list = []
- for start_time in requested_xp_dict:
- lease = requested_xp_dict[start_time]
- for hostname in lease['hostname']:
- nodes_list.append(hostname)
-
- # nodes = slices.verify_slice_nodes(slice_record,request_nodes, peer)
- logger.debug("IOTLABDRIVER.PY \tallocate nodes_list %s slice_record %s"
- % (nodes_list, slice_record))
-
- # add/remove leases
- rspec_requested_leases = rspec.version.get_leases()
- leases = slices.verify_slice_leases(slice_record,
- requested_xp_dict, peer)
- # JORDAN:
- # leases = already in slice
- # rspec_requested_leases = newly requested
- logger.debug("IOTLABDRIVER.PY \tallocate leases %s \
- rspec_requested_leases %s" % (leases,
- rspec_requested_leases))
- # update sliver allocations
- # JORDAN Here we loop over newly allocated nodes
- for hostname in nodes_list:
- client_id = hostname
- node_urn = xrn_object(self.testbed_shell.root_auth, hostname).urn
- component_id = node_urn
- if 'reg-urn' in current_slice:
- slice_urn = current_slice['reg-urn']
- else:
- slice_urn = current_slice['urn']
-
- # JORDAN: We loop over leases previously in the slice
- for lease in leases:
- if hostname in lease['reserved_nodes']:
- index = lease['reserved_nodes'].index(hostname)
- sliver_hrn = '%s.%s-%s' % (self.hrn, lease['lease_id'],
- lease['resource_ids'][index] )
- sliver_id = Xrn(sliver_hrn, type='sliver').urn
- record = SliverAllocation(sliver_id=sliver_id, client_id=client_id,
- component_id=component_id,
- slice_urn = slice_urn,
- allocation_state='geni_allocated')
- record.sync(self.api.dbsession())
-
- # JORDAN : added describe_options which was not specified at all
- describe_options = {
- 'geni_slice_urn': urn,
- 'list_leases': 'all',
- }
- return aggregate.describe([xrn.get_urn()], version=rspec.version, options=describe_options)
-
- def provision(self, urns, options=None):
- if options is None: options={}
- # update users
- slices = IotlabSlices(self)
- aggregate = IotlabAggregate(self)
- slivers = aggregate.get_slivers(urns)
- current_slice = slivers[0]
- peer = slices.get_peer(current_slice['hrn'])
- sfa_peer = slices.get_sfa_peer(current_slice['hrn'])
- users = options.get('geni_users', [])
- # persons = slices.verify_persons(current_slice['hrn'],
- # current_slice, users, peer, sfa_peer, options=options)
- # slices.handle_peer(None, None, persons, peer)
- # update sliver allocation states and set them to geni_provisioned
- sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- dbsession = self.api.dbsession()
- SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned',
- dbsession)
- version_manager = VersionManager()
- rspec_version = version_manager.get_version(options[
- 'geni_rspec_version'])
- # JORDAN : added describe_options instead of options
- # urns at the begining ???
- describe_options = {
- 'geni_slice_urn': current_slice['urn'],
- 'list_leases': 'all',
- }
- return self.describe(urns, rspec_version, options=describe_options)
+ caller_hrn = options.get('actual_caller_hrn', [])
+ geni_users = options.get('geni_users', [])
+ caller_user = [user for user in geni_users if
+ urn_to_hrn(user['urn'])[0] == caller_hrn][0]
+ logger.warning("iotlabdriver allocate caller : %s" %
+ caller_user['email'])
+
+ login = self._get_user_login(caller_user)
+ # only if we have a user
+ if login:
+ nodes_list, start_time, duration = \
+ self._get_experiment(rspec)
+ logger.warning("iotlabdriver allocate submit OAR job :"
+ " %s %s %s %s" %
+ (xrn.hrn, start_time, duration, nodes_list))
+ # [0-9A-Za-z_] with onelab.inria.test_iotlab
+ exp_name = '_'.join((xrn.hrn).split('.'))
+ # submit OAR job
+ ret = self.shell.reserve_nodes(login,
+ exp_name,
+ nodes_list,
+ start_time,
+ duration)
+
+ # in case of job submission success save slice and lease job
+ # id association in database
+ if 'id' in ret:
+ self._save_db_lease(int(ret['id']),
+ xrn.hrn)
+
+ return aggregate.describe([xrn.get_urn()], version=rspec.version)
--- /dev/null
+# -*- coding:utf-8 -*-
+""" PostGreSQL table management """
+
+from sfa.storage.model import Base
+from sqlalchemy import Column, Integer, String
+
+
+class LeaseTable(Base):
+    """ SQLAlchemy class to manipulate the rows of the lease_table table in the
+ SFA database. Table creation is made by the importer (iotlabimporter.py)
+ if it is not in the database yet.
+
+ As we don't have a link between a lease (OAR job submission) and a slice we
+    store this information in the database. We match OAR job id and slice hrn.
+ """
+ # pylint:disable=R0903
+ __tablename__ = 'lease_table'
+
+ job_id = Column(Integer, primary_key=True)
+ slice_hrn = Column(String)
+
+ def __init__(self, job_id, slice_hrn):
+ """
+ Defines a row of the lease_table table
+ """
+ self.job_id = job_id
+ self.slice_hrn = slice_hrn
+
+ def __repr__(self):
+ """Prints the SQLAlchemy record to the format defined
+ by the function.
+ """
+ result = "job_id %s, slice_hrn = %s" % (self.job_id,
+ self.slice_hrn)
+ return result
+++ /dev/null
-"""
-File holding a class to define the table in the iotlab dedicated table.
-The table is the SFA dtabase, therefore all the access mecanism
-(session, engine...) is handled by alchemy.py.
-
-..seealso:: alchemy.py
-"""
-
-from sfa.storage.model import Base
-from sqlalchemy import Column, Integer, String
-
-
-
-class LeaseTableXP (Base):
- """ SQL alchemy class to manipulate the rows of the lease_table table in the
- SFA database. Handles the records representation and creates.
- Table creation is made by the importer if it is not in the database yet.
-
- .. seealso:: init_tables in model.py, run in iotlabimporter.py
-
- """
- __tablename__ = 'lease_table'
-
- slice_hrn = Column(String)
- experiment_id = Column(Integer, primary_key=True)
- end_time = Column(Integer, nullable=False)
-
- def __init__(self, slice_hrn=None, experiment_id=None, end_time=None):
- """
- Defines a row of the lease_table table
- """
- if slice_hrn:
- self.slice_hrn = slice_hrn
- if experiment_id:
- self.experiment_id = experiment_id
- if end_time:
- self.end_time = end_time
-
- def __repr__(self):
- """Prints the SQLAlchemy record to the format defined
- by the function.
- """
- result = "<lease_table : slice_hrn = %s , experiment_id %s \
- end_time = %s" % (self.slice_hrn, self.experiment_id,
- self.end_time)
- result += ">"
- return result
-"""
-File containing the IotlabShell, used to interact with nodes, users,
-slices, leases and keys, as well as the dedicated iotlab database and table,
-holding information about which slice is running which job.
-
-"""
-from datetime import datetime
+# -*- coding:utf-8 -*-
+""" Shell driver management """
from sfa.util.sfalogging import logger
-from sfa.util.sfatime import SFATIME_FORMAT
-
-from sfa.iotlab.OARrestapi import OARrestapi
-from sfa.iotlab.LDAPapi import LDAPapi
-
-
-class IotlabShell():
- """ Class enabled to use LDAP and OAR api calls. """
-
- _MINIMUM_DURATION = 10 # 10 units of granularity 60 s, 10 mins
-
- def __init__(self, config):
- """Creates an instance of OARrestapi and LDAPapi which will be used to
- issue calls to OAR or LDAP methods.
- Set the time format and the testbed granularity used for OAR
- reservation and leases.
-
- :param config: configuration object from sfa.util.config
- :type config: Config object
- """
+from iotlabcli import auth
+from iotlabcli import rest
+from iotlabcli import helpers
+from iotlabcli import experiment
+from urllib2 import HTTPError
- # self.leases_db = TestbedAdditionalSfaDB(config)
- self.oar = OARrestapi()
- self.ldap = LDAPapi()
- self.time_format = SFATIME_FORMAT
- self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
- self.grain = 60 # 10 mins lease minimum, 60 sec granularity
- #import logging, logging.handlers
- #from sfa.util.sfalogging import _SfaLogger
- #sql_logger = _SfaLogger(loggername = 'sqlalchemy.engine', \
- #level=logging.DEBUG)
- return
-
- @staticmethod
- def GetMinExperimentDurationInGranularity():
- """ Returns the minimum allowed duration for an experiment on the
- testbed. In seconds.
-
- """
- return IotlabShell._MINIMUM_DURATION
-
-
-
-
- #TODO : Handling OR request in make_ldap_filters_from_records
- #instead of the for loop
- #over the records' list
- def GetPersons(self, person_filter=None):
- """
- Get the enabled users and their properties from Iotlab LDAP.
- If a filter is specified, looks for the user whose properties match
- the filter, otherwise returns the whole enabled users'list.
- :param person_filter: Must be a list of dictionnaries with users
- properties when not set to None.
- :type person_filter: list of dict
+class IotLABShell(object):
+ """
+ A REST client shell to the Iot-LAB testbed API instance
+ """
- :returns: Returns a list of users whose accounts are enabled
- found in ldap.
- :rtype: list of dicts
+ def __init__(self):
+ user, passwd = auth.get_user_credentials()
+ self.api = rest.Api(user, passwd)
+ def get_nodes(self):
"""
- logger.debug("IOTLAB_API \tGetPersons 1st person_filter %s"
- % (person_filter[0]['hrn']))
- person_list = []
- if person_filter and isinstance(person_filter, list):
- #If we are looking for a list of users (list of dict records)
- #Usually the list contains only one user record
- for searched_attributes in person_filter:
-
- #Get only enabled user accounts in iotlab LDAP :
- #add a filter for make_ldap_filters_from_record
- person = self.ldap.LdapFindUser(searched_attributes,
- is_user_enabled=True)
- #If a person was found, append it to the list
- if person:
- person_list.append(person)
-
- #If the list is empty, return None
- if len(person_list) is 0:
- person_list = None
-
- else:
- #Get only enabled user accounts in iotlab LDAP :
- #add a filter for make_ldap_filters_from_record
- person_list = self.ldap.LdapFindUser(is_user_enabled=True)
-
- return person_list
-
-
- #def GetTimezone(self):
- #""" Returns the OAR server time and timezone.
- #Unused SA 30/05/13"""
- #server_timestamp, server_tz = self.oar.parser.\
- #SendRequest("GET_timezone")
- #return server_timestamp, server_tz
-
- def DeleteJobs(self, job_id, username):
- """
-
- Deletes the job with the specified job_id and username on OAR by
- posting a delete request to OAR.
-
- :param job_id: job id in OAR.
- :param username: user's iotlab login in LDAP.
- :type job_id: integer
- :type username: string
-
- :returns: dictionary with the job id and if delete has been successful
- (True) or no (False)
+ Get all OAR nodes
+ :returns: nodes with OAR properties
:rtype: dict
- """
- logger.debug("IOTLAB_API \tDeleteJobs jobid %s username %s "
- % (job_id, username))
- if not job_id or job_id is -1:
- return
-
- reqdict = {}
- reqdict['method'] = "delete"
- reqdict['strval'] = str(job_id)
-
- answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id',
- reqdict, username)
- if answer['status'] == 'Delete request registered':
- ret = {job_id: True}
- else:
- ret = {job_id: False}
- logger.debug("IOTLAB_API \tDeleteJobs jobid %s \r\n answer %s \
- username %s" % (job_id, answer, username))
- return ret
-
-
-
- ##TODO : Unused GetJobsId ? SA 05/07/12
- #def GetJobsId(self, job_id, username = None ):
- #"""
- #Details about a specific job.
- #Includes details about submission time, jot type, state, events,
- #owner, assigned ressources, walltime etc...
-
- #"""
- #req = "GET_jobs_id"
- #node_list_k = 'assigned_network_address'
- ##Get job info from OAR
- #job_info = self.oar.parser.SendRequest(req, job_id, username)
-
- #logger.debug("IOTLAB_API \t GetJobsId %s " %(job_info))
- #try:
- #if job_info['state'] == 'Terminated':
- #logger.debug("IOTLAB_API \t GetJobsId job %s TERMINATED"\
- #%(job_id))
- #return None
- #if job_info['state'] == 'Error':
- #logger.debug("IOTLAB_API \t GetJobsId ERROR message %s "\
- #%(job_info))
- #return None
-
- #except KeyError:
- #logger.error("IOTLAB_API \tGetJobsId KeyError")
- #return None
-
- #parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
- #node_list_k)
- ##Replaces the previous entry
- ##"assigned_network_address" / "reserved_resources"
- ##with "node_ids"
- #job_info.update({'node_ids':parsed_job_info[node_list_k]})
- #del job_info[node_list_k]
- #logger.debug(" \r\nIOTLAB_API \t GetJobsId job_info %s " %(job_info))
- #return job_info
-
-
- def GetJobsResources(self, job_id, username = None):
- """ Gets the list of nodes associated with the job_id and username
- if provided.
-
- Transforms the iotlab hostnames to the corresponding SFA nodes hrns.
- Returns dict key :'node_ids' , value : hostnames list.
-
- :param username: user's LDAP login
- :paran job_id: job's OAR identifier.
- :type username: string
- :type job_id: integer
-
- :returns: dicionary with nodes' hostnames belonging to the job.
- :rtype: dict
-
- .. warning:: Unused. SA 16/10/13
- """
-
- req = "GET_jobs_id_resources"
-
-
- #Get job resources list from OAR
- node_id_list = self.oar.parser.SendRequest(req, job_id, username)
- logger.debug("IOTLAB_API \t GetJobsResources %s " %(node_id_list))
- resources = self.GetNodes()
- oar_id_node_dict = {}
- for node in resources:
- oar_id_node_dict[node['oar_id']] = node['hostname']
- hostname_list = \
- self.__get_hostnames_from_oar_node_ids(oar_id_node_dict,
- node_id_list)
-
-
- #Replaces the previous entry "assigned_network_address" /
- #"reserved_resources" with "node_ids"
- job_info = {'node_ids': hostname_list}
-
- return job_info
-
-
- def GetNodesCurrentlyInUse(self):
- """Returns a list of all the nodes already involved in an oar running
- job.
- :rtype: list of nodes hostnames.
- """
- return self.oar.parser.SendRequest("GET_running_jobs")
-
- @staticmethod
- def __get_hostnames_from_oar_node_ids(oar_id_node_dict,
- resource_id_list ):
- """Get the hostnames of the nodes from their OAR identifiers.
- Get the list of nodes dict using GetNodes and find the hostname
- associated with the identifier.
- :param oar_id_node_dict: full node dictionary list keyed by oar node id
- :param resource_id_list: list of nodes identifiers
- :returns: list of node hostnames.
- """
-
- hostname_list = []
- for resource_id in resource_id_list:
- #Because jobs requested "asap" do not have defined resources
- if resource_id is not "Undefined":
- hostname_list.append(\
- oar_id_node_dict[resource_id]['hostname'])
-
- return hostname_list
-
- def GetReservedNodes(self, username=None):
- """ Get list of leases. Get the leases for the username if specified,
- otherwise get all the leases. Finds the nodes hostnames for each
- OAR node identifier.
- :param username: user's LDAP login
- :type username: string
- :returns: list of reservations dict
- :rtype: dict list
- """
-
- #Get the nodes in use and the reserved nodes
- reservation_dict_list = \
- self.oar.parser.SendRequest("GET_reserved_nodes", \
- username = username)
-
- # Get the full node dict list once for all
- # so that we can get the hostnames given their oar node id afterwards
- # when the reservations are checked.
- full_nodes_dict_list = self.GetNodes()
- #Put the full node list into a dictionary keyed by oar node id
- oar_id_node_dict = {}
- for node in full_nodes_dict_list:
- oar_id_node_dict[node['oar_id']] = node
-
- for resa in reservation_dict_list:
- logger.debug ("GetReservedNodes resa %s"%(resa))
- #dict list of hostnames and their site
- resa['reserved_nodes'] = \
- self.__get_hostnames_from_oar_node_ids(oar_id_node_dict,
- resa['resource_ids'])
-
- #del resa['resource_ids']
- return reservation_dict_list
-
- def GetNodes(self, node_filter_dict=None, return_fields_list=None):
- """
-
- Make a list of iotlab nodes and their properties from information
- given by OAR. Search for specific nodes if some filters are
- specified. Nodes properties returned if no return_fields_list given:
- 'hrn','archi','mobile','hostname','site','boot_state','node_id',
- 'radio','posx','posy','oar_id','posz'.
-
- :param node_filter_dict: dictionnary of lists with node properties. For
- instance, if you want to look for a specific node with its hrn,
- the node_filter_dict should be {'hrn': [hrn_of_the_node]}
- :type node_filter_dict: dict
- :param return_fields_list: list of specific fields the user wants to be
- returned.
- :type return_fields_list: list
- :returns: list of dictionaries with node properties
- :rtype: list
-
- """
- node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
- node_dict_list = node_dict_by_id.values()
- logger.debug (" IOTLAB_API GetNodes node_filter_dict %s \
- return_fields_list %s " % (node_filter_dict, return_fields_list))
- #No filtering needed return the list directly
- if not (node_filter_dict or return_fields_list):
- return node_dict_list
-
- return_node_list = []
- if node_filter_dict:
- for filter_key in node_filter_dict:
- try:
- #Filter the node_dict_list by each value contained in the
- #list node_filter_dict[filter_key]
- for value in node_filter_dict[filter_key]:
- for node in node_dict_list:
- if node[filter_key] == value:
- if return_fields_list:
- tmp = {}
- for k in return_fields_list:
- tmp[k] = node[k]
- return_node_list.append(tmp)
- else:
- return_node_list.append(node)
- except KeyError:
- logger.log_exc("GetNodes KeyError")
- return
-
-
- return return_node_list
-
-
-
-
-
- def GetSites(self, site_filter_name_list=None, return_fields_list=None):
- """Returns the list of Iotlab's sites with the associated nodes and
- the sites' properties as dictionaries.
-
- Site properties:
- ['address_ids', 'slice_ids', 'name', 'node_ids', 'url', 'person_ids',
- 'site_tag_ids', 'enabled', 'site', 'longitude', 'pcu_ids',
- 'max_slivers', 'max_slices', 'ext_consortium_id', 'date_created',
- 'latitude', 'is_public', 'peer_site_id', 'peer_id', 'abbreviated_name']
- Uses the OAR request GET_sites to find the Iotlab's sites.
-
- :param site_filter_name_list: used to specify specific sites
- :param return_fields_list: field that has to be returned
- :type site_filter_name_list: list
- :type return_fields_list: list
-
-
- """
- site_dict = self.oar.parser.SendRequest("GET_sites")
- #site_dict : dict where the key is the sit ename
- return_site_list = []
- if not (site_filter_name_list or return_fields_list):
- return_site_list = site_dict.values()
- return return_site_list
-
- for site_filter_name in site_filter_name_list:
- if site_filter_name in site_dict:
- if return_fields_list:
- for field in return_fields_list:
- tmp = {}
- try:
- tmp[field] = site_dict[site_filter_name][field]
- except KeyError:
- logger.error("GetSites KeyError %s " % (field))
- return None
- return_site_list.append(tmp)
- else:
- return_site_list.append(site_dict[site_filter_name])
-
- return return_site_list
-
-
- #TODO : Check rights to delete person
- def DeletePerson(self, person_record):
- """Disable an existing account in iotlab LDAP.
-
- Users and techs can only delete themselves. PIs can only
- delete themselves and other non-PIs at their sites.
- ins can delete anyone.
-
- :param person_record: user's record
- :type person_record: dict
- :returns: True if successful, False otherwise.
- :rtype: boolean
-
- .. todo:: CHECK THAT ONLY THE USER OR ADMIN CAN DEL HIMSELF.
- """
- #Disable user account in iotlab LDAP
- ret = self.ldap.LdapMarkUserAsDeleted(person_record)
- logger.warning("IOTLAB_API DeletePerson %s " % (person_record))
- return ret['bool']
-
- def DeleteSlice(self, slice_record):
- """Deletes the specified slice and kills the jobs associated with
- the slice if any, using DeleteSliceFromNodes.
-
- :param slice_record: record of the slice, must contain oar_job_id, user
- :type slice_record: dict
- :returns: True if all the jobs in the slice have been deleted,
- or the list of jobs that could not be deleted otherwise.
- :rtype: list or boolean
-
- .. seealso:: DeleteSliceFromNodes
-
- """
- ret = self.DeleteSliceFromNodes(slice_record)
- delete_failed = None
- for job_id in ret:
- if False in ret[job_id]:
- if delete_failed is None:
- delete_failed = []
- delete_failed.append(job_id)
-
- logger.info("IOTLAB_API DeleteSlice %s answer %s"%(slice_record, \
- delete_failed))
- return delete_failed or True
-
-
-
-
-
-
-
-
-
-
-
- #TODO AddPersonKey 04/07/2012 SA
- def AddPersonKey(self, person_uid, old_attributes_dict, new_key_dict):
- """Adds a new key to the specified account. Adds the key to the
- iotlab ldap, provided that the person_uid is valid.
-
- Non-admins can only modify their own keys.
-
- :param person_uid: user's iotlab login in LDAP
- :param old_attributes_dict: dict with the user's old sshPublicKey
- :param new_key_dict: dict with the user's new sshPublicKey
- :type person_uid: string
-
-
- :rtype: Boolean
- :returns: True if the key has been modified, False otherwise.
-
- """
- ret = self.ldap.LdapModify(person_uid, old_attributes_dict, \
- new_key_dict)
- logger.warning("IOTLAB_API AddPersonKey EMPTY - DO NOTHING \r\n ")
- return ret['bool']
-
- def DeleteLeases(self, leases_id_list, slice_hrn):
- """
-
- Deletes several leases, based on their job ids and the slice
- they are associated with. Uses DeleteJobs to delete the jobs
- on OAR. Note that one slice can contain multiple jobs, and in this
- case all the jobs in the leases_id_list MUST belong to ONE slice,
- since there is only one slice hrn provided here.
-
- :param leases_id_list: list of job ids that belong to the slice whose
- slice hrn is provided.
- :param slice_hrn: the slice hrn.
- :type slice_hrn: string
-
- .. warning:: Does not have a return value since there was no easy
- way to handle failure when dealing with multiple job delete. Plus,
- there was no easy way to report it to the user.
-
- """
- logger.debug("IOTLAB_API DeleteLeases leases_id_list %s slice_hrn %s \
- \r\n " %(leases_id_list, slice_hrn))
- for job_id in leases_id_list:
- self.DeleteJobs(job_id, slice_hrn)
-
- return
-
- @staticmethod
- def _process_walltime(duration):
- """ Calculates the walltime in seconds from the duration in H:M:S
- specified in the RSpec.
-
- """
- if duration:
- # Fixing the walltime by adding a few delays.
- # First put the walltime in seconds oarAdditionalDelay = 20;
- # additional delay for /bin/sleep command to
- # take in account prologue and epilogue scripts execution
- # int walltimeAdditionalDelay = 240; additional delay
- #for prologue/epilogue execution = $SERVER_PROLOGUE_EPILOGUE_TIMEOUT
- #in oar.conf
- # Put the duration in seconds first
- #desired_walltime = duration * 60
- desired_walltime = duration
- # JORDAN : removed the 4 minutes added by default in iotlab
- # XXX total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
- total_walltime = desired_walltime # Needed to have slots aligned in MySlice (temp fix) # JA 11/07/2014
- sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
- walltime = []
- #Put the walltime back in str form
- #First get the hours
- walltime.append(str(total_walltime / 3600))
- total_walltime = total_walltime - 3600 * int(walltime[0])
- #Get the remaining minutes
- walltime.append(str(total_walltime / 60))
- total_walltime = total_walltime - 60 * int(walltime[1])
- #Get the seconds
- walltime.append(str(total_walltime))
-
- else:
- logger.log_exc(" __process_walltime duration null")
-
- return walltime, sleep_walltime
-
- @staticmethod
- def _create_job_structure_request_for_OAR(lease_dict):
- """ Creates the structure needed for a correct POST on OAR.
- Makes the timestamp transformation into the appropriate format.
- Sends the POST request to create the job with the resources in
- added_nodes.
-
- """
-
- nodeid_list = []
- reqdict = {}
-
-
- reqdict['workdir'] = '/tmp'
- reqdict['resource'] = "{network_address in ("
-
- for node in lease_dict['added_nodes']:
- logger.debug("\r\n \r\n OARrestapi \t \
- __create_job_structure_request_for_OAR node %s" %(node))
-
- # Get the ID of the node
- nodeid = node
- reqdict['resource'] += "'" + nodeid + "', "
- nodeid_list.append(nodeid)
-
- custom_length = len(reqdict['resource'])- 2
- reqdict['resource'] = reqdict['resource'][0:custom_length] + \
- ")}/nodes=" + str(len(nodeid_list))
-
-
- walltime, sleep_walltime = \
- IotlabShell._process_walltime(\
- int(lease_dict['lease_duration']))
-
-
- reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
- ":" + str(walltime[1]) + ":" + str(walltime[2])
- reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
-
- #In case of a scheduled experiment (not immediate)
- #To run an XP immediately, don't specify date and time in RSpec
- #They will be set to None.
- if lease_dict['lease_start_time'] is not '0':
- #Readable time accepted by OAR
- # converting timestamp to date in the local timezone tz = None
- start_time = datetime.fromtimestamp( \
- int(lease_dict['lease_start_time']), tz=None).\
- strftime(lease_dict['time_format'])
-
- reqdict['reservation'] = str(start_time)
- #If there is not start time, Immediate XP. No need to add special
- # OAR parameters
-
-
- reqdict['type'] = "deploy"
- reqdict['directory'] = ""
- reqdict['name'] = "SFA_" + lease_dict['slice_user']
-
- return reqdict
-
-
- def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
- lease_start_time, lease_duration, slice_user=None):
-
- """
- Create a job request structure based on the information provided
- and post the job on OAR.
- :param added_nodes: list of nodes that belong to the described lease.
- :param slice_name: the slice hrn associated to the lease.
- :param lease_start_time: timestamp of the lease startting time.
- :param lease_duration: lease durationin minutes
-
- """
- lease_dict = {}
- lease_dict['lease_start_time'] = lease_start_time
- lease_dict['lease_duration'] = lease_duration
- lease_dict['added_nodes'] = added_nodes
- lease_dict['slice_name'] = slice_name
- lease_dict['slice_user'] = slice_user
- lease_dict['grain'] = self.GetLeaseGranularity()
- # I don't know why the SFATIME_FORMAT has changed...
- # from sfa.util.sfatime import SFATIME_FORMAT
- # Let's use a fixed format %Y-%m-%d %H:%M:%S
- #lease_dict['time_format'] = self.time_format
- lease_dict['time_format'] = '%Y-%m-%d %H:%M:%S'
-
-
- logger.debug("IOTLAB_API.PY \tLaunchExperimentOnOAR slice_user %s\
- \r\n " %(slice_user))
- #Create the request for OAR
- reqdict = self._create_job_structure_request_for_OAR(lease_dict)
- # first step : start the OAR job and update the job
- logger.debug("IOTLAB_API.PY \tLaunchExperimentOnOAR reqdict %s\
- \r\n " %(reqdict))
-
- answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
- reqdict, slice_user)
- logger.debug("IOTLAB_API \tLaunchExperimentOnOAR jobid %s " %(answer))
+ :Example:
+ {"items": [
+ {"archi": "a8:at86rf231",
+ "mobile": 0,
+ "mobility_type": " ",
+ "network_address": "a8-53.grenoble.iot-lab.info",
+ "site": "paris",
+ "state": "Alive",
+ "uid": "9856",
+ "x": "0.37",
+ "y": "5.44",
+ "z": "2.33"
+ },
+ {"archi= ...}
+ ]
+ {
+ """
+ logger.warning("iotlashell get_nodes")
+ nodes_dict = {}
try:
- jobid = answer['id']
- except KeyError:
- logger.log_exc("IOTLAB_API \tLaunchExperimentOnOAR \
- Impossible to create job %s " %(answer))
- return None
-
-
-
-
- if jobid :
- logger.debug("IOTLAB_API \tLaunchExperimentOnOAR jobid %s \
- added_nodes %s slice_user %s" %(jobid, added_nodes, \
- slice_user))
-
-
- return jobid
-
-
-
-
-
- #Delete the jobs from job_iotlab table
- def DeleteSliceFromNodes(self, slice_record):
- """
-
- Deletes all the running or scheduled jobs of a given slice
- given its record.
-
- :param slice_record: record of the slice, must contain oar_job_id, user
- :type slice_record: dict
-
- :returns: dict of the jobs'deletion status. Success= True, Failure=
- False, for each job id.
+ nodes = experiment.info_experiment(self.api)
+ except HTTPError as err:
+ logger.warning("iotlashell get_nodes error %s" % err.reason)
+ return {'error': err.reason}
+ for node in nodes['items']:
+ nodes_dict[node['network_address']] = node
+ return nodes_dict
+
+ def get_users(self):
+ """
+ Get all LDAP users
+ :returns: users with LDAP attributes
:rtype: dict
- """
- logger.debug("IOTLAB_API \t DeleteSliceFromNodes %s "
- % (slice_record))
-
- if isinstance(slice_record['oar_job_id'], list):
- oar_bool_answer = {}
- for job_id in slice_record['oar_job_id']:
- ret = self.DeleteJobs(job_id, slice_record['user'])
-
- oar_bool_answer.update(ret)
-
- else:
- oar_bool_answer = self.DeleteJobs(slice_record['oar_job_id'],
- slice_record['user'])
-
- return oar_bool_answer
-
-
-
- def GetLeaseGranularity(self):
- """ Returns the granularity of an experiment in the Iotlab testbed.
- OAR uses seconds for experiments duration , the granulaity is also
- defined in seconds.
- Experiments which last less than 10 min (600 sec) are invalid"""
- return self.grain
-
-
-
- @staticmethod
- def filter_lease(reservation_list, filter_type, filter_value ):
- """Filters the lease reservation list by removing each lease whose
- filter_type is not equal to the filter_value provided. Returns the list
- of leases in one slice, defined by the slice_hrn if filter_type
- is 'slice_hrn'. Otherwise, returns all leases scheduled starting from
- the filter_value if filter_type is 't_from'.
-
- :param reservation_list: leases list
- :type reservation_list: list of dictionary
- :param filter_type: can be either 't_from' or 'slice hrn'
- :type filter_type: string
- :param filter_value: depending on the filter_type, can be the slice_hrn
- or can be defining a timespan.
- :type filter_value: if filter_type is 't_from', filter_value is int.
- if filter_type is 'slice_hrn', filter_value is a string.
-
-
- :returns: filtered_reservation_list, contains only leases running or
- scheduled in the given slice (wanted_slice).Dict keys are
- 'lease_id','reserved_nodes','slice_id', 'state', 'user',
- 'component_id_list','slice_hrn', 'resource_ids', 't_from', 't_until'
- :rtype: list of dict
-
- """
- filtered_reservation_list = list(reservation_list)
- logger.debug("IOTLAB_API \t filter_lease_name reservation_list %s" \
- % (reservation_list))
+ :Example:
+ [{"firstName":"Frederic",
+ "lastName":"Saint-marcel",
+ "email":"frederic.saint-marcel@inria.fr",
+ "structure":"INRIA",
+ "city":"Grenoble",
+ "country":"France",
+ "login":"saintmar",
+ sshPublicKeys":["ssh-rsa AAAAB3..."],
+ "motivations":"test SFA",
+ "validate":true,
+ "admin":true,
+ "createTimeStamp":"20120911115247Z"},
+ {"firstName":"Julien",
+ ...
+ }
+ ]
+ """
+ logger.warning("iotlashell get_users")
+ users_dict = {}
try:
- for reservation in reservation_list:
- if \
- (filter_type is 'slice_hrn' and \
- reservation['slice_hrn'] != filter_value) or \
- (filter_type is 't_from' and \
- reservation['t_from'] > filter_value):
- filtered_reservation_list.remove(reservation)
- except TypeError:
- logger.log_exc("Iotlabshell filter_lease : filter_type %s \
- filter_value %s not in lease" %(filter_type,
- filter_value))
-
- return filtered_reservation_list
-
- # @staticmethod
- # def filter_lease_start_time(reservation_list, timespan):
- # """Filters the lease reservation list by removing each lease whose
- # slice_hrn is not the wanted_slice provided. Returns the list of leases
- # in one slice (wanted_slice).
-
- # """
- # filtered_reservation_list = list(reservation_list)
-
- # for reservation in reservation_list:
- # if 't_from' in reservation and \
- # reservation['t_from'] > timespan:
- # filtered_reservation_list.remove(reservation)
-
- # return filtered_reservation_list
-
-
-
-
-
-
-#TODO FUNCTIONS SECTION 04/07/2012 SA
-
-
- ##TODO UpdateSlice 04/07/2012 SA || Commented out 28/05/13 SA
- ##Funciton should delete and create another job since oin iotlab slice=job
- #def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
- #"""Updates the parameters of an existing slice with the values in
- #slice_fields.
- #Users may only update slices of which they are members.
- #PIs may update any of the slices at their sites, or any slices of
- #which they are members. Admins may update any slice.
- #Only PIs and admins may update max_nodes. Slices cannot be renewed
- #(by updating the expires parameter) more than 8 weeks into the future.
- #Returns 1 if successful, faults otherwise.
- #FROM PLC API DOC
-
- #"""
- #logger.warning("IOTLAB_API UpdateSlice EMPTY - DO NOTHING \r\n ")
- #return
-
- #Unused SA 30/05/13, we only update the user's key or we delete it.
- ##TODO UpdatePerson 04/07/2012 SA
- #def UpdatePerson(self, iotlab_hrn, federated_hrn, person_fields=None):
- #"""Updates a person. Only the fields specified in person_fields
- #are updated, all other fields are left untouched.
- #Users and techs can only update themselves. PIs can only update
- #themselves and other non-PIs at their sites.
- #Returns 1 if successful, faults otherwise.
- #FROM PLC API DOC
-
- #"""
- ##new_row = FederatedToIotlab(iotlab_hrn, federated_hrn)
- ##self.leases_db.testbed_session.add(new_row)
- ##self.leases_db.testbed_session.commit()
-
- #logger.debug("IOTLAB_API UpdatePerson EMPTY - DO NOTHING \r\n ")
- #return
-
-
-
-
- #TODO : test
- def DeleteKey(self, user_record, key_string):
- """Deletes a key in the LDAP entry of the specified user.
-
- Removes the key_string from the user's key list and updates the LDAP
- user's entry with the new key attributes.
-
- :param key_string: The ssh key to remove
- :param user_record: User's record
- :type key_string: string
- :type user_record: dict
- :returns: True if sucessful, False if not.
- :rtype: Boolean
-
- """
-
- all_user_keys = user_record['keys']
- all_user_keys.remove(key_string)
- new_attributes = {'sshPublicKey':all_user_keys}
- ret = self.ldap.LdapModifyUser(user_record, new_attributes)
- logger.debug("IOTLAB_API DeleteKey %s- " % (ret))
- return ret['bool']
-
-
-
-
-
-
-
-
- #Update slice unused, therefore sfa_fields_to_iotlab_fields unused
- #SA 30/05/13
- #@staticmethod
- #def sfa_fields_to_iotlab_fields(sfa_type, hrn, record):
- #"""
- #"""
-
- #iotlab_record = {}
- ##for field in record:
- ## iotlab_record[field] = record[field]
-
- #if sfa_type == "slice":
- ##instantion used in get_slivers ?
- #if not "instantiation" in iotlab_record:
- #iotlab_record["instantiation"] = "iotlab-instantiated"
- ##iotlab_record["hrn"] = hrn_to_pl_slicename(hrn)
- ##Unused hrn_to_pl_slicename because Iotlab's hrn already
- ##in the appropriate form SA 23/07/12
- #iotlab_record["hrn"] = hrn
- #logger.debug("IOTLAB_API.PY sfa_fields_to_iotlab_fields \
- #iotlab_record %s " %(iotlab_record['hrn']))
- #if "url" in record:
- #iotlab_record["url"] = record["url"]
- #if "description" in record:
- #iotlab_record["description"] = record["description"]
- #if "expires" in record:
- #iotlab_record["expires"] = int(record["expires"])
-
- ##nodes added by OAR only and then imported to SFA
- ##elif type == "node":
- ##if not "hostname" in iotlab_record:
- ##if not "hostname" in record:
- ##raise MissingSfaInfo("hostname")
- ##iotlab_record["hostname"] = record["hostname"]
- ##if not "model" in iotlab_record:
- ##iotlab_record["model"] = "geni"
-
- ##One authority only
- ##elif type == "authority":
- ##iotlab_record["login_base"] = hrn_to_iotlab_login_base(hrn)
-
- ##if not "name" in iotlab_record:
- ##iotlab_record["name"] = hrn
-
- ##if not "abbreviated_name" in iotlab_record:
- ##iotlab_record["abbreviated_name"] = hrn
-
- ##if not "enabled" in iotlab_record:
- ##iotlab_record["enabled"] = True
-
- ##if not "is_public" in iotlab_record:
- ##iotlab_record["is_public"] = True
-
- #return iotlab_record
-
-
-
-
-
-
-
-
-
-
+ users = self.api.method('admin/users')
+ except HTTPError as err:
+ logger.warning("iotlashell get_users error %s" % err.reason)
+ return {'error': err.reason}
+ for user in users:
+ users_dict[user['email']] = user
+ return users_dict
+
+ def reserve_nodes(self, login, exp_name,
+ nodes_list, start_time, duration):
+ """
+ Submit a physical experiment (nodes list) and reservation date.
+ """
+ # pylint:disable=W0212,R0913,E1123
+ logger.warning("iotlashell reserve_nodes")
+ exp_file = helpers.FilesDict()
+ _experiment = experiment._Experiment(exp_name, duration, start_time)
+ _experiment.type = 'physical'
+ _experiment.nodes = nodes_list
+ exp_file['new_exp.json'] = helpers.json_dumps(_experiment)
+ try:
+ return self.api.method('admin/experiments?user=%s' % login,
+ 'post',
+ files=exp_file)
+ except HTTPError as err:
+ logger.warning("iotlashell reserve_nodes error %s" % err.reason)
+ return {'error': err.reason}
+
+ def get_reserved_nodes(self):
+ """
+ Get all OAR jobs not terminated.
+
+ :Example:
+ {"total":"1907",
+ "items":[
+ {"id":9960,
+ "resources": ["m3-16.devgrenoble.iot-lab.info",...],
+ "duration":"36000",
+ "name":"test_sniffer",
+ "state":"Running",
+ "owner":"saintmar",
+ "nb_resources":10,
+ "date":1427966468},
+ {"id": ...}
+ ]
+ }
+ """
+ logger.warning("iotlashell get_reserved_nodes")
+ reserved_nodes_dict = {}
+ request = ('admin/experiments?state='
+ 'Running,Waiting,toAckReservation,'
+ 'toLaunch,Launching')
+ try:
+ experiments = self.api.method(request)
+ except HTTPError as err:
+ logger.warning("iotlashell get_reserved_nodes error %s" %
+ err.reason)
+ return {'error': err.reason}
+ for exp in experiments['items']:
+            # BUG IN OAR REST API: jobs with a reservation don't return
+            # the resources attribute list, so we issue a second request
+            # to retrieve the job's resources.
+ exp_nodes = self.api.method('admin/experiments/%d' % exp['id'])
+ exp['resources'] = exp_nodes['nodes']
+            # BUG: ASAP ("as soon as possible") jobs carry no date information
+ if exp['date'] == "as soon as possible":
+ exp['date'] = 0
+ reserved_nodes_dict[exp['id']] = exp
+ return reserved_nodes_dict
+
+ def add_user(self, slice_user):
+ """
+ Add LDAP user
+ """
+ # pylint:disable=E1123
+ logger.warning("iotlashell add_user")
+ # single account creation
+ user = {"type": "SA",
+ "city": "To be defined",
+ "country": "To be defined",
+ "motivations": "SFA federation"}
+ email = slice_user['email']
+ user['email'] = email
+ user['sshPublicKey'] = slice_user['keys'][0]
+ # ex : onelab.inria
+ user['structure'] = slice_user['slice_record']['authority']
+ email = (email.split('@'))[0]
+ user['firstName'] = email.split('.')[0]
+ try:
+ user['lastName'] = email.split('.')[1]
+ except IndexError:
+ user['lastName'] = email.split('.')[0]
+ try:
+ self.api.method('admin/users', 'post',
+ json=user)
+ except HTTPError as err:
+ logger.warning("iotlashell add_user error %s" % err.reason)
+++ /dev/null
-"""
-This file defines the IotlabSlices class by which all the slice checkings
-upon lease creation are done.
-"""
-from sfa.util.xrn import get_authority, urn_to_hrn, hrn_to_urn
-from sfa.util.sfalogging import logger
-
-MAXINT = 2L**31-1
-
-
-class IotlabSlices:
- """
- This class is responsible for checking the slice when creating a
- lease or a sliver. Those checks include verifying that the user is valid,
- that the slice is known from the testbed or from our peers, that the list
- of nodes involved has not changed (in this case the lease is modified
- accordingly).
- """
- rspec_to_slice_tag = {'max_rate': 'net_max_rate'}
-
- def __init__(self, driver):
- """
- Get the reference to the driver here.
- """
- self.driver = driver
-
- def get_peer(self, xrn):
- """
- Finds the authority of a resource based on its xrn.
- If the authority is Iotlab (local) return None,
- Otherwise, look up in the DB if Iotlab is federated with this site
- authority and returns its DB record if it is the case.
-
- :param xrn: resource's xrn
- :type xrn: string
- :returns: peer record
- :rtype: dict
-
- """
- hrn, hrn_type = urn_to_hrn(xrn)
- #Does this slice belong to a local site or a peer iotlab site?
- peer = None
-
- # get this slice's authority (site)
- slice_authority = get_authority(hrn)
- #Iotlab stuff
- #This slice belongs to the current site
- if slice_authority == self.driver.testbed_shell.root_auth:
- site_authority = slice_authority
- return None
-
- site_authority = get_authority(slice_authority).lower()
- # get this site's authority (sfa root authority or sub authority)
-
- logger.debug("IOTLABSLICES \t get_peer slice_authority %s \
- site_authority %s hrn %s"
- % (slice_authority, site_authority, hrn))
-
- # check if we are already peered with this site_authority
- #if so find the peer record
- peers = self.driver.GetPeers(peer_filter=site_authority)
- for peer_record in peers:
- if site_authority == peer_record.hrn:
- peer = peer_record
- logger.debug(" IOTLABSLICES \tget_peer peer %s " % (peer))
- return peer
-
- def get_sfa_peer(self, xrn):
- """Returns the authority name for the xrn or None if the local site
- is the authority.
-
- :param xrn: the xrn of the resource we are looking the authority for.
- :type xrn: string
- :returns: the resources's authority name.
- :rtype: string
-
- """
- hrn, hrn_type = urn_to_hrn(xrn)
-
- # return the authority for this hrn or None if we are the authority
- sfa_peer = None
- slice_authority = get_authority(hrn)
- site_authority = get_authority(slice_authority)
-
- if site_authority != self.driver.hrn:
- sfa_peer = site_authority
-
- return sfa_peer
-
- def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
- """
- Compare requested leases with the leases already scheduled/
- running in OAR. If necessary, delete and recreate modified leases,
- and delete no longer requested ones.
-
- :param sfa_slice: sfa slice record
- :param requested_jobs_dict: dictionary of requested leases
- :param peer: sfa peer record
-
- :type sfa_slice: dict
- :type requested_jobs_dict: dict
- :type peer: dict
- :returns: leases list of dictionary
- :rtype: list
-
- """
-
- logger.debug("IOTLABSLICES verify_slice_leases sfa_slice %s "
- % (sfa_slice))
- #First get the list of current leases from OAR
- leases = self.driver.GetLeases({'slice_hrn': sfa_slice['hrn']})
- logger.debug("IOTLABSLICES verify_slice_leases requested_jobs_dict %s \
- leases %s " % (requested_jobs_dict, leases))
-
- current_nodes_reserved_by_start_time = {}
- requested_nodes_by_start_time = {}
- leases_by_start_time = {}
- reschedule_jobs_dict = {}
-
- #Create reduced dictionary with key start_time and value
- # the list of nodes
- #-for the leases already registered by OAR first
- # then for the new leases requested by the user
-
- #Leases already scheduled/running in OAR
- for lease in leases:
- current_nodes_reserved_by_start_time[lease['t_from']] = \
- lease['reserved_nodes']
- leases_by_start_time[lease['t_from']] = lease
-
- #First remove job whose duration is too short
- for job in requested_jobs_dict.values():
- job['duration'] = \
- str(int(job['duration']) \
- * self.driver.testbed_shell.GetLeaseGranularity())
- if job['duration'] < \
- self.driver.testbed_shell.GetLeaseGranularity():
- del requested_jobs_dict[job['start_time']]
-
- #Requested jobs
- for start_time in requested_jobs_dict:
- requested_nodes_by_start_time[int(start_time)] = \
- requested_jobs_dict[start_time]['hostname']
- #Check if there is any difference between the leases already
- #registered in OAR and the requested jobs.
- #Difference could be:
- #-Lease deleted in the requested jobs
- #-Added/removed nodes
- #-Newly added lease
-
- logger.debug("IOTLABSLICES verify_slice_leases \
- requested_nodes_by_start_time %s \
- "% (requested_nodes_by_start_time))
- #Find all deleted leases
- start_time_list = \
- list(set(leases_by_start_time.keys()).\
- difference(requested_nodes_by_start_time.keys()))
- deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
- for start_time in start_time_list]
-
-
- #Find added or removed nodes in exisiting leases
- for start_time in requested_nodes_by_start_time:
- logger.debug("IOTLABSLICES verify_slice_leases start_time %s \
- "%( start_time))
- if start_time in current_nodes_reserved_by_start_time:
-
- # JORDAN : if we request the same nodes: do nothing
- if requested_nodes_by_start_time[start_time] == \
- current_nodes_reserved_by_start_time[start_time]:
- continue
-
- else:
- update_node_set = \
- set(requested_nodes_by_start_time[start_time])
- added_nodes = \
- update_node_set.difference(\
- current_nodes_reserved_by_start_time[start_time])
- shared_nodes = \
- update_node_set.intersection(\
- current_nodes_reserved_by_start_time[start_time])
- old_nodes_set = \
- set(\
- current_nodes_reserved_by_start_time[start_time])
- removed_nodes = \
- old_nodes_set.difference(\
- requested_nodes_by_start_time[start_time])
- logger.debug("IOTLABSLICES verify_slice_leases \
- shared_nodes %s added_nodes %s removed_nodes %s"\
- %(shared_nodes, added_nodes,removed_nodes ))
- #If the lease is modified, delete it before
- #creating it again.
- #Add the deleted lease job id in the list
- #WARNING :rescheduling does not work if there is already
- # 2 running/scheduled jobs because deleting a job
- #takes time SA 18/10/2012
- if added_nodes or removed_nodes:
- deleted_leases.append(\
- leases_by_start_time[start_time]['lease_id'])
- #Reschedule the job
- if added_nodes or shared_nodes:
- reschedule_jobs_dict[str(start_time)] = \
- requested_jobs_dict[str(start_time)]
-
- else:
- #New lease
-
- job = requested_jobs_dict[str(start_time)]
- logger.debug("IOTLABSLICES \
- NEWLEASE slice %s job %s"
- % (sfa_slice, job))
- job_id = self.driver.AddLeases(
- job['hostname'],
- sfa_slice, int(job['start_time']),
- int(job['duration']))
-
- # Removed by jordan
- #if job_id is not None:
- # new_leases = self.driver.GetLeases(login=
- # sfa_slice['login'])
- # for new_lease in new_leases:
- # leases.append(new_lease)
-
- #Deleted leases are the ones with lease id not declared in the Rspec
- if deleted_leases:
- self.driver.testbed_shell.DeleteLeases(deleted_leases,
- sfa_slice['login'])
- #self.driver.testbed_shell.DeleteLeases(deleted_leases,
- # sfa_slice['user']['uid'])
- logger.debug("IOTLABSLICES \
- verify_slice_leases slice %s deleted_leases %s"
- % (sfa_slice, deleted_leases))
-
- if reschedule_jobs_dict:
- for start_time in reschedule_jobs_dict:
- job = reschedule_jobs_dict[start_time]
- self.driver.AddLeases(
- job['hostname'],
- sfa_slice, int(job['start_time']),
- int(job['duration']))
-
- # Added by Jordan: until we find a better solution, always update the list of leases
- return self.driver.GetLeases(login= sfa_slice['login'])
- #return leases
-
- def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
- """Check for wanted and unwanted nodes in the slice.
-
- Removes nodes and associated leases that the user does not want anymore
- by deleteing the associated job in OAR (DeleteSliceFromNodes).
- Returns the nodes' hostnames that are going to be in the slice.
-
- :param sfa_slice: slice record. Must contain node_ids and list_node_ids.
-
- :param requested_slivers: list of requested nodes' hostnames.
- :param peer: unused so far.
-
- :type sfa_slice: dict
- :type requested_slivers: list
- :type peer: string
-
- :returns: list requested nodes hostnames
- :rtype: list
-
- .. warning:: UNUSED SQA 24/07/13
- .. seealso:: DeleteSliceFromNodes
- .. todo:: check what to do with the peer? Can not remove peer nodes from
- slice here. Anyway, in this case, the peer should have gotten the
- remove request too.
-
- """
- current_slivers = []
- deleted_nodes = []
-
- if 'node_ids' in sfa_slice:
- nodes = self.driver.testbed_shell.GetNodes(
- sfa_slice['list_node_ids'],
- ['hostname'])
- current_slivers = [node['hostname'] for node in nodes]
-
- # remove nodes not in rspec
- deleted_nodes = list(set(current_slivers).
- difference(requested_slivers))
-
- logger.debug("IOTLABSLICES \tverify_slice_nodes slice %s\
- \r\n \r\n deleted_nodes %s"
- % (sfa_slice, deleted_nodes))
-
- if deleted_nodes:
- #Delete the entire experience
- self.driver.testbed_shell.DeleteSliceFromNodes(sfa_slice)
- return nodes
-
- def verify_slice(self, slice_hrn, slice_record, sfa_peer):
- """Ensures slice record exists.
-
- The slice record must exist either in Iotlab or in the other
- federated testbed (sfa_peer). If the slice does not belong to Iotlab,
- check if the user already exists in LDAP. In this case, adds the slice
- to the sfa DB and associates its LDAP user.
-
- :param slice_hrn: slice's name
- :param slice_record: sfa record of the slice
- :param sfa_peer: name of the peer authority if any.(not Iotlab).
-
- :type slice_hrn: string
- :type slice_record: dictionary
- :type sfa_peer: string
-
- .. seealso:: AddSlice
-
-
- """
-
- slicename = slice_hrn
- sfa_slice = None
-
- # check if slice belongs to Iotlab
- if slicename.startswith("iotlab"):
- slices_list = self.driver.GetSlices(slice_filter=slicename,
- slice_filter_type='slice_hrn')
-
- if slices_list:
- for sl in slices_list:
-
- logger.debug("IOTLABSLICES \t verify_slice slicename %s \
- slices_list %s sl %s \r slice_record %s"
- % (slicename, slices_list, sl, slice_record))
- sfa_slice = sl
- sfa_slice.update(slice_record)
-
- else:
- #Search for user in ldap based on email SA 14/11/12
- ldap_user = self.driver.testbed_shell.ldap.LdapFindUser(\
- slice_record['user'])
- logger.debug(" IOTLABSLICES \tverify_slice Oups \
- slice_record %s sfa_peer %s ldap_user %s"
- % (slice_record, sfa_peer, ldap_user))
- #User already registered in ldap, meaning user should be in SFA db
- #and hrn = sfa_auth+ uid
- sfa_slice = {'hrn': slicename,
- 'node_list': [],
- 'authority': slice_record['authority'],
- 'gid': slice_record['gid'],
- #'slice_id': slice_record['record_id'],
- 'reg-researchers': slice_record['reg-researchers'],
- 'urn': hrn_to_urn(slicename,'slice'),
- #'peer_authority': str(sfa_peer)
- }
-
- if ldap_user:
-# hrn = self.driver.testbed_shell.root_auth + '.' \
-# + ldap_user['uid']
- for hrn in slice_record['reg-researchers']:
- user = self.driver.get_user_record(hrn)
- if user:
- break
-
- logger.debug(" IOTLABSLICES \tverify_slice hrn %s USER %s"
- % (hrn, user))
-
- # add the external slice to the local SFA iotlab DB
- if sfa_slice:
- self.driver.AddSlice(sfa_slice, user)
-
- logger.debug("IOTLABSLICES \tverify_slice ADDSLICE OK")
- return sfa_slice
-
-
- def verify_persons(self, slice_hrn, slice_record, users, options=None):
- """Ensures the users in users list exist and are enabled in LDAP. Adds
- person if needed (AddPerson).
-
- Checking that a user exist is based on the user's email. If the user is
- still not found in the LDAP, it means that the user comes from another
- federated testbed. In this case an account has to be created in LDAP
- so as to enable the user to use the testbed, since we trust the testbed
- he comes from. This is done by calling AddPerson.
-
- :param slice_hrn: slice name
- :param slice_record: record of the slice_hrn
- :param users: users is a record list. Records can either be
- local records or users records from known and trusted federated
- sites.If the user is from another site that iotlab doesn't trust
- yet, then Resolve will raise an error before getting to allocate.
-
- :type slice_hrn: string
- :type slice_record: string
- :type users: list
-
- .. seealso:: AddPerson
- .. note:: Removed unused peer and sfa_peer parameters. SA 18/07/13.
- """
- if options is None: options={}
- user = slice_record['user']
- logger.debug("IOTLABSLICES \tverify_persons \tuser %s " % user)
- person = {
- 'peer_person_id': None,
- 'mail' : user['email'],
- 'email' : user['email'],
- 'key_ids' : user.get('key_ids', []),
- 'hrn' : user['hrn'],
- }
- if 'first_name' in user:
- person['first_name'] = user['first_name']
- if 'last_name' in user:
- person['last_name'] = user['last_name']
- if 'person_id' in user:
- person['person_id'] = user['person_id']
- if user['keys']:
- # Only one key is kept for IoTLAB
- person['pkey'] = user['keys'][0]
- # SFA DB (if user already exist we do nothing)
- self.driver.add_person_to_db(person)
- # Iot-LAB LDAP (if user already exist we do nothing)
- ret = self.driver.AddPerson(person)
- # user uid information is only in LDAP
- # Be carreful : global scope of dict slice_record in driver
- slice_record['login'] = ret['uid']
- return person
-
-
-
- def verify_keys(self, persons, users, peer, options=None):
- """
- .. warning:: unused
- """
- if options is None: options={}
- # existing keys
- key_ids = []
- for person in persons:
- key_ids.extend(person['key_ids'])
- keylist = self.driver.GetKeys(key_ids, ['key_id', 'key'])
-
- keydict = {}
- for key in keylist:
- keydict[key['key']] = key['key_id']
- existing_keys = keydict.keys()
-
- persondict = {}
- for person in persons:
- persondict[person['email']] = person
-
- # add new keys
- requested_keys = []
- updated_persons = []
- users_by_key_string = {}
- for user in users:
- user_keys = user.get('keys', [])
- updated_persons.append(user)
- for key_string in user_keys:
- users_by_key_string[key_string] = user
- requested_keys.append(key_string)
- if key_string not in existing_keys:
- key = {'key': key_string, 'key_type': 'ssh'}
- #try:
- ##if peer:
- #person = persondict[user['email']]
- #self.driver.testbed_shell.UnBindObjectFromPeer(
- # 'person',person['person_id'],
- # peer['shortname'])
- ret = self.driver.testbed_shell.AddPersonKey(
- user['email'], key)
- #if peer:
- #key_index = user_keys.index(key['key'])
- #remote_key_id = user['key_ids'][key_index]
- #self.driver.testbed_shell.BindObjectToPeer('key', \
- #key['key_id'], peer['shortname'], \
- #remote_key_id)
-
- # remove old keys (only if we are not appending)
- append = options.get('append', True)
- if append is False:
- removed_keys = set(existing_keys).difference(requested_keys)
- for key in removed_keys:
- #if peer:
- #self.driver.testbed_shell.UnBindObjectFromPeer('key', \
- #key, peer['shortname'])
-
- user = users_by_key_string[key]
- self.driver.testbed_shell.DeleteKey(user, key)
-
- return
+++ /dev/null
-""" specialized Xrn class for Iotlab. SA
-"""
-import re
-from sfa.util.xrn import Xrn
-
-def xrn_to_hostname(xrn):
- """Returns a node's hostname from its xrn.
- :param xrn: The nodes xrn identifier.
- :type xrn: Xrn (from sfa.util.xrn)
-
- :returns: node's hostname.
- :rtype: string
-
- """
- return Xrn.unescape(Xrn(xrn=xrn, type='node').get_leaf())
-
-
-def xrn_object(root_auth, hostname):
- """Creates a valid xrn object from the node's hostname and the authority
- of the SFA server.
-
- :param hostname: the node's hostname.
- :param root_auth: the SFA root authority.
- :type hostname: string
- :type root_auth: string
-
- :returns: the iotlab node's xrn
- :rtype: Xrn
-
- """
- return Xrn('.'.join([root_auth, Xrn.escape(hostname)]), type='node')
-
-# temporary helper functions to use this module instead of namespace
-def hostname_to_hrn (auth, hostname):
- """Turns node hostname into hrn.
- :param auth: Site authority.
- :type auth: string
- :param hostname: Node hostname.
- :type hostname: string.
-
- :returns: Node's hrn.
- :rtype: string
- """
- return IotlabXrn(auth=auth, hostname=hostname).get_hrn()
-
-def hostname_to_urn(auth, hostname):
- """Turns node hostname into urn.
- :param auth: Site authority.
- :type auth: string
- :param hostname: Node hostname.
- :type hostname: string.
-
- :returns: Node's urn.
- :rtype: string
- """
- return IotlabXrn(auth=auth, hostname=hostname).get_urn()
-
-# def slicename_to_hrn (auth_hrn, slicename):
- # return IotlabXrn(auth=auth_hrn, slicename=slicename).get_hrn()
-
-# def hrn_to_iotlab_slicename (hrn):
-# return IotlabXrn(xrn=hrn, type='slice').iotlab_slicename()
-
-# def hrn_to_iotlab_authname (hrn):
-# return IotlabXrn(xrn=hrn, type='any').iotlab_authname()
-
-
-class IotlabXrn (Xrn):
- """
- Defines methods to turn a hrn/urn into a urn/hrn, or to get the name
- of the slice/user from the hrn.
- """
- @staticmethod
- def site_hrn (auth):
- """Returns the site hrn, which is also the testbed authority in
- iotlab/cortexlab.
- """
- return auth
-
- def __init__ (self, auth=None, hostname=None, login=None,
- slicename=None, **kwargs):
- #def hostname_to_hrn(auth_hrn, login_base, hostname):
- if hostname is not None:
- self.type = 'node'
- # keep only the first part of the DNS name
- # escape the '.' in the hostname
- self.hrn = '.'.join( [auth, Xrn.escape(hostname)] )
- self.hrn_to_urn()
-
- elif login is not None:
- self.type = 'person'
- self.hrn = '.'.join([auth, login])
- self.hrn_to_urn()
- #def slicename_to_hrn(auth_hrn, slicename):
- elif slicename is not None:
- self.type = 'slice'
- slicename = '_'.join([login, "slice"])
- self.hrn = '.'.join([auth, slicename])
- self.hrn_to_urn()
- # split at the first _
-
- else:
- Xrn.__init__ (self, **kwargs)
-
-
- def iotlab_slicename (self):
- """Returns the slice name from an iotlab slice hrn.
-
- :rtype: string
- :returns: slice name.
- """
-
- self._normalize()
- leaf = self.leaf
- sliver_id_parts = leaf.split(':')
- name = sliver_id_parts[0]
- name = re.sub('[^a-zA-Z0-9_]', '', name)
- return name
-
- #def hrn_to_pl_authname(hrn):
- # def iotlab_authname (self):
- # self._normalize()
- # return self.authority[-1]
-
- # def iotlab_login_base (self):
- # self._normalize()
- # if self.type and self.type.startswith('authority'):
- # base = self.leaf
- # else:
- # base = self.authority[-1]
-
- # # Fix up names of GENI Federates
- # base = base.lower()
- # base = re.sub('\\\[^a-zA-Z0-9]', '', base)
-
- # if len(base) > 20:
- # base = base[len(base)-20:]
-
- # return base
# e.g. registry calls this 'reg-researchers'
# while some drivers call this 'researcher'
# we need to make sure that both keys appear and are the same
-def _normalize_input (record, reg_key, driver_key):
+def _normalize_input(record, reg_key, driver_key):
# this looks right, use this for both keys
if reg_key in record:
# and issue a warning if they were both set and different
# as we're overwriting some user data here
if driver_key in record:
- logger.warning ("normalize_input: incoming record has both values, using %s"%reg_key)
- record[driver_key]=record[reg_key]
+ logger.warning ("normalize_input: incoming record has both values, using {}"
+ .format(reg_key))
+ record[driver_key] = record[reg_key]
# we only have one key set, duplicate for the other one
elif driver_key in record:
- logger.warning ("normalize_input: you should use '%s' instead of '%s'"%(reg_key,driver_key))
- record[reg_key]=record[driver_key]
+ logger.warning ("normalize_input: you should use '{}' instead of '{}'"
+ .format(reg_key, driver_key))
+ record[reg_key] = record[driver_key]
def normalize_input_record (record):
_normalize_input (record, 'reg-researchers','researcher')
# xxx the keys thing could use a little bit more attention:
# some parts of the code are using 'keys' while they should use 'reg-keys'
# but I run out of time for now
- if 'reg-keys' in record: record['keys']=record['reg-keys']
+ if 'reg-keys' in record:
+ record['keys'] = record['reg-keys']
return record
class RegistryManager:
def __init__ (self, config):
- logger.info("Creating RegistryManager[%s]"%id(self))
+ logger.info("Creating RegistryManager[{}]".format(id(self)))
# The GENI GetVersion call
def GetVersion(self, api, options):
# get record info
dbsession = api.dbsession()
- record=dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first()
+ record = dbsession.query(RegRecord).filter_by(type=type, hrn=hrn).first()
if not record:
- raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
+ raise RecordNotFound("hrn={}, type={}".format(hrn, type))
# get the callers gid
# if caller_xrn is not specified assume the caller is the record
else:
caller_record = dbsession.query(RegRecord).filter_by(hrn=caller_hrn).first()
if not caller_record:
- raise RecordNotFound("Unable to associated caller (hrn=%s, type=%s) with credential for (hrn: %s, type: %s)"%(caller_hrn, caller_type, hrn, type))
+ raise RecordNotFound(
+ "Unable to associated caller (hrn={}, type={}) with credential for (hrn: {}, type: {})"
+ .format(caller_hrn, caller_type, hrn, type))
caller_gid = GID(string=caller_record.gid)
object_hrn = record.get_gid_object().get_hrn()
rights = api.auth.determine_user_rights(caller_hrn, record)
# make sure caller has rights to this object
if rights.is_empty():
- raise PermissionError("%s has no rights to %s (%s)" % \
- (caller_hrn, object_hrn, xrn))
+ raise PermissionError("{} has no rights to {} ({})"
+ .format(caller_hrn, object_hrn, xrn))
object_gid = GID(string=record.gid)
new_cred = Credential(subject = object_gid.get_subject())
new_cred.set_gid_caller(caller_gid)
local_records = dbsession.query(RegRecord).filter(RegRecord.hrn.in_(local_hrns))
if type:
local_records = local_records.filter_by(type=type)
- local_records=local_records.all()
+ local_records = local_records.all()
for local_record in local_records:
- augment_with_sfa_builtins (local_record)
+ augment_with_sfa_builtins(local_record)
- logger.info("Resolve, (details=%s,type=%s) local_records=%s "%(details,type,local_records))
+ logger.info("Resolve, (details={}, type={}) local_records={} "
+ .format(details, type, local_records))
local_dicts = [ record.__dict__ for record in local_records ]
if details:
# used to be in the driver code, sounds like a poorman thing though
def solve_neighbour_url (record):
if not record.type.startswith('authority'): return
- hrn=record.hrn
+ hrn = record.hrn
for neighbour_dict in [ api.aggregates, api.registries ]:
if hrn in neighbour_dict:
record.url=neighbour_dict[hrn].get_url()
return
- for record in local_records: solve_neighbour_url (record)
+ for record in local_records:
+ solve_neighbour_url (record)
# convert local record objects to dicts for xmlrpc
# xxx somehow here calling dict(record) issues a weird error
# however record.todict() seems to work fine
# records.extend( [ dict(record) for record in local_records ] )
- records.extend( [ record.todict(exclude_types=[InstrumentedList]) for record in local_records ] )
+ records.extend( [ record.record_to_dict(exclude_types=(InstrumentedList,)) for record in local_records ] )
if not records:
raise RecordNotFound(str(hrns))
def List (self, api, xrn, origin_hrn=None, options=None):
if options is None: options={}
- dbsession=api.dbsession()
+ dbsession = api.dbsession()
# load all know registry names into a prefix tree and attempt to find
# the longest matching prefix
hrn, type = urn_to_hrn(xrn)
record_dicts = record_list
# if we still have not found the record yet, try the local registry
-# logger.debug("before trying local records, %d foreign records"% len(record_dicts))
+# logger.debug("before trying local records, {} foreign records".format(len(record_dicts)))
if not record_dicts:
recursive = False
if ('recursive' in options and options['recursive']):
raise MissingAuthority(hrn)
if recursive:
records = dbsession.query(RegRecord).filter(RegRecord.hrn.startswith(hrn)).all()
-# logger.debug("recursive mode, found %d local records"%(len(records)))
+# logger.debug("recursive mode, found {} local records".format(len(records)))
else:
records = dbsession.query(RegRecord).filter_by(authority=hrn).all()
-# logger.debug("non recursive mode, found %d local records"%(len(records)))
+# logger.debug("non recursive mode, found {} local records".format(len(records)))
# so that sfi list can show more than plain names...
- for record in records: augment_with_sfa_builtins (record)
- record_dicts=[ record.todict(exclude_types=[InstrumentedList]) for record in records ]
+ for record in records:
+ # xxx mystery - see also the bottom of model.py
+ # resulting records have been observed to not always have
+ # their __dict__ actually in line with the object's contents;
+ # was first observed with authorities' 'name' column
+ # that would be missing from result as received by client
+ augment_with_sfa_builtins(record)
+ record_dicts = [ record.record_to_dict(exclude_types=(InstrumentedList,)) for record in records ]
return record_dicts
# Add the email of the user to SubjectAltName in the GID
email = None
hrn = Xrn(xrn).get_hrn()
- dbsession=api.dbsession()
- record=dbsession.query(RegUser).filter_by(hrn=hrn).first()
+ dbsession = api.dbsession()
+ record = dbsession.query(RegUser).filter_by(hrn=hrn).first()
if record:
- email=getattr(record,'email',None)
- gid = api.auth.hierarchy.create_gid(xrn, create_uuid(), pkey, email = email)
+ email = getattr(record,'email',None)
+ gid = api.auth.hierarchy.create_gid(xrn, create_uuid(), pkey, email=email)
return gid.save_to_string(save_parents=True)
####################
# hrns is the list of hrns that should be linked to the subject from now on
# target_type would be e.g. 'user' in the 'slice' x 'researcher' example
def update_driver_relation (self, api, record_obj, hrns, target_type, relation_name):
- dbsession=api.dbsession()
+ dbsession = api.dbsession()
# locate the linked objects in our db
subject_type=record_obj.type
subject_id=record_obj.pointer
def Register(self, api, record_dict):
- logger.debug("Register: entering with record_dict=%s"%printable(record_dict))
+ logger.debug("Register: entering with record_dict={}".format(printable(record_dict)))
normalize_input_record (record_dict)
- logger.debug("Register: normalized record_dict=%s"%printable(record_dict))
+ logger.debug("Register: normalized record_dict={}".format(printable(record_dict)))
- dbsession=api.dbsession()
+ dbsession = api.dbsession()
hrn, type = record_dict['hrn'], record_dict['type']
urn = hrn_to_urn(hrn,type)
# validate the type
raise UnknownSfaType(type)
# check if record_dict already exists
- existing_records = dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).all()
+ existing_records = dbsession.query(RegRecord).filter_by(type=type, hrn=hrn).all()
if existing_records:
raise ExistingRecord(hrn)
if pub_key and isinstance(pub_key, types.ListType): pub_key = pub_key[0]
pkey = convert_public_key(pub_key)
- email=getattr(record,'email',None)
+ email = getattr(record,'email',None)
gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey, email = email)
gid = gid_object.save_to_string(save_parents=True)
record.gid = gid
keys=getattr(record,'reg-keys')
# some people send the key as a string instead of a list of strings
if isinstance(keys,types.StringTypes): keys=[keys]
- logger.debug ("creating %d keys for user %s"%(len(keys),record.hrn))
+ logger.debug ("creating {} keys for user {}".format(len(keys), record.hrn))
record.reg_keys = [ RegKey (key) for key in keys ]
# update testbed-specific data if needed
def Update(self, api, record_dict):
- logger.debug("Update: entering with record_dict=%s"%printable(record_dict))
+ logger.debug("Update: entering with record_dict={}".format(printable(record_dict)))
normalize_input_record (record_dict)
- logger.debug("Update: normalized record_dict=%s"%printable(record_dict))
+ logger.debug("Update: normalized record_dict={}".format(printable(record_dict)))
- dbsession=api.dbsession()
+ dbsession = api.dbsession()
assert ('type' in record_dict)
- new_record=make_record(dict=record_dict)
- (type,hrn) = (new_record.type, new_record.hrn)
+ new_record = make_record(dict=record_dict)
+ (type, hrn) = (new_record.type, new_record.hrn)
# make sure the record exists
- record = dbsession.query(RegRecord).filter_by(type=type,hrn=hrn).first()
+ record = dbsession.query(RegRecord).filter_by(type=type, hrn=hrn).first()
if not record:
- raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
+ raise RecordNotFound("hrn={}, type={}".format(hrn, type))
record.just_updated()
# Use the pointer from the existing record, not the one that the user
pointer = record.pointer
# is there a change in keys ?
- new_key=None
- if type=='user':
- if getattr(new_record,'keys',None):
- new_key=new_record.keys
- if isinstance (new_key,types.ListType):
- new_key=new_key[0]
+ new_key = None
+ if type == 'user':
+ if getattr(new_record, 'keys', None):
+ new_key = new_record.keys
+ if isinstance (new_key, types.ListType):
+ new_key = new_key[0]
# take new_key into account
if new_key:
uuid = create_uuid()
urn = hrn_to_urn(hrn,type)
- email=getattr(new_record,'email',None)
+ email = getattr(new_record, 'email', None)
if email is None:
- email=getattr(record,'email',None)
+ email = getattr(record, 'email', None)
gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey, email = email)
gid = gid_object.save_to_string(save_parents=True)
# not too big a deal with planetlab as the driver is authoritative, but...
# update native relations
- if isinstance (record, RegSlice):
- researcher_hrns = getattr(new_record,'reg-researchers',None)
- if researcher_hrns is not None: record.update_researchers (researcher_hrns, dbsession)
-
- elif isinstance (record, RegAuthority):
- pi_hrns = getattr(new_record,'reg-pis',None)
- if pi_hrns is not None: record.update_pis (pi_hrns, dbsession)
+ if isinstance(record, RegSlice):
+ researcher_hrns = getattr(new_record, 'reg-researchers', None)
+ if researcher_hrns is not None:
+ record.update_researchers (researcher_hrns, dbsession)
+
+ elif isinstance(record, RegAuthority):
+ pi_hrns = getattr(new_record, 'reg-pis', None)
+ if pi_hrns is not None:
+ record.update_pis(pi_hrns, dbsession)
+ name = getattr(new_record, 'name', None)
+ if name is not None:
+ record.name = name
+
+ elif isinstance(record, RegUser):
+ email = getattr(new_record, 'email', None)
+ if email is not None:
+ record.email = email
# update the PLC information that was specified with the record
- # xxx oddly enough, without this useless statement,
+ # xxx mystery -- see also the bottom of model.py,
+ # oddly enough, without this useless statement,
# record.__dict__ as received by the driver seems to be off
- # anyway the driver should receive an object
+ # anyway the driver should receive an object
# (and then extract __dict__ itself if needed)
- print "DO NOT REMOVE ME before driver.update, record=%s"%record
+ print "DO NOT REMOVE ME before driver.update, record={}".format(record)
+ # as of June 2015: I suspect we could remove that print line above and replace it with
+ # augment_with_sfa_builtins(record)
+ # instead, that checks for these fields, like it is done above in List()
+ # but that would need to be confirmed by more extensive tests
new_key_pointer = -1
try:
(pointer, new_key_pointer) = api.driver.update (record.__dict__, new_record.__dict__, hrn, new_key)
except:
pass
if new_key and new_key_pointer:
- record.reg_keys=[ RegKey (new_key, new_key_pointer)]
+ record.reg_keys = [ RegKey(new_key, new_key_pointer) ]
record.gid = gid
dbsession.commit()
# update membership for researchers, pis, owners, operators
- self.update_driver_relations (api, record, new_record)
+ self.update_driver_relations(api, record, new_record)
return 1
# expecting an Xrn instance
def Remove(self, api, xrn, origin_hrn=None):
- dbsession=api.dbsession()
- hrn=xrn.get_hrn()
- type=xrn.get_type()
- request=dbsession.query(RegRecord).filter_by(hrn=hrn)
+ dbsession = api.dbsession()
+ hrn = xrn.get_hrn()
+ type = xrn.get_type()
+ request = dbsession.query(RegRecord).filter_by(hrn=hrn)
if type and type not in ['all', '*']:
- request=request.filter_by(type=type)
+ request = request.filter_by(type=type)
record = request.first()
if not record:
- msg="Could not find hrn %s"%hrn
- if type: msg += " type=%s"%type
+ msg = "Could not find hrn {}".format(hrn)
+ if type: msg += " type={}".format(type)
raise RecordNotFound(msg)
type = record.type
# This is a PLC-specific thing, won't work with other platforms
def get_key_from_incoming_ip (self, api):
- dbsession=api.dbsession()
+ dbsession = api.dbsession()
# verify that the callers's ip address exist in the db and is an interface
# for a node in the db
(ip, port) = api.remote_addr
interfaces = api.driver.shell.GetInterfaces({'ip': ip}, ['node_id'])
if not interfaces:
- raise NonExistingRecord("no such ip %(ip)s" % locals())
+ raise NonExistingRecord("no such ip {}".format(ip))
nodes = api.driver.shell.GetNodes([interfaces[0]['node_id']], ['node_id', 'hostname'])
if not nodes:
- raise NonExistingRecord("no such node using ip %(ip)s" % locals())
+ raise NonExistingRecord("no such node using ip {}".format(ip))
node = nodes[0]
# look up the sfa record
- record=dbsession.query(RegRecord).filter_by(type='node',pointer=node['node_id']).first()
+ record = dbsession.query(RegRecord).filter_by(type='node', pointer=node['node_id']).first()
if not record:
- raise RecordNotFound("node with pointer %s"%node['node_id'])
+ raise RecordNotFound("node with pointer {}".format(node['node_id']))
# generate a new keypair and gid
uuid = create_uuid()
pkey = Keypair(create=True)
urn = hrn_to_urn(record.hrn, record.type)
- email=getattr(record,'email',None)
+ email = getattr(record, 'email', None)
gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey, email)
gid = gid_object.save_to_string(save_parents=True)
record.gid = gid
scp = "/usr/bin/scp"
#identity = "/etc/planetlab/root_ssh_key.rsa"
identity = "/etc/sfa/root_ssh_key"
- scp_options=" -i %(identity)s " % locals()
- scp_options+="-o StrictHostKeyChecking=no " % locals()
- scp_key_command="%(scp)s %(scp_options)s %(key_filename)s root@%(host)s:%(key_dest)s" %\
- locals()
- scp_gid_command="%(scp)s %(scp_options)s %(gid_filename)s root@%(host)s:%(gid_dest)s" %\
- locals()
+ scp_options = " -i {identity} ".format(**locals())
+ scp_options += "-o StrictHostKeyChecking=no "
+ scp_key_command = "{scp} {scp_options} {key_filename} root@{host}:{key_dest}"\
+ .format(**locals())
+ scp_gid_command = "{scp} {scp_options} {gid_filename} root@{host}:{gid_dest}"\
+ .format(**locals())
all_commands = [scp_key_command, scp_gid_command]
+++ /dev/null
-import types
-# for get_key_from_incoming_ip
-import tempfile
-import os
-import commands
-
-from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
- UnknownSfaType, ExistingRecord, NonExistingRecord
-from sfa.util.sfatime import utcparse, datetime_to_epoch
-from sfa.util.prefixTree import prefixTree
-from sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn
-from sfa.util.version import version_core
-from sfa.util.sfalogging import logger
-
-from sfa.trust.gid import GID
-from sfa.trust.credential import Credential
-from sfa.trust.certificate import Certificate, Keypair, convert_public_key
-from sfa.trust.gid import create_uuid
-
-from sfa.storage.model import make_record,RegRecord
-from sfa.storage.alchemy import dbsession
-
-from sfa.managers.registry_manager import RegistryManager
-
-class RegistryManager(RegistryManager):
-
- def GetCredential(self, api, xrn, type, caller_xrn = None):
- # convert xrn to hrn
- if type:
- hrn = urn_to_hrn(xrn)[0]
- else:
- hrn, type = urn_to_hrn(xrn)
-
- # Is this a root or sub authority
- auth_hrn = api.auth.get_authority(hrn)
- if not auth_hrn or hrn == api.config.SFA_INTERFACE_HRN:
- auth_hrn = hrn
- auth_info = api.auth.get_auth_info(auth_hrn)
- # get record info
- filter = {'hrn': hrn}
- if type:
- filter['type'] = type
- record=dbsession.query(RegRecord).filter_by(**filter).first()
- if not record:
- raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
-
- # verify_cancreate_credential requires that the member lists
- # (researchers, pis, etc) be filled in
- logger.debug("get credential before augment dict, keys=%s"%record.__dict__.keys())
- self.driver.augment_records_with_testbed_info (record.__dict__)
- logger.debug("get credential after augment dict, keys=%s"%record.__dict__.keys())
- if not self.driver.is_enabled (record.__dict__):
- raise AccountNotEnabled(": PlanetLab account %s is not enabled. Please contact your site PI" %(record.email))
-
- # get the callers gid
- # if caller_xrn is not specified assume the caller is the record
- # object itself.
- if not caller_xrn:
- caller_hrn = hrn
- caller_gid = record.get_gid_object()
- else:
- caller_hrn, caller_type = urn_to_hrn(caller_xrn)
- caller_filter = {'hrn': caller_hrn}
- if caller_type:
- caller_filter['type'] = caller_type
- caller_record = dbsession.query(RegRecord).filter_by(**caller_filter).first()
- if not caller_record:
- raise RecordNotFound("Unable to associated caller (hrn=%s, type=%s) with credential for (hrn: %s, type: %s)"%(caller_hrn, caller_type, hrn, type))
- caller_gid = GID(string=caller_record.gid)
-
- object_hrn = record.get_gid_object().get_hrn()
- rights = api.auth.determine_user_rights(caller_hrn, record.todict())
- # make sure caller has rights to this object
- if rights.is_empty():
- raise PermissionError(caller_hrn + " has no rights to " + record.hrn)
-
- object_gid = GID(string=record.gid)
- new_cred = Credential(subject = object_gid.get_subject())
- new_cred.set_gid_caller(caller_gid)
- new_cred.set_gid_object(object_gid)
- new_cred.set_issuer_keys(auth_info.get_privkey_filename(), auth_info.get_gid_filename())
- #new_cred.set_pubkey(object_gid.get_pubkey())
- new_cred.set_privileges(rights)
- new_cred.get_privileges().delegate_all_privileges(True)
- if hasattr(record,'expires'):
- date = utcparse(record.expires)
- expires = datetime_to_epoch(date)
- new_cred.set_expiration(int(expires))
- auth_kind = "authority,ma,sa"
- # Parent not necessary, verify with certs
- #new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
- new_cred.encode()
- new_cred.sign()
-
- return new_cred.save_to_string(save_parents=True)
-
-
- # subject_record describes the subject of the relationships
- # ref_record contains the target values for the various relationships we need to manage
- # (to begin with, this is just the slice x person relationship)
- def update_relations (self, subject_obj, ref_obj):
- type=subject_obj.type
- if type=='slice':
- self.update_relation(subject_obj, 'researcher', ref_obj.researcher, 'user')
-
- # field_key is the name of one field in the record, typically 'researcher' for a 'slice' record
- # hrns is the list of hrns that should be linked to the subject from now on
- # target_type would be e.g. 'user' in the 'slice' x 'researcher' example
- def update_relation (self, record_obj, field_key, hrns, target_type):
- # locate the linked objects in our db
- subject_type=record_obj.type
- subject_id=record_obj.pointer
- # get the 'pointer' field of all matching records
- link_id_tuples = dbsession.query(RegRecord.pointer).filter_by(type=target_type).filter(RegRecord.hrn.in_(hrns)).all()
- # sqlalchemy returns named tuples for columns
- link_ids = [ tuple.pointer for tuple in link_id_tuples ]
- self.driver.update_relation (subject_type, target_type, subject_id, link_ids)
-
@param slice_urn (string) URN of slice to allocate to
@param credentials (dict) of credentials
@param rspec (string) rspec to allocate
-
+ @param options (dict)
+
+ As of 3.1.16, the PL driver implements here an important option named
+ 'pltags' that affects the management of slice tags.
+
+ This option can take 3 values
+ (*) options['pltags'] == 'ignore' (default)
+        This is the recommended mode; in this mode all slice tags passed
+        here are ignored; these correspond to the <planetlab:attribute> XML tags in
+        the <sliver_type> areas of incoming rspec to Allocate.
+ In other words you are guaranteed to leave slice tags alone.
+ (*) options['pltags'] == 'append'
+ All incoming slice tags are added to corresponding slivers,
+ unless an exact match can be found in the PLC db
+ (*) options['pltags'] == 'sync'
+ The historical mode, that attempts to leave the PLC db in a state
+ in sync with the ones specified in incoming rspec.
+
+ See also http://svn.planet-lab.org/wiki/SFASliceTags
+
"""
interfaces = ['aggregate', 'slicemgr']
accepts = [
options['geni_rspec_version'] = options['rspec_version']
else:
raise SfaInvalidArgument('Must specify an rspec version option. geni_rspec_version cannot be null')
- valid_creds = self.api.auth.checkCredentialsSpeaksFor(creds, 'listnodes', urns,
- check_sliver_callback = self.api.driver.check_sliver_credentials,
- options=options)
+ valid_creds = self.api.auth.checkCredentialsSpeaksFor(
+ creds, 'listnodes', urns,
+ check_sliver_callback = self.api.driver.check_sliver_credentials,
+ options=options)
# get hrn of the original caller
origin_hrn = options.get('origin_hrn', None)
def get_node_tags(self, filter=None):
if filter is None: filter={}
node_tags = {}
- for node_tag in self.driver.shell.GetNodeTags(filter):
+ for node_tag in self.driver.shell.GetNodeTags(filter, ['tagname', 'value', 'node_id', 'node_tag_id'] ):
node_tags[node_tag['node_tag_id']] = node_tag
return node_tags
if slice_ids:
filter['slice_id'] = list(slice_ids)
# get all slices
- all_slices = self.driver.shell.GetSlices(filter, ['slice_id', 'name', 'hrn', 'person_ids', 'node_ids', 'slice_tag_ids', 'expires'])
+ fields = ['slice_id', 'name', 'hrn', 'person_ids', 'node_ids', 'slice_tag_ids', 'expires']
+ all_slices = self.driver.shell.GetSlices(filter, fields)
if slice_hrn:
slices = [slice for slice in all_slices if slice['hrn'] == slice_hrn]
else:
slices = all_slices
if not slices:
+ if slice_hrn:
+ logger.error("PlAggregate.get_slivers : no slice found with hrn {}".format(slice_hrn))
+ else:
+ logger.error("PlAggregate.get_slivers : no sliver found with urns {}".format(urns))
return []
slice = slices[0]
slice['hrn'] = slice_hrn
if node_ids:
node_ids = [node_id for node_id in node_ids if node_id in slice['node_ids']]
slice['node_ids'] = node_ids
- tags_dict = self.get_slice_tags(slice)
+ pltags_dict = self.get_pltags_by_node_id(slice)
nodes_dict = self.get_slice_nodes(slice, options)
slivers = []
for node in nodes_dict.values():
node.update(slice)
- node['tags'] = tags_dict[node['node_id']]
+            # slice-global tags - copy the list, as += below would otherwise
+            # mutate the shared slice-global list across nodes
+            node['slice-tags'] = list(pltags_dict['slice-global'])
+ # xxx
+            # this is where we could maybe add the nodegroup slice tags,
+ # but it's tedious...
+ # xxx
+ # sliver tags
+ node['slice-tags'] += pltags_dict[node['node_id']]
sliver_hrn = '%s.%s-%s' % (self.driver.hrn, slice['slice_id'], node['node_id'])
node['sliver_id'] = Xrn(sliver_hrn, type='sliver').urn
node['urn'] = node['sliver_id']
node['services_user'] = users
slivers.append(node)
+ if not slivers:
+ logger.warning("PlAggregate.get_slivers : slice(s) found but with no sliver {}".format(urns))
return slivers
def node_to_rspec_node(self, node, sites, interfaces, node_tags, pl_initscripts=None, grain=None, options=None):
interface['client_id'] = "%s:%s" % (node['node_id'], if_id)
rspec_node['interfaces'].append(interface)
if_count+=1
- tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids'] if tag_id in node_tags]
- rspec_node['tags'] = tags
+ # this is what describes a particular node
+ node_level_tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids'] if tag_id in node_tags]
+ rspec_node['tags'] = node_level_tags
return rspec_node
- def sliver_to_rspec_node(self, sliver, sites, interfaces, node_tags, \
+ def sliver_to_rspec_node(self, sliver, sites, interfaces, node_tags, sliver_pltags, \
pl_initscripts, sliver_allocations):
# get the granularity in second for the reservation system
grain = self.driver.shell.GetLeaseGranularity()
rspec_node = self.node_to_rspec_node(sliver, sites, interfaces, node_tags, pl_initscripts, grain)
+ for pltag in sliver_pltags:
+ logger.debug("Need to expose {}".format(pltag))
# xxx how to retrieve site['login_base']
rspec_node['expires'] = datetime_to_string(utcparse(sliver['expires']))
# remove interfaces from manifest
rspec_node['interfaces'] = []
# add sliver info
rspec_sliver = Sliver({'sliver_id': sliver['urn'],
- 'name': sliver['name'],
- 'type': 'plab-vserver',
- 'tags': []})
+ 'name': sliver['name'],
+ 'type': 'plab-vserver',
+ 'tags': sliver_pltags,
+ })
rspec_node['sliver_id'] = rspec_sliver['sliver_id']
if sliver['urn'] in sliver_allocations:
rspec_node['client_id'] = sliver_allocations[sliver['urn']].client_id
rspec_node['slivers'] = [rspec_sliver]
# slivers always provide the ssh service
- login = Login({'authentication': 'ssh-keys',
- 'hostname': sliver['hostname'],
+ login = Login({'authentication': 'ssh-keys',
+ 'hostname': sliver['hostname'],
'port':'22',
'username': sliver['name'],
'login': sliver['name']
})
service = ServicesElement({'login': login,
- 'services_user': sliver['services_user']})
- rspec_node['services'] = [service]
- return rspec_node
+ 'services_user': sliver['services_user']})
+ rspec_node['services'] = [service]
+ return rspec_node
- def get_slice_tags(self, slice):
+ def get_pltags_by_node_id(self, slice):
slice_tag_ids = []
slice_tag_ids.extend(slice['slice_tag_ids'])
- tags = self.driver.shell.GetSliceTags({'slice_tag_id': slice_tag_ids})
+ tags = self.driver.shell.GetSliceTags({'slice_tag_id': slice_tag_ids},
+ ['tagname', 'value', 'node_id', 'nodegroup_id'])
# sorted by node_id
- tags_dict = defaultdict(list)
+ pltags_dict = defaultdict(list)
for tag in tags:
- tags_dict[tag['node_id']] = tag
- return tags_dict
+ # specific to a node
+ if tag['node_id']:
+ tag['scope'] = 'sliver'
+ pltags_dict[tag['node_id']].append(PLTag(tag))
+ # restricted to a nodegroup
+ # for now such tags are not exposed to describe
+ # xxx we should also expose the nodegroup name in this case to be complete..
+ elif tag['nodegroup_id']:
+ tag['scope'] = 'nodegroup'
+ pltags_dict['nodegroup'].append(PLTag(tag))
+ # this tag is global to the slice
+ else:
+ tag['scope'] = 'slice'
+ pltags_dict['slice-global'].append(PLTag(tag))
+ return pltags_dict
def get_slice_nodes(self, slice, options=None):
if options is None: options={}
for sliver in slivers:
if sliver['slice_ids_whitelist'] and sliver['slice_id'] not in sliver['slice_ids_whitelist']:
continue
- rspec_node = self.sliver_to_rspec_node(sliver, sites, interfaces, node_tags,
+ sliver_pltags = sliver['slice-tags']
+ rspec_node = self.sliver_to_rspec_node(sliver, sites, interfaces, node_tags, sliver_pltags,
pl_initscripts, sliver_allocation_dict)
+ logger.debug('rspec of type {}'.format(rspec_node.__class__.__name__))
# manifest node element shouldn't contain available attribute
rspec_node.pop('available')
rspec_nodes.append(rspec_node)
convert a list of dictionaries into a dictionary keyed on the
specified dictionary key
"""
- return dict ( [ (rec[key],rec) for rec in recs ] )
+ return { rec[key] : rec for rec in recs }
#
# PlShell is just an xmlrpc serverproxy where methods
def __init__ (self, api):
Driver.__init__ (self, api)
- config=api.config
+ config = api.config
self.shell = PlShell (config)
- self.cache=None
+ self.cache = None
if config.SFA_AGGREGATE_CACHING:
if PlDriver.cache is None:
PlDriver.cache = Cache()
filter['slice_id'] = int(sliver_id_parts[0])
except ValueError:
filter['name'] = sliver_id_parts[0]
- slices = self.shell.GetSlices(filter,['hrn'])
+ slices = self.shell.GetSlices(filter, ['hrn'])
if not slices:
- raise Forbidden("Unable to locate slice record for sliver: %s" % xrn)
+ raise Forbidden("Unable to locate slice record for sliver: {}".format(xrn))
slice = slices[0]
slice_xrn = slice['hrn']
return slice_xrn
# make sure we have a credential for every specified sliver ierd
for sliver_name in sliver_names:
if sliver_name not in slice_cred_names:
- msg = "Valid credential not found for target: %s" % sliver_name
+ msg = "Valid credential not found for target: {}".format(sliver_name)
raise Forbidden(msg)
########################################
if not sites:
# xxx when a site gets registered through SFA we need to set its max_slices
if 'max_slices' not in pl_record:
- pl_record['max_slices']=2
+ pl_record['max_slices'] = 2
pointer = self.shell.AddSite(pl_record)
self.shell.SetSiteHrn(int(pointer), hrn)
else:
pointer = sites[0]['site_id']
elif type == 'slice':
- acceptable_fields=['url', 'instantiation', 'name', 'description']
+ acceptable_fields = ['url', 'instantiation', 'name', 'description']
for key in pl_record.keys():
if key not in acceptable_fields:
pl_record.pop(key)
persons = self.shell.GetPersons({'peer_id': None, 'email': sfa_record['email']})
if not persons:
for key in ['first_name','last_name']:
- if key not in sfa_record: sfa_record[key]='*from*sfa*'
+ if key not in sfa_record:
+ sfa_record[key] = '*from*sfa*'
# AddPerson does not allow everything to be set
can_add = ['first_name', 'last_name', 'title','email', 'password', 'phone', 'url', 'bio']
- add_person_dict=dict ( [ (k,sfa_record[k]) for k in sfa_record if k in can_add ] )
+ add_person_dict = { k : sfa_record[k] for k in sfa_record if k in can_add }
pointer = self.shell.AddPerson(add_person_dict)
self.shell.SetPersonHrn(int(pointer), hrn)
else:
self.shell.AddPersonToSite(pointer, login_base)
# What roles should this user have?
- roles=[]
+ roles = []
if 'roles' in sfa_record:
# if specified in xml, but only low-level roles
roles = [ role for role in sfa_record['roles'] if role in ['user','tech'] ]
# at least user if no other cluse could be found
if not roles:
- roles=['user']
+ roles = ['user']
for role in roles:
self.shell.AddRoleToPerson(role, pointer)
# Add the user's key
self.shell.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
elif type == 'node':
- login_base = PlXrn(xrn=sfa_record['authority'],type='authority').pl_login_base()
+ login_base = PlXrn(xrn=sfa_record['authority'], type='authority').pl_login_base()
nodes = self.shell.GetNodes({'peer_id': None, 'hostname': pl_record['hostname']})
if not nodes:
pointer = self.shell.AddNode(login_base, pl_record)
raise UnknownSfaType(type)
if (type == "authority"):
+ logger.debug("pldriver.update: calling UpdateSite with {}".format(new_sfa_record))
self.shell.UpdateSite(pointer, new_sfa_record)
self.shell.SetSiteHrn(pointer, hrn)
elif type == "slice":
- pl_record=self.sfa_fields_to_pl_fields(type, hrn, new_sfa_record)
+ pl_record = self.sfa_fields_to_pl_fields(type, hrn, new_sfa_record)
if 'name' in pl_record:
pl_record.pop('name')
self.shell.UpdateSlice(pointer, pl_record)
##########
def remove (self, sfa_record):
- type=sfa_record['type']
- pointer=sfa_record['pointer']
+ type = sfa_record['type']
+ pointer = sfa_record['pointer']
if type == 'user':
persons = self.shell.GetPersons({'peer_id': None, 'person_id': pointer})
# only delete this person if he has site ids. if he doesnt, it probably means
return True
-
-
-
##
# Convert SFA fields to PLC fields for use when registering or updating
# registry record in the PLC database
if type == "slice":
pl_record["name"] = hrn_to_pl_slicename(hrn)
if "instantiation" in sfa_record:
- pl_record['instantiation']=sfa_record['instantiation']
+ pl_record['instantiation'] = sfa_record['instantiation']
else:
pl_record["instantiation"] = "plc-instantiated"
if "url" in sfa_record:
elif type == "authority":
pl_record["login_base"] = PlXrn(xrn=hrn,type='authority').pl_login_base()
- if "name" not in sfa_record:
+ if "name" not in sfa_record or not sfa_record['name']:
pl_record["name"] = hrn
if "abbreviated_name" not in sfa_record:
pl_record["abbreviated_name"] = hrn
# continue
sfa_info = {}
type = record['type']
- logger.info("fill_record_sfa_info - incoming record typed %s"%type)
+ logger.info("fill_record_sfa_info - incoming record typed {}".format(type))
if (type == "slice"):
# all slice users are researchers
record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
# plcapi works by changes, compute what needs to be added/deleted
def update_relation (self, subject_type, target_type, relation_name, subject_id, target_ids):
# hard-wire the code for slice/user for now, could be smarter if needed
- if subject_type =='slice' and target_type == 'user' and relation_name == 'researcher':
- subject=self.shell.GetSlices (subject_id)[0]
+ if subject_type == 'slice' and target_type == 'user' and relation_name == 'researcher':
+ subject = self.shell.GetSlices (subject_id)[0]
current_target_ids = subject['person_ids']
add_target_ids = list ( set (target_ids).difference(current_target_ids))
del_target_ids = list ( set (current_target_ids).difference(target_ids))
- logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
+ logger.debug ("subject_id = {} (type={})".format(subject_id, type(subject_id)))
for target_id in add_target_ids:
self.shell.AddPersonToSlice (target_id,subject_id)
- logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
+ logger.debug ("add_target_id = {} (type={})".format(target_id, type(target_id)))
for target_id in del_target_ids:
- logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
+ logger.debug ("del_target_id = {} (type={})".format(target_id, type(target_id)))
self.shell.DeletePersonFromSlice (target_id, subject_id)
elif subject_type == 'authority' and target_type == 'user' and relation_name == 'pi':
# due to the plcapi limitations this means essentially adding pi role to all people in the list
if 'pi' not in person['roles']:
self.shell.AddRoleToPerson('pi',person['person_id'])
else:
- logger.info('unexpected relation %s to maintain, %s -> %s'%(relation_name,subject_type,target_type))
+ logger.info('unexpected relation {} to maintain, {} -> {}'\
+ .format(relation_name, subject_type, target_type))
########################################
return status
def allocate (self, urn, rspec_string, expiration, options=None):
+ """
+ Allocate a PL slice
+
+ Supported options:
+ (*) geni_users
+ (*) append : if set to True, provided attributes are appended
+ to the current list of tags for the slice
+                     otherwise, the set of provided attributes is meant to be
+                     the exact set of tags at the end of the call, meaning pre-existing tags
+ are deleted if not repeated in the incoming request
+ """
if options is None: options={}
xrn = Xrn(urn)
aggregate = PlAggregate(self)
slices = PlSlices(self)
sfa_peer = slices.get_sfa_peer(xrn.get_hrn())
- slice_record=None
+ slice_record = None
users = options.get('geni_users', [])
if users:
# ensure person records exists
persons = slices.verify_persons(xrn.hrn, slice, users, sfa_peer, options=options)
# ensure slice attributes exists
- slices.verify_slice_attributes(slice, requested_attributes, options=options)
+ slices.verify_slice_tags(slice, requested_attributes, options=options)
# add/remove slice from nodes
request_nodes = rspec.version.get_nodes_with_slivers()
filter['name'] = sliver_id_parts[0]
slices = self.shell.GetSlices(filter,['hrn'])
if not slices:
- raise Forbidden("Unable to locate slice record for sliver: %s" % xrn)
+ raise Forbidden("Unable to locate slice record for sliver: {}".format(xrn))
slice = slices[0]
slice_urn = hrn_to_urn(slice['hrn'], type='slice')
urns = [slice_urn]
persons = slices.verify_persons(slice['hrn'], slice, users, sfa_peer, options=options)
# update sliver allocation states and set them to geni_provisioned
sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- dbsession=self.api.dbsession()
+ dbsession = self.api.dbsession()
SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned',dbsession)
version_manager = VersionManager()
self.shell.DeleteLeases(leases_ids)
# delete sliver allocation states
- dbsession=self.api.dbsession()
- SliverAllocation.delete_allocations(sliver_ids,dbsession)
+ dbsession = self.api.dbsession()
+ SliverAllocation.delete_allocations(sliver_ids, dbsession)
finally:
pass
description = self.describe(urns, 'GENI 3', options)
for sliver in description['geni_slivers']:
if sliver['geni_operational_status'] == 'geni_pending_allocation':
- raise UnsupportedOperation(action, "Sliver must be fully allocated (operational status is not geni_pending_allocation)")
+ raise UnsupportedOperation\
+ (action, "Sliver must be fully allocated (operational status is not geni_pending_allocation)")
#
# Perform Operational Action Here
#
class PlSlices:
- rspec_to_slice_tag = {'max_rate':'net_max_rate'}
+ rspec_to_slice_tag = {'max_rate' : 'net_max_rate'}
def __init__(self, driver):
self.driver = driver
# XXX Sanity check; though technically this should be a system invariant
# checked with an assertion
- if slice['expires'] > MAXINT: slice['expires']= MAXINT
+ if slice['expires'] > MAXINT:
+ slice['expires'] = MAXINT
slivers.append({
'hrn': hrn,
for node in resulting_nodes:
client_id = slivers[node['hostname']]['client_id']
component_id = slivers[node['hostname']]['component_id']
- sliver_hrn = '%s.%s-%s' % (self.driver.hrn, slice['slice_id'], node['node_id'])
+ sliver_hrn = '{}.{}-{}'.format(self.driver.hrn, slice['slice_id'], node['node_id'])
sliver_id = Xrn(sliver_hrn, type='sliver').urn
record = SliverAllocation(sliver_id=sliver_id, client_id=client_id,
component_id=component_id,
slice_tags.append({'name': 'vini_topo', 'value': 'manual', 'node_id': node_id})
#self.driver.shell.AddSliceTag(slice['name'], 'topo_rspec', str([topo_rspec]), node_id)
- self.verify_slice_attributes(slice, slice_tags, {'append': True}, admin=True)
-
+ self.verify_slice_tags(slice, slice_tags, {'pltags':'append'}, admin=True)
def verify_site(self, slice_xrn, slice_record=None, sfa_peer=None, options=None):
site = sites[0]
else:
# create new site record
- site = {'name': 'sfa:%s' % site_hrn,
+ site = {'name': 'sfa:{}'.format(site_hrn),
'abbreviated_name': site_hrn,
'login_base': login_base,
'max_slices': 100,
( auth_hrn, _ , leaf ) = user_hrn.rpartition('.')
# somehow this has backslashes, get rid of them
auth_hrn = auth_hrn.replace('\\','')
- default_email = "%s@%s.stub"%(leaf,auth_hrn)
+ default_email = "{}@{}.stub".format(leaf, auth_hrn)
person_record = {
# required
'hrn': user_hrn,
}
- logger.debug ("about to attempt to AddPerson with %s"%person_record)
+ logger.debug ("about to attempt to AddPerson with {}".format(person_record))
try:
# the thing is, the PLE db has a limitation on re-using the same e-mail
# in the case where people have an account on ple.upmc and then then come
except:
logger.log_exc("caught during first attempt at AddPerson")
# and if that fails we start again with the email based on the hrn, which this time is unique..
- person_record['email']=default_email
- logger.debug ("second chance with email=%s"%person_record['email'])
+ person_record['email'] = default_email
+ logger.debug ("second chance with email={}".format(person_record['email']))
person_id = int (self.driver.shell.AddPerson(person_record))
self.driver.shell.AddRoleToPerson('user', person_id)
self.driver.shell.AddPersonToSite(person_id, site_id)
# this is for retrieving users from a hrn
users_by_hrn = { user['hrn'] : user for user in users }
- for user in users: logger.debug("incoming user %s"%user)
+ for user in users: logger.debug("incoming user {}".format(user))
# compute the hrn's for the authority and site
top_auth_hrn = top_auth(slice_hrn)
self.driver.shell.AddPersonKey(int(person_id), key)
- def verify_slice_attributes(self, slice, requested_slice_attributes, options=None, admin=False):
+ def verify_slice_tags(self, slice, requested_slice_attributes, options=None, admin=False):
+ """
+ This function deals with slice tags, and supports 3 modes described
+ in the 'pltags' option that can be either
+ (*) 'ignore' (default) - do nothing
+ (*) 'append' - only add incoming tags, that do not match an existing tag
+ (*) 'sync' - tries to do the plain wholesale thing,
+ i.e. to leave the db in sync with incoming tags
+ """
if options is None: options={}
- append = options.get('append', True)
- # get list of attributes users ar able to manage
+
+ # lookup 'pltags' in options to find out which mode is requested here
+ pltags = options.get('pltags', 'ignore')
+ # make sure the default is 'ignore'
+ if pltags not in ('ignore', 'append', 'sync'):
+ pltags = 'ignore'
+
+ if pltags == 'ignore':
+ logger.info('verify_slice_tags in ignore mode - leaving slice tags as-is')
+ return
+
+ # incoming data (attributes) have a (name, value) pair
+ # while PLC data (tags) have a (tagname, value) pair
+ # we must be careful not to mix these up
+
+ # get list of tags users are able to manage - based on category
filter = {'category': '*slice*'}
if not admin:
filter['|roles'] = ['user']
- slice_attributes = self.driver.shell.GetTagTypes(filter)
- valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
+ valid_tag_types = self.driver.shell.GetTagTypes(filter)
+ valid_tag_names = [ tag_type['tagname'] for tag_type in valid_tag_types ]
+ logger.debug("verify_slice_attributes: valid names={}".format(valid_tag_names))
- # get sliver attributes
- added_slice_attributes = []
- removed_slice_attributes = []
+ # get slice tags
+ slice_attributes_to_add = []
+ slice_tags_to_remove = []
# we need to keep the slice hrn anyway
- ignored_slice_attribute_names = ['hrn']
- existing_slice_attributes = self.driver.shell.GetSliceTags({'slice_id': slice['slice_id']})
+ ignored_slice_tag_names = ['hrn']
+ existing_slice_tags = self.driver.shell.GetSliceTags({'slice_id': slice['slice_id']})
- # get attributes that should be removed
- for slice_tag in existing_slice_attributes:
- if slice_tag['tagname'] in ignored_slice_attribute_names:
+ # get tags that should be removed
+ for slice_tag in existing_slice_tags:
+ if slice_tag['tagname'] in ignored_slice_tag_names:
# If a slice already has a admin only role it was probably given to them by an
# admin, so we should ignore it.
- ignored_slice_attribute_names.append(slice_tag['tagname'])
- attribute_found=True
+ ignored_slice_tag_names.append(slice_tag['tagname'])
+ tag_found = True
else:
- # If an existing slice attribute was not found in the request it should
+ # If an existing slice tag was not found in the request it should
# be removed
- attribute_found=False
+ tag_found = False
for requested_attribute in requested_slice_attributes:
if requested_attribute['name'] == slice_tag['tagname'] and \
requested_attribute['value'] == slice_tag['value']:
- attribute_found=True
+ tag_found = True
break
+ # remove tags only if not in append mode
+ if not tag_found and pltags != 'append':
+ slice_tags_to_remove.append(slice_tag)
- if not attribute_found and not append:
- removed_slice_attributes.append(slice_tag)
-
- # get attributes that should be added:
+ # get tags that should be added:
for requested_attribute in requested_slice_attributes:
# if the requested attribute wasn't found we should add it
- if requested_attribute['name'] in valid_slice_attribute_names:
- attribute_found = False
- for existing_attribute in existing_slice_attributes:
+ if requested_attribute['name'] in valid_tag_names:
+ tag_found = False
+ for existing_attribute in existing_slice_tags:
if requested_attribute['name'] == existing_attribute['tagname'] and \
requested_attribute['value'] == existing_attribute['value']:
- attribute_found=True
+ tag_found = True
break
- if not attribute_found:
- added_slice_attributes.append(requested_attribute)
-
-
- # remove stale attributes
- for attribute in removed_slice_attributes:
+ if not tag_found:
+ slice_attributes_to_add.append(requested_attribute)
+
+ def friendly_message (tag_or_att):
+ name = tag_or_att['tagname'] if 'tagname' in tag_or_att else tag_or_att['name']
+            return "SliceTag slice={}, tagname={} value={}, node_id={}"\
+                .format(slice['name'], name, tag_or_att['value'], tag_or_att.get('node_id'))
+
+ # remove stale tags
+ for tag in slice_tags_to_remove:
try:
- self.driver.shell.DeleteSliceTag(attribute['slice_tag_id'])
- except Exception, e:
- logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
- % (slice['name'], attribute['value'], attribute.get('node_id'), str(e)))
-
- # add requested_attributes
- for attribute in added_slice_attributes:
+ logger.info("Removing Slice Tag {}".format(friendly_message(tag)))
+ self.driver.shell.DeleteSliceTag(tag['slice_tag_id'])
+ except Exception as e:
+ logger.warn("Failed to remove slice tag {}\nCause:{}"\
+ .format(friendly_message(tag), e))
+
+ # add requested_tags
+ for attribute in slice_attributes_to_add:
try:
+ logger.info("Adding Slice Tag {}".format(friendly_message(attribute)))
self.driver.shell.AddSliceTag(slice['name'], attribute['name'],
attribute['value'], attribute.get('node_id', None))
- except Exception, e:
- logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
- % (slice['name'], attribute['value'], attribute.get('node_id'), str(e)))
-
+ except Exception as e:
+ logger.warn("Failed to add slice tag {}\nCause:{}"\
+ .format(friendly_message(attribute), e))
elif hasattr(self.element, name):
return getattr(self.element, name)
else:
- raise AttributeError, "class Element has no attribute %s" % name
+ raise AttributeError("class Element of type {} has no attribute {}"
+ .format(self.__class__.__name__, name))
fields = [
'tagname',
'value',
+ 'scope',
]
class IotlabPosition(Element):
- fields = ['posx', 'posy','posz']
+ fields = ['x', 'y','z']
class IotlabLocation(Location):
fields = list(Location.fields)
from sfa.rspecs.elements.versions.pgv2DiskImage import PGv2DiskImage
from sfa.rspecs.elements.versions.plosv1FWRule import PLOSv1FWRule
+from sfa.util.sfalogging import logger
+
class PGv2SliverType:
@staticmethod
PGv2SliverType.add_sliver_attributes(sliver_elem, sliver.get('tags', []))
@staticmethod
- def add_sliver_attributes(xml, attributes):
- if attributes:
- for attribute in attributes:
- if attribute['name'] == 'initscript':
- xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
- elif attribute['tagname'] == 'flack_info':
- attrib_elem = xml.add_element('{%s}info' % self.namespaces['flack'])
+ def add_sliver_attributes(xml, tags):
+ if tags is None:
+ return
+ for tag in tags:
+ tagname = tag['tagname'] if 'tagname' in tag else tag['name']
+ if tagname == 'flack_info':
+                # this is a @staticmethod: there is no 'self' in scope, so the
+                # namespace map must come from the xml element we were handed
+                attrib_elem = xml.add_element('{%s}info' % xml.namespaces['flack'])
+ try:
attrib_dict = eval(tag['value'])
for (key, value) in attrib_dict.items():
- attrib_elem.set(key, value)
+ attrib_elem.set(key, value)
+ except Exception as e:
+ logger.warning("Could not parse dictionary in flack tag -- {}".format(e))
+ elif tagname == 'initscript':
+ xml.add_element('{%s}initscript' % xml.namespaces['planetlab'],
+ name=tag['value'])
+ else:
+ xml.add_element('{%s}attribute' % (xml.namespaces['planetlab']),
+ name = tagname,
+ value = tag['value'],
+ scope = tag.get('scope', 'unknown'),
+ )
+
@staticmethod
def get_slivers(xml, filter=None):
if filter is None: filter={}
if self.xml.schema:
self.version = self.version_manager.get_version_by_schema(self.xml.schema)
else:
- #raise InvalidRSpec('unknown rspec schema: %s' % schema)
+ #raise InvalidRSpec('unknown rspec schema: {}'.format(schema))
# TODO: Should start raising an exception once SFA defines a schema.
# for now we just default to sfa
self.version = self.version_manager.get_version({'type':'sfa','version': '1'})
def register_rspec_element(self, element_type, element_name, element_path):
if element_type not in RSpecElements:
- raise InvalidRSpecElement(element_type, extra="no such element type: %s. Must specify a valid RSpecElement" % element_type)
+ raise InvalidRSpecElement(element_type,
+ extra="no such element type: {}. Must specify a valid RSpecElement".format(element_type))
self.elements[element_type] = RSpecElement(element_type, element_name, element_path)
def get_rspec_element(self, element_type):
if element_type not in self.elements:
- msg = "ElementType %s not registerd for this rspec" % element_type
+ msg = "ElementType {} not registered for this rspec".format(element_type)
raise InvalidRSpecElement(element_type, extra=msg)
return self.elements[element_type]
"""
if filter is None: filter={}
if element_type not in self.elements:
- msg = "Unable to search for element %s in rspec, expath expression not found." % \
- element_type
+            msg = "Unable to search for element {} in rspec, xpath expression not found."\
+                .format(element_type)
raise InvalidRSpecElement(element_type, extra=msg)
rspec_element = self.get_rspec_element(element_type)
xpath = rspec_element.path + XpathFilter.xpath(filter)
return self.xml.save(filename)
if __name__ == '__main__':
- rspec = RSpec('/tmp/resources.rspec')
+ import sys
+ input = sys.argv[1]
+ with open(input) as f:
+ rspec = RSpec(f.read())
print rspec
- rspec.register_rspec_element(RSpecElements.NETWORK, 'network', '//network')
- rspec.register_rspec_element(RSpecElements.NODE, 'node', '//node')
- print rspec.get(RSpecElements.NODE)[0]
- print rspec.get(RSpecElements.NODE, depth=1)[0]
+# rspec.register_rspec_element(RSpecElements.NETWORK, 'network', '//network')
+# rspec.register_rspec_element(RSpecElements.NODE, 'node', '//node')
+# print rspec.get(RSpecElements.NODE)[0]
+# print rspec.get(RSpecElements.NODE, depth=1)[0]
self.versions = []
self.load_versions()
+ def __repr__(self):
+ return "<VersionManager with {} flavours: [{}]>"\
+ .format(len(self.versions),
+ ", ".join( [ str(x) for x in self.versions ]))
+
def load_versions(self):
path = os.path.dirname(os.path.abspath( __file__ ))
versions_path = path + os.sep + 'versions'
raise InvalidRSpec("Unkwnown RSpec schema: %s" % schema)
return retval
-def show_by_string(string):
- try:
- print v.get_version(string)
- except Exception,e:
- print e
-def show_by_schema(string):
- try:
- print v.get_version_by_schema(string)
- except Exception,e:
- print e
+ def show_by_string(self, string):
+ try:
+ print self.get_version(string)
+ except Exception as e:
+ print e
+
+ def show_by_schema(self, string):
+ try:
+ print self.get_version_by_schema(string)
+ except Exception as e:
+ print e
if __name__ == '__main__':
- v = VersionManager()
- print v.versions
- show_by_string('sfa 1')
- show_by_string('protogeni 2')
- show_by_string('protogeni 2 advertisement')
- show_by_schema('http://www.protogeni.net/resources/rspec/2/ad.xsd')
- show_by_schema('http://sorch.netmode.ntua.gr/ws/RSpec/ad.xsd')
+ manager = VersionManager()
+ print manager
+ manager.show_by_string('sfa 1')
+ manager.show_by_string('protogeni 2')
+ manager.show_by_string('protogeni 2 advertisement')
+ manager.show_by_schema('http://www.protogeni.net/resources/rspec/2/ad.xsd')
+ manager.show_by_schema('http://sorch.netmode.ntua.gr/ws/RSpec/ad.xsd')
from sfa.rspecs.elements.versions.pgv2Node import PGv2Node
from sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
from sfa.rspecs.elements.versions.pgv2Lease import PGv2Lease
-
+from sfa.util.sfalogging import logger
+
class PGv2(RSpecVersion):
type = 'ProtoGENI'
content_type = 'ad'
# Networks
def get_networks(self):
network_names = set()
- nodes = self.xml.xpath('//default:node[@component_manager_id] | //node[@component_manager_id]', namespaces=self.namespaces)
+ nodes = self.xml.xpath('//default:node[@component_manager_id] | //node[@component_manager_id]',
+ namespaces=self.namespaces)
for node in nodes:
if 'component_manager_id' in node.attrib:
network_urn = node.get('component_manager_id')
# Slivers
- def get_sliver_attributes(self, hostname, network=None):
- nodes = self.get_nodes({'component_id': '*%s*' %hostname})
- attribs = []
- if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
+ def get_sliver_attributes(self, component_id, network=None):
+ nodes = self.get_nodes({'component_id': '*%s*' %component_id})
+ try:
node = nodes[0]
sliver = node.xpath('./default:sliver_type', namespaces=self.namespaces)
if sliver is not None and isinstance(sliver, list) and len(sliver) > 0:
sliver = sliver[0]
- #attribs = self.attributes_list(sliver)
- return attribs
+ return self.attributes_list(sliver)
+ else:
+ return []
+        except Exception:
+            # best effort: no node matched component_id, or it has no sliver_type;
+            # narrowed from a bare except so SystemExit/KeyboardInterrupt pass through
+            return []
def get_slice_attributes(self, network=None):
slice_attributes = []
# TODO: default sliver attributes in the PG rspec?
default_ns_prefix = self.namespaces['default']
for node in nodes_with_slivers:
- sliver_attributes = self.get_sliver_attributes(node, network)
+ sliver_attributes = self.get_sliver_attributes(node['component_id'], network)
for sliver_attribute in sliver_attributes:
name=str(sliver_attribute[0])
text =str(sliver_attribute[1])
class Alchemy:
def __init__ (self, config):
- dbname="sfa"
+ dbname = "sfa"
# will be created lazily on-demand
self._session = None
# the former PostgreSQL.py used the psycopg2 directly and was doing
if self._session is None:
Session=sessionmaker ()
self._session=Session(bind=self.engine)
- logger.info('alchemy.global_session created session %s'%self._session)
+ logger.debug('alchemy.global_session created session %s'%self._session)
return self._session
def close_global_session (self):
if self._session is None: return
- logger.info('alchemy.close_global_session %s'%self._session)
+ logger.debug('alchemy.close_global_session %s'%self._session)
self._session.close()
self._session=None
def session (self):
Session=sessionmaker()
session=Session (bind=self.engine)
- logger.info('alchemy.session created session %s'%session)
+ logger.debug('alchemy.session created session %s'%session)
return session
def close_session (self, session):
- logger.info('alchemy.close_session closed session %s'%session)
+ logger.debug('alchemy.close_session closed session %s'%session)
session.close()
####################
from sqlalchemy import Table, MetaData, Column, ForeignKey
from sqlalchemy import Integer, String
-metadata=MetaData()
+metadata = MetaData()
-# this is needed my migrate so it can locate 'records.record_id'
+# this is needed by migrate so it can locate 'records.record_id'
records = \
Table ( 'records', metadata,
Column ('record_id', Integer, primary_key=True),
from sqlalchemy import Table, MetaData, Column, ForeignKey
from sqlalchemy import Integer, String
-metadata=MetaData()
+metadata = MetaData()
# this is needed by migrate so it can locate 'records.record_id'
records = \
from sqlalchemy import Table, MetaData, Column
from sqlalchemy import Integer, String
-metadata=MetaData()
+metadata = MetaData()
+
sliver_allocation_table = \
Table ( 'sliver_allocation', metadata,
Column('sliver_id', String, primary_key=True),
--- /dev/null
+# this move is about adding a 'name' column in the 'authority' table
+
+#from sfa.util.sfalogging import logger
+
+from sqlalchemy import MetaData, Table, Column, String
+from migrate.changeset.schema import create_column, drop_column
+
+def upgrade(migrate_engine):
+ metadata = MetaData(bind = migrate_engine)
+ authorities = Table('authorities', metadata, autoload=True)
+ name_column = Column('name', String)
+ name_column.create(authorities)
+
+def downgrade(migrate_engine):
+ metadata = MetaData(bind = migrate_engine)
+ authorities = Table('authorities', metadata, autoload=True)
+ authorities.c.name.drop()
from sfa.trust.gid import GID
##############################
-Base=declarative_base()
+Base = declarative_base()
####################
# dicts vs objects
# but we had to define another more internal column (classtype) so we
# accomodate variants in types like authority+am and the like
-class RegRecord (Base,AlchemyObj):
+class RegRecord(Base, AlchemyObj):
__tablename__ = 'records'
record_id = Column (Integer, primary_key=True)
# this is the discriminator that tells which class to use
if dict: self.load_from_dict (dict)
def __repr__(self):
- result="<Record id=%s, type=%s, hrn=%s, authority=%s, pointer=%s" % \
- (self.record_id, self.type, self.hrn, self.authority, self.pointer)
+ result="<Record id=%s, type=%s, hrn=%s, authority=%s" % \
+ (self.record_id, self.type, self.hrn, self.authority)
+# for extra in ('pointer', 'email', 'name'):
+# for extra in ('email', 'name'):
+# displaying names at this point is too dangerous, because of unicode
+        # note the trailing comma: a bare ('email') is just the string 'email'
+        # and would iterate over its characters
+        for extra in ('email',):
+ if hasattr(self, extra):
+ result += " {}={},".format(extra, getattr(self, extra))
# skip the uniform '--- BEGIN CERTIFICATE --' stuff
- if self.gid: result+=" gid=%s..."%self.gid[28:36]
- else: result+=" nogid"
+ if self.gid:
+ result+=" gid=%s..."%self.gid[28:36]
+ else:
+ result+=" nogid"
result += ">"
return result
else: return gid.save_to_string(save_parents=True)
def validate_datetime (self, key, incoming):
- if isinstance (incoming, datetime): return incoming
- elif isinstance (incoming, (int,float)):return datetime.fromtimestamp (incoming)
- else: logger.info("Cannot validate datetime for key %s with input %s"%\
- (key,incoming))
+ if isinstance (incoming, datetime):
+ return incoming
+ elif isinstance (incoming, (int, float)):
+ return datetime.fromtimestamp (incoming)
+ else:
+ logger.info("Cannot validate datetime for key %s with input %s"%\
+ (key,incoming))
@validates ('date_created')
- def validate_date_created (self, key, incoming): return self.validate_datetime (key, incoming)
+ def validate_date_created (self, key, incoming):
+ return self.validate_datetime (key, incoming)
@validates ('last_updated')
- def validate_last_updated (self, key, incoming): return self.validate_datetime (key, incoming)
+ def validate_last_updated (self, key, incoming):
+ return self.validate_datetime (key, incoming)
# xxx - there might be smarter ways to handle get/set'ing gid using validation hooks
def get_gid_object (self):
- if not self.gid: return None
- else: return GID(string=self.gid)
+ if not self.gid: return None
+ else: return GID(string=self.gid)
def just_created (self):
- now=datetime.utcnow()
- self.date_created=now
- self.last_updated=now
+ now = datetime.utcnow()
+ self.date_created = now
+ self.last_updated = now
def just_updated (self):
- now=datetime.utcnow()
- self.last_updated=now
+ now = datetime.utcnow()
+ self.last_updated = now
#################### cross-relations tables
# authority x user (pis) association
# all subclasses define a convenience constructor with a default value for type,
# and when applicable a way to define local fields in a kwd=value argument
####################
-class RegAuthority (RegRecord):
+class RegAuthority(RegRecord):
__tablename__ = 'authorities'
__mapper_args__ = { 'polymorphic_identity' : 'authority' }
record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)
#### extensions come here
+ name = Column ('name', String)
+ #### extensions come here
reg_pis = relationship \
('RegUser',
- secondary=authority_pi_table,
- primaryjoin=RegRecord.record_id==authority_pi_table.c.authority_id,
- secondaryjoin=RegRecord.record_id==authority_pi_table.c.pi_id,
- backref='reg_authorities_as_pi')
+ secondary = authority_pi_table,
+ primaryjoin = RegRecord.record_id==authority_pi_table.c.authority_id,
+ secondaryjoin = RegRecord.record_id==authority_pi_table.c.pi_id,
+ backref = 'reg_authorities_as_pi',
+ )
def __init__ (self, **kwds):
+ # handle local settings
+ if 'name' in kwds:
+ self.name = kwds.pop('name')
# fill in type if not previously set
- if 'type' not in kwds: kwds['type']='authority'
+ if 'type' not in kwds:
+ kwds['type']='authority'
# base class constructor
RegRecord.__init__(self, **kwds)
# no proper data yet, just hack the typename
def __repr__ (self):
- return RegRecord.__repr__(self).replace("Record","Authority")
+ result = RegRecord.__repr__(self).replace("Record", "Authority")
+# here again trying to display names that can be utf8 is too dangerous
+# result.replace(">", " name={}>".format(self.name))
+ return result
def update_pis (self, pi_hrns, dbsession):
# strip that in case we have <researcher> words </researcher>
pi_hrns = [ x.strip() for x in pi_hrns ]
- request = dbsession.query (RegUser).filter(RegUser.hrn.in_(pi_hrns))
- logger.info ("RegAuthority.update_pis: %d incoming pis, %d matches found"%(len(pi_hrns),request.count()))
- pis = dbsession.query (RegUser).filter(RegUser.hrn.in_(pi_hrns)).all()
+ request = dbsession.query(RegUser).filter(RegUser.hrn.in_(pi_hrns))
+ logger.info("RegAuthority.update_pis: %d incoming pis, %d matches found"\
+ % (len(pi_hrns), request.count()))
+ pis = dbsession.query(RegUser).filter(RegUser.hrn.in_(pi_hrns)).all()
self.reg_pis = pis
####################
-class RegSlice (RegRecord):
+class RegSlice(RegRecord):
__tablename__ = 'slices'
__mapper_args__ = { 'polymorphic_identity' : 'slice' }
record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)
secondary=slice_researcher_table,
primaryjoin=RegRecord.record_id==slice_researcher_table.c.slice_id,
secondaryjoin=RegRecord.record_id==slice_researcher_table.c.researcher_id,
- backref='reg_slices_as_researcher')
+ backref='reg_slices_as_researcher',
+ )
def __init__ (self, **kwds):
- if 'type' not in kwds: kwds['type']='slice'
+ if 'type' not in kwds:
+ kwds['type']='slice'
RegRecord.__init__(self, **kwds)
def __repr__ (self):
- return RegRecord.__repr__(self).replace("Record","Slice")
+ return RegRecord.__repr__(self).replace("Record", "Slice")
def update_researchers (self, researcher_hrns, dbsession):
# strip that in case we have <researcher> words </researcher>
researcher_hrns = [ x.strip() for x in researcher_hrns ]
request = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns))
- logger.info ("RegSlice.update_researchers: %d incoming researchers, %d matches found"%(len(researcher_hrns),request.count()))
+ logger.info ("RegSlice.update_researchers: %d incoming researchers, %d matches found"\
+ % (len(researcher_hrns), request.count()))
researchers = dbsession.query (RegUser).filter(RegUser.hrn.in_(researcher_hrns)).all()
self.reg_researchers = researchers
# helper function is called from the trust/ area that
def get_pis (self):
from sqlalchemy.orm import sessionmaker
- Session=sessionmaker()
- dbsession=Session.object_session(self)
+ Session = sessionmaker()
+ dbsession = Session.object_session(self)
from sfa.util.xrn import get_authority
authority_hrn = get_authority(self.hrn)
auth_record = dbsession.query(RegAuthority).filter_by(hrn=authority_hrn).first()
return auth_record.reg_pis
@validates ('expires')
- def validate_expires (self, key, incoming): return self.validate_datetime (key, incoming)
+ def validate_expires (self, key, incoming):
+ return self.validate_datetime (key, incoming)
####################
-class RegNode (RegRecord):
+class RegNode(RegRecord):
__tablename__ = 'nodes'
__mapper_args__ = { 'polymorphic_identity' : 'node' }
record_id = Column (Integer, ForeignKey ("records.record_id"), primary_key=True)
- def __init__ (self, **kwds):
- if 'type' not in kwds: kwds['type']='node'
+ def __init__(self, **kwds):
+ if 'type' not in kwds:
+ kwds['type']='node'
RegRecord.__init__(self, **kwds)
def __repr__ (self):
- return RegRecord.__repr__(self).replace("Record","Node")
+ return RegRecord.__repr__(self).replace("Record", "Node")
####################
-class RegUser (RegRecord):
+class RegUser(RegRecord):
__tablename__ = 'users'
# these objects will have type='user' in the records table
__mapper_args__ = { 'polymorphic_identity' : 'user' }
# a 'keys' tag, and assigning a list of strings in a reference column like this crashes
reg_keys = relationship \
('RegKey', backref='reg_user',
- cascade="all, delete, delete-orphan")
+ cascade = "all, delete, delete-orphan",
+ )
# so we can use RegUser (email=.., hrn=..) and the like
def __init__ (self, **kwds):
# handle local settings
- if 'email' in kwds: self.email=kwds.pop('email')
- if 'type' not in kwds: kwds['type']='user'
+ if 'email' in kwds:
+ self.email = kwds.pop('email')
+ if 'type' not in kwds:
+ kwds['type'] = 'user'
RegRecord.__init__(self, **kwds)
# append stuff at the end of the record __repr__
def __repr__ (self):
- result = RegRecord.__repr__(self).replace("Record","User")
- result.replace (">"," email=%s"%self.email)
- result += ">"
+        result = RegRecord.__repr__(self).replace("Record", "User")
+        # str.replace returns a new string: assign it back, otherwise the
+        # email is silently dropped from the repr
+        result = result.replace(">", " email={}>".format(self.email))
return result
@validates('email')
# meaning, when querying the whole records, we expect there should
# be a single query to fetch all the keys
# or, is it enough that we issue a single query to retrieve all the keys
-class RegKey (Base):
+class RegKey(Base):
__tablename__ = 'keys'
key_id = Column (Integer, primary_key=True)
- record_id = Column (Integer, ForeignKey ("records.record_id"))
+ record_id = Column (Integer, ForeignKey ("records.record_id"))
key = Column (String)
pointer = Column (Integer, default = -1)
def __init__ (self, key, pointer=None):
- self.key=key
- if pointer: self.pointer=pointer
+ self.key = key
+ if pointer:
+ self.pointer = pointer
def __repr__ (self):
- result="<key id=%s key=%s..."%(self.key_id,self.key[8:16],)
- try: result += " user=%s"%self.reg_user.record_id
+ result = "<key id=%s key=%s..." % (self.key_id, self.key[8:16],)
+ try: result += " user=%s" % self.reg_user.record_id
except: result += " no-user"
result += ">"
return result
self.allocation_state = kwds['allocation_state']
def __repr__(self):
- result = "<sliver_allocation sliver_id=%s allocation_state=%s" % \
- (self.sliver_id, self.allocation_state)
+ result = "<sliver_allocation sliver_id=%s allocation_state=%s"\
+ % (self.sliver_id, self.allocation_state)
return result
@validates('allocation_state')
dbsession.commit()
def sync(self, dbsession):
- constraints = [SliverAllocation.sliver_id==self.sliver_id]
+ constraints = [SliverAllocation.sliver_id == self.sliver_id]
results = dbsession.query(SliverAllocation).filter(and_(*constraints))
records = []
for result in results:
# convert an incoming record - typically from xmlrpc - into an object
def make_record_dict (record_dict):
assert ('type' in record_dict)
- type=record_dict['type'].split('+')[0]
- if type=='authority':
- result=RegAuthority (dict=record_dict)
- elif type=='user':
- result=RegUser (dict=record_dict)
- elif type=='slice':
- result=RegSlice (dict=record_dict)
- elif type=='node':
- result=RegNode (dict=record_dict)
+ type = record_dict['type'].split('+')[0]
+ if type == 'authority':
+ result = RegAuthority (dict=record_dict)
+ elif type == 'user':
+ result = RegUser (dict=record_dict)
+ elif type == 'slice':
+ result = RegSlice (dict=record_dict)
+ elif type == 'node':
+ result = RegNode (dict=record_dict)
else:
logger.debug("Untyped RegRecord instance")
- result=RegRecord (dict=record_dict)
- logger.info ("converting dict into Reg* with type=%s"%type)
- logger.info ("returning=%s"%result)
+ result = RegRecord (dict=record_dict)
+ logger.info("converting dict into Reg* with type=%s"%type)
+ logger.info("returning=%s"%result)
# xxx todo
# register non-db attributes in an extensions field
return result
-def make_record_xml (xml):
- xml_record = XML(xml)
- xml_dict = xml_record.todict()
+def make_record_xml (xml_str):
+ xml = XML(xml_str)
+ xml_dict = xml.todict()
logger.info("load from xml, keys=%s"%xml_dict.keys())
return make_record_dict (xml_dict)
# were the relationships data came from the testbed side
# for each type, a dict of the form {<field-name-exposed-in-record>:<alchemy_accessor_name>}
# so after that, an 'authority' record will e.g. have a 'reg-pis' field with the hrns of its pi-users
-augment_map={'authority': {'reg-pis':'reg_pis',},
- 'slice': {'reg-researchers':'reg_researchers',},
- 'user': {'reg-pi-authorities':'reg_authorities_as_pi',
- 'reg-slices':'reg_slices_as_researcher',},
- }
-
-def augment_with_sfa_builtins (local_record):
+augment_map = {'authority': {'reg-pis' : 'reg_pis',},
+ 'slice': {'reg-researchers' : 'reg_researchers',},
+ 'user': {'reg-pi-authorities' : 'reg_authorities_as_pi',
+ 'reg-slices' : 'reg_slices_as_researcher',},
+ }
+
+
+# xxx mystery
+# the way we use sqlalchemy might be a little wrong
+# in any case what has been observed is that (Reg)Records as returned by an sqlalchemy
+# query do not always have their __dict__ properly adjusted
+# typically a RegAuthority object would have its object.name set properly, but
+# object.__dict__ has no 'name' key
+# which is an issue because we rely on __dict__ for many things, in particular this
+# is what gets exposed to the drivers (this is historical and dates back before sqlalchemy)
+# so it is recommended to always run this function that will make sure
+# that such built-in fields are properly set in __dict__ too
+#
+def augment_with_sfa_builtins(local_record):
# don't ruin the import of that file in a client world
from sfa.util.xrn import Xrn
# add a 'urn' field
- setattr(local_record,'reg-urn',Xrn(xrn=local_record.hrn,type=local_record.type).urn)
+ setattr(local_record, 'reg-urn', Xrn(xrn=local_record.hrn, type=local_record.type).urn)
# users have keys and this is needed to synthesize 'users' sent over to CreateSliver
- if local_record.type=='user':
+ fields_to_check = []
+ if local_record.type == 'user':
user_keys = [ key.key for key in local_record.reg_keys ]
setattr(local_record, 'reg-keys', user_keys)
+ fields_to_check = ['email']
+ elif local_record.type == 'authority':
+ fields_to_check = ['name']
+ for field in fields_to_check:
+ if not field in local_record.__dict__:
+ logger.debug("augment_with_sfa_builtins: hotfixing missing '{}' in {}"
+ .format(field, local_record.hrn))
+ local_record.__dict__[field] = getattr(local_record, field)
# search in map according to record type
- type_map=augment_map.get(local_record.type,{})
+ type_map = augment_map.get(local_record.type, {})
# use type-dep. map to do the job
- for (field_name,attribute) in type_map.items():
+ for (field_name, attribute) in type_map.items():
# get related objects
- related_records = getattr(local_record,attribute,[])
+ related_records = getattr(local_record, attribute, [])
hrns = [ r.hrn for r in related_records ]
setattr (local_record, field_name, hrns)
from sfa.util.xml import XML
from sfa.trust.gid import GID
+from sfa.util.sfalogging import logger
+
class Record:
- def __init__(self, dict=None, xml=None):
+ def __init__(self, dict=None, xml_str=None):
if dict:
self.load_from_dict(dict)
- elif xml:
- xml_record = XML(xml)
- xml_dict = xml_record.todict()
+ elif xml_str:
+ xml = XML(xml_str)
+ xml_dict = xml.todict()
self.load_from_dict(xml_dict)
-
def get_field(self, field):
return self.__dict__.get(field, None)
# (and 'last_updated' does not make it at all)
# let's be flexible
def date_repr (self,fields):
- if not isinstance(fields,list): fields=[fields]
+ if not isinstance(fields,list):
+ fields = [fields]
for field in fields:
- value=getattr(self,field,None)
+ value = getattr(self,field,None)
if isinstance (value,datetime):
return datetime_to_string (value)
elif isinstance (value,(int,float)):
# fallback
return "** undef_datetime **"
- # it may be important to exclude relationships, which fortunately
+ #
+ # need to filter out results, esp. wrt relationships
+ # exclude_types must be a tuple so we can use isinstance
#
- def todict (self, exclude_types=None):
- if exclude_types is None: exclude_types=[]
- d=self.__dict__
- def exclude (k,v):
- if k.startswith('_'): return True
- if exclude_types:
- for exclude_type in exclude_types:
- if isinstance (v,exclude_type): return True
- return False
- keys=[k for (k,v) in d.items() if not exclude(k,v)]
- return dict ( [ (k,d[k]) for k in keys ] )
+ def record_to_dict (self, exclude_types=None):
+ if exclude_types is None:
+ exclude_types = ()
+ d = self.__dict__
+ def exclude (k, v):
+ return k.startswith('_') or isinstance (v, exclude_types)
+ keys = [ k for k, v in d.items() if not exclude(k, v) ]
+ return { k : d[k] for k in keys }
def toxml(self):
return self.save_as_xml()
def load_from_dict (self, d):
for (k,v) in d.iteritems():
# experimental
- if isinstance(v, StringTypes) and v.lower() in ['true']: v=True
- if isinstance(v, StringTypes) and v.lower() in ['false']: v=False
- setattr(self,k,v)
+ if isinstance(v, StringTypes) and v.lower() in ['true']:
+ v = True
+ if isinstance(v, StringTypes) and v.lower() in ['false']:
+ v = False
+ setattr(self, k, v)
# in addition we provide convenience for converting to and from xml records
# for this purpose only, we need the subclasses to define 'fields' as either
def save_as_xml (self):
# xxx not sure about the scope here
input_dict = dict( [ (key, getattr(self,key)) for key in self.fields() if getattr(self,key,None) ] )
- xml_record=XML("<record />")
- xml_record.parse_dict (input_dict)
+ xml_record = XML("<record />")
+ xml_record.parse_dict(input_dict)
return xml_record.toxml()
def dump(self, format=None, dump_parents=False, sort=False):
print 40*'='
print "RECORD"
# print remaining fields
- fields=self.fields()
+ fields = self.fields()
if sort: fields.sort()
for attrib_name in fields:
attrib = getattr(self, attrib_name)
# sounds like this should be __repr__ instead ??
# Produce the ABAC assertion. Something like [ABAC cred: Me.role<-You] or similar
- def get_summary_tostring(self):
+ def pretty_cred(self):
result = "[ABAC cred: " + str(self.get_head())
for tail in self.get_tails():
result += "<-%s" % str(tail)
cred = Credential(cred=credential)
self.client_cred = cred
logger.debug("Auth.check: handling hrn=%s and credential=%s"%\
- (hrn,cred.get_summary_tostring()))
+ (hrn,cred.pretty_cred()))
if cred.type not in ['geni_sfa']:
raise CredentialNotVerifiable(cred.type, "%s not supported" % cred.type)
# make sure the client_gid is not blank
if not self.client_gid:
- raise MissingCallerGID(self.client_cred.get_subject())
+ raise MissingCallerGID(self.client_cred.pretty_subject())
# validate the client cert if it exists
if self.peer_cert:
class Certificate:
digest = "md5"
- cert = None
- issuerKey = None
- issuerSubject = None
- parent = None
+# x509 = None
+# issuerKey = None
+# issuerSubject = None
+# parent = None
isCA = None # will be a boolean once set
separator="-----parent-----"
# @param isCA If !=None, set whether this cert is for a CA
def __init__(self, lifeDays=1825, create=False, subject=None, string=None, filename=None, isCA=None):
+ # these used to be defined in the class !
+ self.x509 = None
+ self.issuerKey = None
+ self.issuerSubject = None
+ self.parent = None
+
self.data = {}
if create or subject:
self.create(lifeDays)
# Create a blank X509 certificate and store it in this object.
def create(self, lifeDays=1825):
- self.cert = crypto.X509()
+ self.x509 = crypto.X509()
# FIXME: Use different serial #s
- self.cert.set_serial_number(3)
- self.cert.gmtime_adj_notBefore(0) # 0 means now
- self.cert.gmtime_adj_notAfter(lifeDays*60*60*24) # five years is default
- self.cert.set_version(2) # x509v3 so it can have extensions
+ self.x509.set_serial_number(3)
+ self.x509.gmtime_adj_notBefore(0) # 0 means now
+ self.x509.gmtime_adj_notAfter(lifeDays*60*60*24) # five years is default
+ self.x509.set_version(2) # x509v3 so it can have extensions
##
# certificate object.
def load_from_pyopenssl_x509(self, x509):
- self.cert = x509
+ self.x509 = x509
##
# Load the certificate from a string
else:
parts = string.split(Certificate.separator, 1)
- self.cert = crypto.load_certificate(crypto.FILETYPE_PEM, parts[0])
+ self.x509 = crypto.load_certificate(crypto.FILETYPE_PEM, parts[0])
# if there are more certs, then create a parent and let the parent load
# itself from the remainder of the string
# @param save_parents If save_parents==True, then also save the parent certificates.
def save_to_string(self, save_parents=True):
- string = crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert)
+ string = crypto.dump_certificate(crypto.FILETYPE_PEM, self.x509)
if save_parents and self.parent:
string = string + self.parent.save_to_string(save_parents)
return string
self.issuerReq = req
if cert:
# if a cert was supplied, then get the subject from the cert
- subject = cert.cert.get_subject()
+ subject = cert.x509.get_subject()
assert(subject)
self.issuerSubject = subject
# Get the issuer name
def get_issuer(self, which="CN"):
- x = self.cert.get_issuer()
+ x = self.x509.get_issuer()
return getattr(x, which)
##
setattr(subj, key, name[key])
else:
setattr(subj, "CN", name)
- self.cert.set_subject(subj)
+ self.x509.set_subject(subj)
##
# Get the subject name of the certificate
def get_subject(self, which="CN"):
- x = self.cert.get_subject()
+ x = self.x509.get_subject()
return getattr(x, which)
##
# Get a pretty-print subject name of the certificate
-
- def get_printable_subject(self):
- x = self.cert.get_subject()
- return "[ OU: %s, CN: %s, SubjectAltName: %s ]" % (getattr(x, "OU"), getattr(x, "CN"), self.get_data())
+    # let's try to make this a little more usable, as it makes logs hairy
+ pretty_fields = ['email']
+ def filter_chunk(self, chunk):
+ for field in self.pretty_fields:
+ if field in chunk:
+ return " "+chunk
+
+ def pretty_cert(self):
+ message = "[Cert."
+ x = self.x509.get_subject()
+ ou = getattr(x, "OU")
+ if ou: message += " OU: {}".format(ou)
+ cn = getattr(x, "CN")
+ if cn: message += " CN: {}".format(cn)
+ data = self.get_data(field='subjectAltName')
+ if data:
+            message += " SubjectAltName:"
+ filtered = [self.filter_chunk(chunk) for chunk in data.split()]
+ message += " ".join( [f for f in filtered if f])
+ omitted = len ([f for f in filtered if not f])
+ if omitted:
+ message += "..+{} omitted".format(omitted)
+ message += "]"
+ return message
##
# Get the public key of the certificate.
def set_pubkey(self, key):
assert(isinstance(key, Keypair))
- self.cert.set_pubkey(key.get_openssl_pkey())
+ self.x509.set_pubkey(key.get_openssl_pkey())
##
# Get the public key of the certificate.
def get_pubkey(self):
m2x509 = X509.load_cert_string(self.save_to_string())
pkey = Keypair()
- pkey.key = self.cert.get_pubkey()
+ pkey.key = self.x509.get_pubkey()
pkey.m2key = m2x509.get_pubkey()
return pkey
# raise "Cannot add extension %s which had val %s with new val %s" % (name, oldExtVal, value)
ext = crypto.X509Extension (name, critical, value)
- self.cert.add_extensions([ext])
+ self.x509.add_extensions([ext])
##
# Get an X509 extension from the certificate
def sign(self):
logger.debug('certificate.sign')
- assert self.cert != None
+ assert self.x509 != None
assert self.issuerSubject != None
assert self.issuerKey != None
- self.cert.set_issuer(self.issuerSubject)
- self.cert.sign(self.issuerKey.get_openssl_pkey(), self.digest)
+ self.x509.set_issuer(self.issuerSubject)
+ self.x509.sign(self.issuerKey.get_openssl_pkey(), self.digest)
##
# Verify the authenticity of a certificate.
# XXX alternatively, if openssl has been patched, do the much simpler:
# try:
- # self.cert.verify(pkey.get_openssl_key())
+ # self.x509.verify(pkey.get_openssl_key())
# return 1
# except:
# return 0
# until a certificate is found that is signed by a trusted root.
# verify expiration time
- if self.cert.has_expired():
+ if self.x509.has_expired():
if debug_verify_chain:
- logger.debug("verify_chain: NO, Certificate %s has expired" % self.get_printable_subject())
- raise CertExpired(self.get_printable_subject(), "client cert")
+ logger.debug("verify_chain: NO, Certificate %s has expired" % self.pretty_cert())
+ raise CertExpired(self.pretty_cert(), "client cert")
# if this cert is signed by a trusted_cert, then we are set
for trusted_cert in trusted_certs:
if self.is_signed_by_cert(trusted_cert):
# verify expiration of trusted_cert ?
- if not trusted_cert.cert.has_expired():
+ if not trusted_cert.x509.has_expired():
if debug_verify_chain:
logger.debug("verify_chain: YES. Cert %s signed by trusted cert %s"%(
- self.get_printable_subject(), trusted_cert.get_printable_subject()))
+ self.pretty_cert(), trusted_cert.pretty_cert()))
return trusted_cert
else:
if debug_verify_chain:
logger.debug("verify_chain: NO. Cert %s is signed by trusted_cert %s, but that signer is expired..."%(
- self.get_printable_subject(),trusted_cert.get_printable_subject()))
- raise CertExpired(self.get_printable_subject()," signer trusted_cert %s"%trusted_cert.get_printable_subject())
+ self.pretty_cert(),trusted_cert.pretty_cert()))
+ raise CertExpired(self.pretty_cert()," signer trusted_cert %s"%trusted_cert.pretty_cert())
# if there is no parent, then no way to verify the chain
if not self.parent:
if debug_verify_chain:
logger.debug("verify_chain: NO. %s has no parent and issuer %s is not in %d trusted roots"%\
- (self.get_printable_subject(), self.get_issuer(), len(trusted_certs)))
- raise CertMissingParent(self.get_printable_subject() + \
+ (self.pretty_cert(), self.get_issuer(), len(trusted_certs)))
+ raise CertMissingParent(self.pretty_cert() + \
": Issuer %s is not one of the %d trusted roots, and cert has no parent." %\
(self.get_issuer(), len(trusted_certs)))
if not self.is_signed_by_cert(self.parent):
if debug_verify_chain:
logger.debug("verify_chain: NO. %s is not signed by parent %s, but by %s"%\
- (self.get_printable_subject(),
- self.parent.get_printable_subject(),
+ (self.pretty_cert(),
+ self.parent.pretty_cert(),
self.get_issuer()))
raise CertNotSignedByParent("%s: Parent %s, issuer %s"\
- % (self.get_printable_subject(),
- self.parent.get_printable_subject(),
+ % (self.pretty_cert(),
+ self.parent.pretty_cert(),
self.get_issuer()))
# Confirm that the parent is a CA. Only CAs can be trusted as
# extension and hope there are no other basicConstraints
if not self.parent.isCA and not (self.parent.get_extension('basicConstraints') == 'CA:TRUE'):
logger.warn("verify_chain: cert %s's parent %s is not a CA" % \
- (self.get_printable_subject(), self.parent.get_printable_subject()))
- raise CertNotSignedByParent("%s: Parent %s not a CA" % (self.get_printable_subject(),
- self.parent.get_printable_subject()))
+ (self.pretty_cert(), self.parent.pretty_cert()))
+ raise CertNotSignedByParent("%s: Parent %s not a CA" % (self.pretty_cert(),
+ self.parent.pretty_cert()))
# if the parent isn't verified...
if debug_verify_chain:
logger.debug("verify_chain: .. %s, -> verifying parent %s"%\
- (self.get_printable_subject(),self.parent.get_printable_subject()))
+ (self.pretty_cert(),self.parent.pretty_cert()))
self.parent.verify_chain(trusted_certs)
return
### more introspection
def get_extensions(self):
# pyOpenSSL does not have a way to get extensions
- triples=[]
+ triples = []
m2x509 = X509.load_cert_string(self.save_to_string())
- nb_extensions=m2x509.get_ext_count()
+ nb_extensions = m2x509.get_ext_count()
logger.debug("X509 had %d extensions"%nb_extensions)
for i in range(nb_extensions):
ext=m2x509.get_ext_at(i)
return self.data.keys()
def get_all_datas (self):
- triples=self.get_extensions()
+ triples = self.get_extensions()
for name in self.get_data_names():
triples.append( (name,self.get_data(name),'data',) )
return triples
def dump_string (self,show_extensions=False):
result = ""
- result += "CERTIFICATE for %s\n"%self.get_printable_subject()
+ result += "CERTIFICATE for %s\n"%self.pretty_cert()
result += "Issued by %s\n"%self.get_issuer()
filename=self.get_filename()
if filename: result += "Filename %s\n"%filename
if show_extensions:
- all_datas=self.get_all_datas()
+ all_datas = self.get_all_datas()
result += " has %d extensions/data attached"%len(all_datas)
- for (n,v,c) in all_datas:
+ for (n, v, c) in all_datas:
if c=='data':
result += " data: %s=%s\n"%(n,v)
else:
self.xmlsec_path = path + '/' + 'xmlsec1'
break
- def get_subject(self):
+ def pretty_subject(self):
subject = ""
if not self.gidObject:
self.decode()
if self.gidObject:
- subject = self.gidObject.get_printable_subject()
+ subject = self.gidObject.pretty_cert()
return subject
# sounds like this should be __repr__ instead ??
- def get_summary_tostring(self):
+ def pretty_cred(self):
if not self.gidObject:
self.decode()
- obj = self.gidObject.get_printable_subject()
- caller = self.gidCaller.get_printable_subject()
+ obj = self.gidObject.pretty_cert()
+ caller = self.gidCaller.pretty_cert()
exp = self.get_expiration()
# Summarize the rights too? The issuer?
- return "[ Grant %s rights on %s until %s ]" % (caller, obj, exp)
+ return "[Cred. for {caller} rights on {obj} until {exp} ]".format(**locals())
def get_signature(self):
if not self.signature:
xmlschema = etree.XMLSchema(schema_doc)
if not xmlschema.validate(tree):
error = xmlschema.error_log.last_error
- message = "%s: %s (line %s)" % (self.get_summary_tostring(), error.message, error.line)
+ message = "%s: %s (line %s)" % (self.pretty_cred(), error.message, error.line)
raise CredentialNotVerifiable(message)
if trusted_certs_required and trusted_certs is None:
# make sure it is not expired
if self.get_expiration() < datetime.datetime.utcnow():
raise CredentialNotVerifiable("Credential %s expired at %s" % \
- (self.get_summary_tostring(),
+ (self.pretty_cred(),
self.expiration.strftime(SFATIME_FORMAT)))
# Verify the signatures
msg = verified[mstart:mend]
logger.warning("Credential.verify - failed - xmlsec1 returned {}".format(verified.strip()))
raise CredentialNotVerifiable("xmlsec1 error verifying cred %s using Signature ID %s: %s" % \
- (self.get_summary_tostring(), ref, msg))
+ (self.pretty_cred(), ref, msg))
os.remove(filename)
# Verify the parents (delegation)
if trusted_gids and len(trusted_gids) > 0:
root_cred_signer.verify_chain(trusted_gids)
else:
- logger.debug("No trusted gids. Cannot verify that cred signer is signed by a trusted authority. Skipping that check.")
+ logger.debug("Cannot verify that cred signer is signed by a trusted authority. "
+ "No trusted gids. Skipping that check.")
# See if the signer is an authority over the domain of the target.
# There are multiple types of authority - accept them all here
# Maybe should be (hrn, type) = urn_to_hrn(root_cred_signer.get_urn())
root_cred_signer_type = root_cred_signer.get_type()
- if (root_cred_signer_type.find('authority') == 0):
+ if root_cred_signer_type.find('authority') == 0:
#logger.debug('Cred signer is an authority')
# signer is an authority, see if target is in authority's domain
signerhrn = root_cred_signer.get_hrn()
# Give up, credential does not pass issuer verification
- raise CredentialNotVerifiable("Could not verify credential owned by %s for object %s. Cred signer %s not the trusted authority for Cred target %s" % \
- (self.gidCaller.get_urn(), self.gidObject.get_urn(), root_cred_signer.get_hrn(), root_target_gid.get_hrn()))
-
+ raise CredentialNotVerifiable(
+ "Could not verify credential owned by {} for object {}. "
+ "Cred signer {} not the trusted authority for Cred target {}"
+ .format(self.gidCaller.get_hrn(), self.gidObject.get_hrn(),
+ root_cred_signer.get_hrn(), root_target_gid.get_hrn()))
##
# -- For Delegates (credentials with parents) verify that:
# make sure the rights given to the child are a subset of the
# parents rights (and check delegate bits)
if not parent_cred.get_privileges().is_superset(self.get_privileges()):
- raise ChildRightsNotSubsetOfParent(("Parent cred ref %s rights " % parent_cred.get_refid()) +
- self.parent.get_privileges().save_to_string() + (" not superset of delegated cred %s ref %s rights " % \
- (self.get_summary_tostring(), self.get_refid())) +
- self.get_privileges().save_to_string())
+ message = (
+ "Parent cred {} (ref {}) rights {} "
+ " not superset of delegated cred {} (ref {}) rights {}"
+ .format(parent_cred.pretty_cred(),parent_cred.get_refid(),
+ parent_cred.get_privileges().pretty_rights(),
+ self.pretty_cred(), self.get_refid(),
+ self.get_privileges().pretty_rights()))
+ logger.error(message)
+ logger.error("parent details {}".format(parent_cred.get_privileges().save_to_string()))
+ logger.error("self details {}".format(self.get_privileges().save_to_string()))
+ raise ChildRightsNotSubsetOfParent(message)
# make sure my target gid is the same as the parent's
if not parent_cred.get_gid_object().save_to_string() == \
self.get_gid_object().save_to_string():
- raise CredentialNotVerifiable("Delegated cred %s: Target gid not equal between parent and child. Parent %s" % \
- (self.get_summary_tostring(), parent_cred.get_summary_tostring()))
+ message = (
+ "Delegated cred {}: Target gid not equal between parent and child. Parent {}"
+ .format(self.pretty_cred(), parent_cred.pretty_cred()))
+ logger.error(message)
+ logger.error("parent details {}".format(parent_cred.save_to_string()))
+ logger.error("self details {}".format(self.save_to_string()))
+ raise CredentialNotVerifiable(message)
# make sure my expiry time is <= my parent's
if not parent_cred.get_expiration() >= self.get_expiration():
- raise CredentialNotVerifiable("Delegated credential %s expires after parent %s" % \
- (self.get_summary_tostring(), parent_cred.get_summary_tostring()))
+ raise CredentialNotVerifiable(
+ "Delegated credential {} expires after parent {}"
+ .format(self.pretty_cred(), parent_cred.pretty_cred()))
# make sure my signer is the parent's caller
if not parent_cred.get_gid_caller().save_to_string(False) == \
self.get_signature().get_issuer_gid().save_to_string(False):
- raise CredentialNotVerifiable("Delegated credential %s not signed by parent %s's caller" % \
- (self.get_summary_tostring(), parent_cred.get_summary_tostring()))
+ message = "Delegated credential {} not signed by parent {}'s caller"\
+ .format(self.pretty_cred(), parent_cred.pretty_cred())
+ logger.error(message)
+ logger.error("compare1 parent {}".format(parent_cred.get_gid_caller().pretty_cert()))
+ logger.error("compare1 parent details {}".format(parent_cred.get_gid_caller().save_to_string()))
+ logger.error("compare2 self {}".format(self.get_signature().get_issuer_gid().pretty_cert()))
+ logger.error("compare2 self details {}".format(self.get_signature().get_issuer_gid().save_to_string()))
+ raise CredentialNotVerifiable(message)
# Recurse
if parent_cred.parent:
# else this looks like a delegated credential, and the real caller is the issuer
else:
actual_caller_hrn=issuer_hrn
- logger.info("actual_caller_hrn: caller_hrn=%s, issuer_hrn=%s, returning %s"%(caller_hrn,issuer_hrn,actual_caller_hrn))
+ logger.info("actual_caller_hrn: caller_hrn=%s, issuer_hrn=%s, returning %s"
+ %(caller_hrn,issuer_hrn,actual_caller_hrn))
return actual_caller_hrn
##
# show_xml is ignored
def dump_string(self, dump_parents=False, show_xml=None):
result=""
- result += "CREDENTIAL %s\n" % self.get_subject()
+ result += "CREDENTIAL %s\n" % self.pretty_subject()
filename=self.get_filename()
if filename: result += "Filename %s\n"%filename
privileges = self.get_privileges()
# @param lifeDays life of GID in days - default is 1825==5 years
# @param email Email address to put in subjectAltName - default is None
- def __init__(self, create=False, subject=None, string=None, filename=None, uuid=None, hrn=None, urn=None, lifeDays=1825, email=None):
+ def __init__(self, create=False, subject=None, string=None, filename=None,
+ uuid=None, hrn=None, urn=None, lifeDays=1825, email=None):
self.uuid = None
self.hrn = None
self.urn = None
if self.parent:
# make sure the parent's hrn is a prefix of the child's hrn
if not hrn_authfor_hrn(self.parent.get_hrn(), self.get_hrn()):
- raise GidParentHrn("This cert HRN %s isn't in the namespace for parent HRN %s" % (self.get_hrn(), self.parent.get_hrn()))
+ raise GidParentHrn(
+ "This cert HRN {} isn't in the namespace for parent HRN {}"
+ .format(self.get_hrn(), self.parent.get_hrn()))
# Parent must also be an authority (of some type) to sign a GID
# There are multiple types of authority - accept them all here
if not self.parent.get_type().find('authority') == 0:
- raise GidInvalidParentHrn("This cert %s's parent %s is not an authority (is a %s)" % (self.get_hrn(), self.parent.get_hrn(), self.parent.get_type()))
+ raise GidInvalidParentHrn(
+ "This cert {}'s parent {} is not an authority (is a {})"
+ .format(self.get_hrn(), self.parent.get_hrn(), self.parent.get_type()))
# Then recurse up the chain - ensure the parent is a trusted
# root or is in the namespace of a trusted root
# trusted_hrn = trusted_hrn[:trusted_hrn.rindex('.')]
cur_hrn = self.get_hrn()
if not hrn_authfor_hrn(trusted_hrn, cur_hrn):
- raise GidParentHrn("Trusted root with HRN %s isn't a namespace authority for this cert: %s" % (trusted_hrn, cur_hrn))
+ raise GidParentHrn(
+ "Trusted root with HRN {} isn't a namespace authority for this cert: {}"
+ .format(trusted_hrn, cur_hrn))
# There are multiple types of authority - accept them all here
if not trusted_type.find('authority') == 0:
- raise GidInvalidParentHrn("This cert %s's trusted root signer %s is not an authority (is a %s)" % (self.get_hrn(), trusted_hrn, trusted_type))
-
- return
+ raise GidInvalidParentHrn(
+ "This cert {}'s trusted root signer {} is not an authority (is a {})"
+ .format(self.get_hrn(), trusted_hrn, trusted_type))
return False
return True
+ def pretty_rights(self):
+ return "<Rights{}>".format(";".join(["{}".format(r) for r in self.rights]))
# Credential has not expired
if cred.expiration and cred.expiration < datetime.datetime.utcnow():
- return False, None, "ABAC Credential expired at %s (%s)" % (cred.expiration.strftime(SFATIME_FORMAT), cred.get_summary_tostring())
+ return False, None, "ABAC Credential expired at %s (%s)" % (cred.expiration.strftime(SFATIME_FORMAT), cred.pretty_cred())
# Must be ABAC
if cred.get_cred_type() != ABACCredential.ABAC_CREDENTIAL_TYPE:
return False, None, "Credential not of type ABAC but %s" % cred.get_cred_type
if cred.signature is None or cred.signature.gid is None:
- return False, None, "Credential malformed: missing signature or signer cert. Cred: %s" % cred.get_summary_tostring()
+ return False, None, "Credential malformed: missing signature or signer cert. Cred: %s" % cred.pretty_cred()
user_gid = cred.signature.gid
user_urn = user_gid.get_urn()
# URN of signer from cert must match URN of 'speaking-for' argument
if user_urn != speaking_for_urn:
return False, None, "User URN from cred doesn't match speaking_for URN: %s != %s (cred %s)" % \
- (user_urn, speaking_for_urn, cred.get_summary_tostring())
+ (user_urn, speaking_for_urn, cred.pretty_cred())
tails = cred.get_tails()
if len(tails) != 1:
return False, None, "Invalid ABAC-SF credential: Need exactly 1 tail element, got %d (%s)" % \
- (len(tails), cred.get_summary_tostring())
+ (len(tails), cred.pretty_cred())
user_keyid = get_cert_keyid(user_gid)
tool_keyid = get_cert_keyid(tool_gid)
if user_keyid != principal_keyid or \
tool_keyid != subject_keyid or \
role != ('speaks_for_%s' % user_keyid):
- return False, None, "ABAC statement doesn't assert U.speaks_for(U)<-T (%s)" % cred.get_summary_tostring()
+ return False, None, "ABAC statement doesn't assert U.speaks_for(U)<-T (%s)" % cred.pretty_cred()
# If schema provided, validate against schema
if HAVELXML and schema and os.path.exists(schema):
xmlschema = etree.XMLSchema(schema_doc)
if not xmlschema.validate(tree):
error = xmlschema.error_log.last_error
- message = "%s: %s (line %s)" % (cred.get_summary_tostring(), error.message, error.line)
+ message = "%s: %s (line %s)" % (cred.pretty_cred(), error.message, error.line)
return False, None, ("XML Credential schema invalid: %s" % message)
if trusted_roots:
if not isinstance(cred_value, ABACCredential):
cred = CredentialFactory.createCred(cred_value)
-# print "Got a cred to check speaksfor for: %s" % cred.get_summary_tostring()
+# print "Got a cred to check speaksfor for: %s" % cred.pretty_cred()
# #cred.dump(True, True)
# print "Caller: %s" % caller_gid.dump_string(2, True)
# See if this is a valid speaks_for
# Save it
cred.save_to_file(cred_filename)
print "Created ABAC credential: '%s' in file %s" % \
- (cred.get_summary_tostring(), cred_filename)
+ (cred.pretty_cred(), cred_filename)
# FIXME: Assumes xmlsec1 is on path
# FIXME: Assumes signer is itself signed by an 'ma_gid' that can be trusted
+++ /dev/null
-This location is a placeholder for any specifics about
-e.g. deployments or test scripts that do not belong in sfa/ because we
-do not want them to be packaged.
+++ /dev/null
-python-apt
-python-dateutil
-python-debian
-python-debianbts
-python-ldap
-SOAPpy
-SQLAlchemy
-Tempita
-argparse
-chardet
-decorator
-passlib
-psycopg2
-pyOpenSSL
-elementtree
-lxml
-nose
-simplejson
-sqlalchemy-migrate
-wsgiref
-
+++ /dev/null
-###########################################################################
-# Copyright (C) 2012 by
-# <savakian@sfa2.grenoble.iotlab.info>
-#
-# Copyright: See COPYING file that comes with this distribution
-#
-###########################################################################
-#LDAP import
-from sfa.iotlab.LDAPapi import LDAPapi
-import ldap.modlist as modlist
-
-#logger sfa
-from sfa.util.sfalogging import logger
-
-#OAR imports
-from datetime import datetime
-from sfa.util.sfatime import SFATIME_FORMAT
-from sfa.iotlab.OARrestapi import OARrestapi
-
-#Test iotlabdriver
-from sfa.iotlab.iotlabdriver import IotlabDriver
-from sfa.iotlab.iotlabshell import IotlabShell
-from sfa.util.config import Config
-
-from sfa.generic import Generic
-import os
-import sys
-
-
-def message_and_wait(message):
- print message
- raw_input("Press Enter to continue...")
-
-def parse_options():
-
- #arguments supplied
- if len(sys.argv) > 1 :
- options_list = sys.argv[1:]
- #For each valid option, execute the associated function
- #(defined in the dictionnary supported_options)
- job_id = 1
- valid_options_dict = {}
- value_list = []
- #Passing options to the script should be done like this :
- #-10 OAR -2 IotlabDriver
- for option in options_list:
- if option in supported_options:
- #update the values used for the fonctions associated
- #with the options
-
- valid_options_dict[option] = value_list
- #empty the values list for next option
- value_list = []
- print valid_options_dict
- else:
- if option[0] == '-':
- value_list.append(option[1:])
- print "value_list", value_list
-
-
- return valid_options_dict
-
-def TestLdap(uid = None):
- logger.setLevelDebug()
-
- ldap_server = LDAPapi()
- ret = ldap_server.conn.connect(bind=True)
- ldap_server.conn.close()
- print "TEST ldap_server.conn.connect(bind=True)" , ret
-
- ret = ldap_server.conn.connect(bind=False)
- ldap_server.conn.close()
- print "TEST ldap_server.conn.connect(bind=False)", ret
-
- message_and_wait("\r\n \tLdapSeach : Get all users")
- ret = ldap_server.LdapSearch()
- print "\r\n", ret
-
- message_and_wait("\r\n \tLdapSeach : Get user with uid avakian")
- ret = ldap_server.LdapSearch('(uid=avakian)', [])
- print "\r\n", ret
-
- message_and_wait("\r\n generate ...")
- password = ldap_server.login_pwd.generate_password()
- print "\r\n TEST generate_password ", password
-
- data = {}
- data['last_name'] = "Drake"
- data['first_name'] = "Tim"
- data['givenName'] = data['first_name']
- data['mail'] = "robin@arkham.fr"
-
- record = {}
- record['hrn'] = 'iotlab.drake'
- record['last_name'] = "Drake"
- record['first_name'] = "Tim"
- record['mail'] = "robin@arkham.fr"
-
- login = ldap_server.LdapGenerateUniqueLogin(data)
- print "\r\n Robin \tgenerate_login ", login
-
- message_and_wait("\r\n find_max_uidNumber")
- maxi = ldap_server.find_max_uidNumber()
- print maxi
-
-
-
- ret = ldap_server.LdapAddUser(data)
- print "\r\n Robin \tLdapAddUser ", ret
-
- req_ldap = '(uid=' + login + ')'
- ret = ldap_server.LdapSearch(req_ldap, [])
- print "\r\n Robin \tldap_server.LdapSearch ids = %s %s" % (login, ret)
-
- message_and_wait("Password methods")
- password = "Thridrobin"
- enc = ldap_server.login_pwd.encrypt_password(password)
- print "\r\n Robin \tencrypt_password ", enc
-
- ret = ldap_server.LdapModifyUser(record, {'userPassword':enc})
- print "\r\n Robin \tChange password LdapModifyUser ", ret
-
-
-
- datanight = {}
- datanight['last_name'] = "Grayson"
- datanight['first_name'] = "Dick"
- datanight['givenName'] = datanight['first_name']
- datanight['mail'] = "nightwing@arkham.fr"
-
-
- record_night = {}
- record_night['hrn'] = 'iotlab.grayson'
- record_night['last_name'] = datanight['last_name']
- record_night['first_name'] = datanight['first_name']
- record_night['mail'] = datanight['mail']
-
- message_and_wait("\r\n LdapFindUser")
- ret = ldap_server.LdapFindUser(record_night)
- print "\r\n Nightwing \tldap_server.LdapFindUser %s : %s" % (record_night,
- ret)
-
- #ret = ldap_server.LdapSearch('(uid=grayson)', [])
- #print "\r\n Nightwing \tldap_server.LdapSearch ids = %s %s" %('grayson',ret )
- message_and_wait("Add user then delete user")
- ret = ldap_server.LdapAddUser(datanight)
- print "\r\n Nightwing \tLdapAddUser ", ret
-
- #ret = ldap_server.LdapResetPassword(record_night)
- #print "\r\n Nightwing \tLdapResetPassword de %s : %s" % (record_night, ret)
-
- ret = ldap_server.LdapDeleteUser(record_night)
- print "\r\n Nightwing \tLdapDeleteUser ", ret
-
-
- #record_myslice = {}
- #record_myslice['hrn']= 'iotlab.myslice'
- #record_myslice['last_name'] = 'myslice'
- #record_myslice['first_name'] = 'myslice'
- #record_myslice['mail'] = 'nturro@inria.fr'
- #pubkeymyslice = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuyRPwn8PZxjdhu+ciRuPyM0eVBn7XS7i3tym9F30UVhaCd09a/UEmGn7WJZdfsxV3hXqG1Wc766FEst97NuzHzELSuvy/rT96J0UHG4wae4pnzOLd6NwFdZh7pkPsgHMHxK9ALVE68Puu+EDSOB5bBZ9Q624wCIGxEpmuS/+X+dDBTKgG5Hi0WA1uKJwhLSbbXb38auh4FlYgXPsdpljTIJatt+zGL0Zsy6fdrsVRc5W8kr3/SmE4OMNyabKBNyxioSEuYhRSjoQAHnYoevEjZniP8IzscKK7qwelzGUfnJEzexikhsQamhAFti2ReiFfoHBRZxnSc49ioH7Kaci5w== root@rhoecos3.ipv6.lip6.fr"
-
- #pubkeytestuser = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYS8tzufciTm6GdNUGHQc64OfTxFebMYUwh/Jl04IPTvjjr26uakbM0M2v33HxZ5Q7PnmPN9pB/w+a+f7a7J4cNs/tApOMg2hb6UrLaOrdnDMOs4KZlfElyDsF3Zx5QwxPYvzsKADAbDVoX4NF9PttuDLdm2l3nLSvm89jfla00GBg+K8grdOCHyYZVX/Wt7kxhXDK3AidQhKJgn+iD5GxvtWMBE+7S5kJGdRW1W10lSLBW3+VNsCrKJB2s8L55Xz/l2HNBScU7T0VcMQJrFxEXKzLPagZsMz0lfLzHESoGHIZ3Tz85DfECbTtMxLts/4KoAEc3EE+PYr2VDeAggDx testuser@myslice"
-
-
-
-
- return
-
-
-def get_stuff(oar, uri):
- import httplib
- import json
- headers = {}
- data = json.dumps({})
-
- headers['X-REMOTE_IDENT'] = 'avakian'
- headers['content-length'] = '0' #seems that it does not work if we don't add this
-
-
- conn = httplib.HTTPConnection(oar.oarserver['ip'], oar.oarserver['port'])
- conn.request("GET", uri, data , headers )
- resp = (conn.getresponse()).read()
-
- conn.close()
-
-
- js = json.loads(resp)
- return js
-
-
-def TestOAR(job_id = None):
- print "JOB_ID", job_id
- if isinstance(job_id, list) :
- if len(job_id) >= 1:
- job_id = job_id[0]
- else:
- job_id = '1'
- else:
- job_id = '1'
- print "JOB_ID", job_id
- oar = OARrestapi()
- print "============USING OAR CLASS PARSING METHODS ================"
-
- message_and_wait("\r\nGET_reserved_nodes")
- nodes = oar.parser.SendRequest("GET_reserved_nodes", username = 'avakian')
- print "\r\n OAR GET_reserved_nodes ", nodes
-
- message_and_wait("GET_jobs")
- jobs = oar.parser.SendRequest("GET_jobs")
- print "\r\n OAR GET_jobs ", jobs
-
- message_and_wait( "\r\n GET_jobs_id")
- jobs = oar.parser.SendRequest("GET_jobs_id", job_id, 'avakian')
- print "\r\n OAR GET_jobs_id ", jobs
-
- # Check that the OAR requests are valid
-
- print "============RAW JSON FROM OAR ================"
- message_and_wait("\r\n Get all the jobs in the state Running,Waiting, \
- Launching of the user ")
- uri = '/oarapi/jobs/details.json?state=Running,Waiting,Launching&user=avakian'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
-
- message_and_wait("\r\nGet information on the job identified by its job_id")
- uri = '/oarapi/jobs/' + job_id +'.json'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
-
- message_and_wait(" \r\nGet all the job's resources, \
- job defined by its job id %s"%(job_id))
- uri = '/oarapi/jobs/' + job_id + '/resources.json'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
-
- message_and_wait("\r\n Get server's date and timezone")
- server_timestamp, server_tz = oar.parser.SendRequest("GET_timezone")
- print "\r\n OAR GetTimezone ", server_timestamp, server_tz
- print(datetime.fromtimestamp(int(server_timestamp)).strftime(SFATIME_FORMAT))
-
- message_and_wait("\r\n Get all the resources with details from OAR")
- uri = '/oarapi/resources/full.json'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
- message_and_wait("\r\n Get all the jobs scheduled by the user")
- uri = '/oarapi/jobs.json?user=avakian'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
- return
-
-
-
-def TestIotlabshell(param = None):
-
- config = Config()
- shell = IotlabShell(config)
-
- message_and_wait("\r\n \r\n GetReservedNodes")
- nodes = shell.GetReservedNodes()
- print nodes
-
- message_and_wait("\r\n GetPersons")
- persons = shell.GetPersons()
- print "\r\n \r\n GetPersons", persons
-
-
- message_and_wait("\r\n GetLeases for the login avakian")
- leases = shell.GetLeases(login='avakian')
- print leases
-
- message_and_wait("\r\n GetLeases for slice iotlab.avakian_slice")
- leases = shell.GetLeases(lease_filter_dict=
- {'slice_hrn':'iotlab.avakian_slice'})
- print leases
-
- message_and_wait("\r\n GetLeases t_from 1405070000 ")
- leases = shell.GetLeases(lease_filter_dict={'t_from':1405070000})
- print leases
-
-def TestIotlabDriver(job_id = None):
- if job_id is None:
- job_id = 1
-
- if isinstance(job_id, list) and len(job_id) == 1:
- job_id = job_id[0]
-
- api = Generic.the_flavour().make_api(interface='registry')
- iotlabdriver = IotlabDriver(api)
-
- # Iotlabdriver methods
- slice_hrn = 'iotlab.avakian_slice'
- message_and_wait(("\r\n GetSlices slice_hrn %s "%(slice_hrn)))
- sl = iotlabdriver.GetSlices(
- slice_filter= slice_hrn, slice_filter_type='slice_hrn')
- print sl
-
- message_and_wait("\r\n GetSlices slice filter 20 (record_id_user) ")
- sl = iotlabdriver.GetSlices(slice_filter='20',
- slice_filter_type='record_id_user')
- print sl
-
- message_and_wait("\r\n GetSlices :all slice")
- sl = iotlabdriver.GetSlices()
- print sl
-
-
-
-
-
-
-def TestSQL(arg = None):
- from sfa.storage.model import make_record, RegSlice, RegRecord
- from sfa.storage.alchemy import global_dbsession
-
-
- from sqlalchemy.orm import joinedload
-
- slice_hrn = 'iotlab.avakian_slice'
- request = global_dbsession.query(RegSlice).options(joinedload('reg_researchers'))
- solo_query_slice_list = request.filter_by(hrn=slice_hrn).first()
-
- print "\r\n \r\n =========== solo_query_slice_list RegSlice \
- joinedload('reg_researchers') slice_hrn %s first %s \r\n \t "\
- %(slice_hrn, solo_query_slice_list.__dict__)
-
- query_slice_list = request.all()
- print "\r\n \r\n =========== query_slice_list RegSlice \
- joinedload('reg_researchers') ALL \r\n \t", \
- query_slice_list[0].__dict__
-
- return_slicerec_dictlist = []
- record = query_slice_list[0]
- print "\r\n \r\n =========== \r\n \t", record
-
- tmp = record.__dict__
- print "\r\n \r\n =========== \r\n \t", tmp
- tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
- print "\r\n \r\n =========== \r\n \t", tmp
- #del tmp['reg_researchers']['_sa_instance_state']
- return_slicerec_dictlist.append(tmp)
-
- print "\r\n \r\n =========== \r\n \t", return_slicerec_dictlist
-
- all_records = global_dbsession.query(RegRecord).all()
-
-
-
-def RunAll( arg ):
- TestLdap()
- TestOAR()
- TestIotlabDriver()
- TestSfi()
-
-
-supported_options = {
- 'OAR' : TestOAR,
- 'LDAP': TestLdap,
- 'driver': TestIotlabDriver,
- 'shell': TestIotlabshell,
- 'sql':TestSQL,
- 'all' : RunAll, }
-
-def main():
- opts = parse_options()
- print opts
- for opt in opts:
- supported_options[opt](opts[opt])
-
-
-if __name__ == "__main__":
- main()
+++ /dev/null
-#!/usr/bin/env python
-import sys
-import os
-from sfa.iotlab.LDAPapi import LDAPapi
-from difflib import SequenceMatcher
-
-def parse_options():
-
- #arguments supplied
- if len(sys.argv) > 1 :
- options_list = sys.argv[1:]
- print options_list
- rspec_rep = options_list[0]
- return rspec_rep
- else:
- print "Must supply Rspecs directory ", sys.argv[1:]
- return
-
-
-rspec_dir = parse_options()
-print "DIRECTORY SUPPLIED" , rspec_dir
-rspec_filename_list = ['firexp_avakian_slice_iotlab.rspec',
-'firexp_iotlab_slice_iotlab.rspec',
-'iotlab_avakian_slice_iotlab2.rspec',
-'iotlab_avakian_slice_plab.rspec',
-'firexp_iotlab_slice_all.rspec',
-'iotlab_avakian_slice_all.rspec',
-'iotlab_avakian_slice_iotlab.rspec',
-'iotlab_user_slice_iotlab.rspec',
-'test_delete_all_leases.rspec']
-
-rspec_filename_dict = {
- ('iotlab_avakian', 'iotlab', 'allocate' ):
- "sfi.py allocate iotlab.avakian_slice " + rspec_dir + \
- 'iotlab_avakian_slice_iotlab.rspec',
-
- ('iotlab_avakian', 'iotlab2', 'allocate'):
- "sfi.py allocate iotlab.avakian_slice " + rspec_dir + \
- 'iotlab_avakian_slice_iotlab2.rspec',
-
- ('firexp_user','iotlab', 'allocate'):
- "sfi.py allocate firexp.flab.iotlab_slice " + rspec_dir + \
- 'firexp_iotlab_slice_iotlab.rspec',
-
- ('firexp_user', 'all', 'allocate'):
- "sfi.py allocate firexp.flab.iotlab_slice "+ rspec_dir + \
- 'firexp_iotlab_slice_all.rspec',
-
- ('iotlab_user', 'iotlab', 'allocate'):
- "sfi.py allocate iotlab.user_slice "+ rspec_dir + \
- 'iotlab_user_slice_iotlab.rspec',
-
- ('firexp_avakian','iotlab', 'allocate'):
- "sfi.py allocate firexp.flab.avakian_slice " + rspec_dir + \
- 'firexp_avakian_slice_iotlab.rspec',
-
- ('iotlab_avakian', 'plab', 'allocate') :
- "sfi.py allocate iotlab.avakian_slice " + rspec_dir + \
- 'iotlab_avakian_slice_plab.rspec',
-
- ('iotlab_avakian', 'all', 'allocate') :
- "sfi.py allocate iotlab.avakian_slice " + rspec_dir + \
- 'iotlab_avakian_slice_all.rspec',
-
- ('iotlab_avakian', 'iotlab', 'provision' ):
- "sfi.py provision iotlab.avakian_slice",
-
- ('iotlab_avakian', 'iotlab2', 'provision'):
- "sfi.py provision iotlab.avakian_slice",
-
- ('firexp_user','iotlab', 'provision'):
- "sfi.py provision firexp.flab.iotlab_slice",
-
- ('firexp_user', 'all', 'provision'):
- "sfi.py provision firexp.flab.iotlab_slice",
-
- ('iotlab_user', 'iotlab', 'provision'):
- "sfi.py provision iotlab.user_slice",
-
- ('firexp_avakian','iotlab', 'provision'):
- "sfi.py provision firexp.flab.avakian_slice",
-
- ('iotlab_avakian', 'plab', 'provision') :
- "sfi.py provision iotlab.avakian_slice",
-
- ('iotlab_avakian', 'all', 'provision') :
- "sfi.py provision iotlab.avakian_slice",
-
- ('iotlab_avakian', 'iotlab', 'describe' ):
- "sfi.py describe iotlab.avakian_slice iotlab_avakian_slice_iotlab.rspec",
-
- ('iotlab_avakian', 'iotlab2', 'describe'):
- "sfi.py describe iotlab.avakian_slice iotlab_avakian_slice_iotlab2.rspec",
-
- ('firexp_user','iotlab', 'describe'):
- "sfi.py describe firexp.flab.iotlab_slice firexp_iotlab_slice_iotlab.rspec",
-
- ('firexp_user', 'all', 'describe'):
- "sfi.py describe firexp.flab.iotlab_slice firexp_iotlab_slice_all.rspec",
-
- ('iotlab_user', 'iotlab', 'describe'):
- "sfi.py describe iotlab.user_slice iotlab_user_slice_iotlab.rspec",
-
- ('firexp_avakian','iotlab', 'describe'):
- "sfi.py describe firexp.flab.avakian_slice firexp_avakian_slice_iotlab.rspec",
-
- ('iotlab_avakian', 'plab', 'describe') :
- "sfi.py describe iotlab.avakian_slice iotlab_avakian_slice_plab.rspec",
-
- ('iotlab_avakian', 'all', 'describe') :
- "sfi.py describe iotlab.avakian_slice iotlab_avakian_slice_all.rspec"
- }
-
-print rspec_filename_dict
-# check if the firexp user (uid user) is already in LDAP
-# in this is the case, delete it :
-ldap_server = LDAPapi()
-dn = 'uid=' + 'user' + ',' + ldap_server.baseDN
-result = ldap_server.LdapSearch('(uid=user)', [])
-
-if result != []:
- retval = ldap_server.LdapDelete(dn)
- print "deleting firexp user : ", retval
-
-# Change the sfi config file to be able to start the experiment on the federated
-# testbed with another identity and another slice
-print "config sfi"
-with open ("/root/.sfi/sfi_config", "r") as sfi_config:
- sfi_config_txt = [line for line in sfi_config]
-
-with open("/root/.sfi/sfi_config_iotlab", "r") as sfi_config_iotlab:
- sfi_config_iotlab_txt = [line for line in sfi_config_iotlab]
-
-with open("/root/.sfi/sfi_config_firexp", "r") as sfi_config_firexp:
- sfi_config_firexp_txt = [line for line in sfi_config_firexp]
-# check that we are using the iotlab sfi configuration
-result1 = SequenceMatcher(None, sfi_config_txt, sfi_config_iotlab_txt)
-
-result2 = SequenceMatcher(None, sfi_config_txt, sfi_config_firexp_txt)
-
-if result1.ratio() != 1.0:
- os.system('cp /root/.sfi/sfi_config_iotlab /root/.sfi/sfi_config')
-
-os.system('cat /root/.sfi/sfi_config')
-os.system('rm /root/tests_rspecs/iotlab_devlille_OUTPUT.rspec')
-
-print " ================= SFI.PY LIST IOTLAB ============="
-os.system('sfi.py list iotlab')
-
-
-print " ================= SFI.PY RESOURCES ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources')
-
-
-print " ================= SFI.PY RESOURCES -R IOTLAB ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -r iotlab')
-
-
-print " ================= SFI.PY RESOURCES -L ALL ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l all')
-
-print " ================= SFI.PY RESOURCES -R IOTLAB -L ALL ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -r iotlab -l all')
-
-# print " ================= SFI.PY RESOURCES -O output rspec ==========="
-# os.system('sfi.py resources -o /root/tests_rspecs/iotlab_devlille_OUTPUT.rspec')
-
-print " ================= SFI.PY RESOURCES -L LEASES ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l leases')
-
-
-print " ================= SFI.PY SHOW USER ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py show iotlab.avakian')
-
-print " ================= SFI.PY SHOW NODE ============="
-os.system('sfi.py show iotlab.m3-3.devgrenoble.iot-lab.info')
-
-
-
-print " ================= SFI.PY STATUS SLICE ============="
-os.system('sfi.py status iotlab.avakian_slice')
-
-print " ================= SFI.PY ALLOCATE SLICE on iotlab only ============="
-raw_input("Press Enter to continue...")
-os.system( rspec_filename_dict[('iotlab_avakian','iotlab' , 'allocate')])
-
-
-print " ================= SFI.PY PROVISION SLICE on iotlab only ============="
-raw_input("Press Enter to continue...")
-os.system( rspec_filename_dict[('iotlab_avakian','iotlab' , 'provision')])
-
-
-print " ================= SFI.PY DESCRIBE SLICE on iotlab only ============="
-raw_input("Press Enter to continue...")
-os.system( rspec_filename_dict[('iotlab_avakian','iotlab' , 'describe')])
-
-
-print " ================= SFI.PY RESOURCES -l all iotlab.avakian_slice ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l all iotlab.avakian_slice')
-
-
-print " ================= SFI.PY DELETE SLICE ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py delete iotlab.avakian_slice')
-
-
-print " ================= SFI.PY ALLOCATE SLICE on iotlab and firexp ============="
-raw_input("Press Enter to continue...")
-os.system(rspec_filename_dict[('iotlab_avakian','all', 'allocate')])
-
-
-print " ================= SFI.PY RESOURCES -l all -r iotlab iotlab.avakian_slice ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l all -r iotlab iotlab.avakian_slice')
-
-
-print " =================SFI.PY RESOURCES -L LEASES -R IOTLAB ============== "
-os.system('sfi.py resources -r iotlab -l leases')
-
-
-print " ================= SFI.PY DELETE SLICE ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py delete iotlab.avakian_slice')
-
-print "\r\n \r\n"
-
-print " *********changing to firexp sfi config ***************"
-os.system('cp /root/.sfi/sfi_config_firexp /root/.sfi/sfi_config')
-
-
-
-print " ================= SFI.PY ALLOCATE SLICE on iotlab and firexp ============="
-raw_input("Press Enter to continue...")
-os.system(rspec_filename_dict[('firexp_user','all', 'allocate')])
-
-print " ================= SFI.PY DESCRIBE SLICE on iotlab and firexp ============="
-raw_input("Press Enter to continue...")
-os.system(rspec_filename_dict[('firexp_user','all', 'describe')])
-
-print " ================= SFI.PY PROVISION SLICE on iotlab and firexp ============="
-raw_input("Press Enter to continue...")
-os.system(rspec_filename_dict[('firexp_user','all', 'provision')])
-
-
-print " ================= SFI.PY SHOW SLICE ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py show firexp.flab.iotlab_slice')
-
-
-print " ================= SFI.PY RESOURCES -l leases firexp.flab.iotlab_slice ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l leases firexp.flab.iotlab_slice')
-
-
-print " ================= SFI.PY RESOURCES firexp.flab.iotlab_slice ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources firexp.flab.iotlab_slice')
-
-
-
-
+++ /dev/null
-Rspec file names
-======================
-Rspec file names are constructed as follows :
- slice name used in this rspec + network in which the reserved nodes are
-
-Networks can be : iotlab, plab, all (iotlab + plab)
-
-Slices and users
-=================
-user:
-login iotlab : user
-hrn iotlab: iotlab.user
-hrn firexp: firexp.flab.iotlab_user
-slice iotlab: iotlab.user_slice
-slice firexp : firexp.flab.iotlab_slice
-
-
-This special test user comes from Firexp and is considered as an
-external user coming from a federated testbedd for Iotlab.
-
-user:
-login iotlab: avakian
-slice iotlab: iotlab.avakian_slice
-hrn firexp : firexp.flab.avakian (?)
-slice firexp : firexp.flab.avakian_slice (?)
-
-This user comes from iotlab.
-
-
-Leases
-======
-
-The starting time of the leases in those RSpec files are
-usually set to be in 2014, so that we don't have to keep the
-date in mind and check that we are not scheduling a lease
-in the past.
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+firexp:flab+slice+avakian_slice" start_time="1386765700" duration="10">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-09-05T13:30:10Z" generated="2013-09-05T12:30:10Z">
- <network name="plab" >
- <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
- <hostname>effet.pl.sophia.inria.fr</hostname>
- <exclusive>TRUE</exclusive>
- <granularity grain="3600"/>
- <interface component_id="urn:publicid:IDN+plab+interface+node2:eth0" ipv4="138.96.116.135"/>
- <hrn>planetlab.test.plab.effet</hrn>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+firexp:flab+slice+iotlab_slice" start_time="1412938800" duration="1">
- <node component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr"/>
- </lease>
- </network>
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+firexp:flab+slice+iotlab_slice" start_time="1412938800" duration="60">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
- </network>
-</RSpec>
-
-
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="node5.devlille.iotlab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="1"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="1"/>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+firexp:flab+slice+iotlab_slice" start_time="1405078900" duration="600">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-09-05T13:30:10Z" generated="2013-09-05T12:30:10Z">
- <network name="plab" >
- <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
- <hostname>effet.pl.sophia.inria.fr</hostname>
- <exclusive>TRUE</exclusive>
- <granularity grain="3600"/>
- <interface component_id="urn:publicid:IDN+plab+interface+node2:eth0" ipv4="138.96.116.135"/>
- <hrn>planetlab.test.plab.effet</hrn>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1410346800" duration="1">
- <node component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr"/>
- </lease>
- </network>
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1410346800" duration="60">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
- </network>
-</RSpec>
-
-
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2014-52-27T15:14:10Z" generated="2013-05-19T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+ple:upmc+slice+myslicedemo" start_time="1400604923" duration="20">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-8.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-8.devlille.iot-lab.info.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-8.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="600"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-5.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-5.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-5.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="600"/>
- <sliver/>
- </node>
- </network>
- <network name="plab">
- <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
- <hostname>effet.pl.sophia.inria.fr</hostname>
- <exclusive>FALSE</exclusive>
- <interface component_id="urn:publicid:IDN+plab+interface+node1:eth0" ipv4="138.96.116.135"/>
- <arch>x86_64</arch>
- <fcdistro>f14</fcdistro>
- <pldistro>onelab</pldistro>
- <hrn>planetlab.test.plab.effet</hrn>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1405078900" duration="10">
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-5.devlille.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-8.devlille.iot-lab.info"/>
- </lease>
- </network>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2014-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="plab">
- <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
- <hostname>effet.pl.sophia.inria.fr</hostname>
- <exclusive>TRUE</exclusive>
- <granularity grain="3600"/>
- <interface component_id="urn:publicid:IDN+plab+interface+node2:eth0" ipv4="138.96.116.135"/>
- <hrn>planetlab.test.plab.effet</hrn>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1405080000" duration="2">
- <node component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="600"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>"a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="600"/>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+user_slice" start_time="1405078900" duration="601">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-09-05T13:30:10Z" generated="2013-09-05T12:30:10Z">
- <network name="plab" >
- </network>
- <network name="iotlab">
- </network>
-</RSpec>
-
-
uuid = create_uuid()
pkey = Keypair(create=True)
pub_key=getattr(record,'reg_keys',None)
- if pub_key is not None:
+ if len(pub_key) > 0:
# use only first key in record
if pub_key and isinstance(pub_key, types.ListType): pub_key = pub_key[0]
pub_key = pub_key.key