import os, os.path
import tempfile
from types import StringTypes, ListType
-from optparse import OptionParser
+from argparse import ArgumentParser
from sfa.util.sfalogging import logger
+from sfa.util.faults import CredentialNotVerifiable, CertMissingParent #, ChildRightsNotSubsetOfParent
from sfa.trust.certificate import Certificate
from sfa.trust.credential import Credential
# if parent:
# extract_gids(parent, extract_parents)
+def verify_input_object (obj, kind, options):
+ if options.trusted_roots:
+ print "CHECKING...",
+ message= "against [" + (" + ".join(options.trusted_roots)) + "]"
+ try:
+ if kind=='credential':
+ print "verify",message,
+ obj.verify(options.trusted_roots)
+ elif kind in ['certificate','gid']:
+ print "verify_chain",message,
+ obj.verify_chain(options.trusted_roots)
+ print "--> OK"
+ except Exception as inst:
+ print "--> KO",type(inst).__name__
+
def handle_input (filename, options):
kind = determine_sfa_filekind(filename)
- handle_input_kind (filename,options,kind)
-
-def handle_input_kind (filename, options, kind):
-
-# dump methods current do 'print' so let's go this road for now
    # dump methods currently do 'print' so let's go this road for now
if kind=="certificate":
cert=Certificate (filename=filename)
print '--------------------',filename,'IS A',kind
cert.dump(show_extensions=options.show_extensions)
+ verify_input_object (cert, kind, options)
elif kind=="credential":
cred = Credential(filename = filename)
print '--------------------',filename,'IS A',kind
cred.dump(dump_parents = options.dump_parents, show_xml=options.show_xml)
if options.extract_gids:
- print '--------------------',filename,'embedded GIDS'
+ print '--------------------',filename,'embedded GIDs'
extract_gids(cred, extract_parents = options.dump_parents)
+ verify_input_object (cred, kind, options)
elif kind=="gid":
gid = GID(filename = filename)
print '--------------------',filename,'IS A',kind
gid.dump(dump_parents = options.dump_parents)
+ verify_input_object (gid, kind, options)
else:
print "%s: unknown filekind '%s'"% (filename,kind)
def main():
usage = """%prog file1 [ .. filen]
display info on input files"""
- parser = OptionParser(usage=usage)
-
- parser.add_option("-g", "--extract-gids", action="store_true", dest="extract_gids", default=False, help="Extract GIDs from credentials")
- parser.add_option("-p", "--dump-parents", action="store_true", dest="dump_parents", default=False, help="Show parents")
- parser.add_option("-e", "--extensions", action="store_true", dest="show_extensions", default="False", help="Show certificate extensions")
- parser.add_option("-v", "--verbose", action='count', dest='verbose', default=0, help="More and more verbose")
- parser.add_option("-x", "--xml", action='store_true', dest='show_xml', default=False, help="dumps xml tree (cred. only)")
- (options, args) = parser.parse_args()
+ parser = ArgumentParser(usage=usage)
+
+ parser.add_argument("-g", "--extract-gids", action="store_true", dest="extract_gids",
+ default=False, help="Extract GIDs from credentials")
+ parser.add_argument("-p", "--dump-parents", action="store_true", dest="dump_parents",
+ default=False, help="Show parents")
+ parser.add_argument("-e", "--extensions", action="store_true",
+ dest="show_extensions", default="False", help="Show certificate extensions")
+ parser.add_argument("-v", "--verbose", action='count',
+ dest='verbose', default=0, help="More and more verbose")
+ parser.add_argument("-x", "--xml", action='store_true',
+ dest='show_xml', default=False, help="dumps xml tree (cred. only)")
+ parser.add_argument("-c", "--check", action='append', dest='trusted_roots',
+ help="cumulative list of trusted GIDs - when provided, the input is verify'ed against these")
+ parser.add_argument("filenames",metavar='F',nargs='+',help="filenames to dump")
+ options = parser.parse_args()
logger.setLevelFromOptVerbose(options.verbose)
- if len(args) <= 0:
- parser.print_help()
- sys.exit(1)
- for f in args:
- handle_input(f,options)
+ for filename in options.filenames:
+ handle_input(filename,options)
if __name__=="__main__":
main()
-usr/lib*/python*/site-packages/sfa/senslab
+usr/lib*/python*/site-packages/sfa/iotlab
[ "$SFA_SM_ENABLED" == 1 -o "$SFA_SM_ENABLED" == True ] && action "SFA: SliceMgr" daemon /usr/bin/sfa-start.py -s -d $OPTIONS
- [ "$SFA_FLASHPOLICY_ENABLED" == 1 ] && \
+ [ "$SFA_FLASHPOLICY_ENABLED" == 1 -o "$SFA_FLASHPOLICY_ENABLED" == True ] && \
action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
touch $SFALOCK
esac
exit $RETVAL
-
version_tag='cleaningup'
scripts = glob("clientbin/*.py") + \
- [
+ [
'config/sfa-config-tty',
'config/sfa-config',
# 'config/gen-sfa-cm-config.py',
- 'sfa/server/sfa-start.py',
-# 'sfa/server/sfa_component_setup.py',
+ 'sfa/server/sfa-start.py',
+# 'sfa/server/sfa_component_setup.py',
'sfatables/sfatables',
'keyconvert/keyconvert.py',
'flashpolicy/sfa_flashpolicy.py',
]
packages = [
- 'sfa',
+ 'sfa',
'sfa/trust',
'sfa/storage',
- 'sfa/util',
+ 'sfa/util',
'sfa/server',
'sfa/methods',
'sfa/generic',
'sfa/dummy',
'sfa/openstack',
'sfa/federica',
- 'sfa/senslab',
+ 'sfa/iotlab',
'sfatables',
'sfatables/commands',
'sfatables/processors',
remove_bins = [ '/usr/bin/' + os.path.basename(bin) for bin in scripts ]
remove_files = remove_bins + [ "/etc/init.d/%s"%x for x in initscripts ]
- # remove files
+ # remove files
for filepath in remove_files:
print "removing", filepath, "...",
- try:
+ try:
os.remove(filepath)
print "success"
except: print "failed"
- # remove directories
- for directory in remove_dirs:
+ # remove directories
+ for directory in remove_dirs:
print "removing", directory, "...",
- try:
+ try:
shutil.rmtree(directory)
print "success"
except: print "failed"
else:
# avoid repeating what's in the specfile already
setup(name='sfa',
- packages = packages,
+ packages = packages,
data_files = data_files,
scripts = scripts,
url="http://svn.planet-lab.org/wiki/SFATutorial",
%define name sfa
%define version 3.0
-%define taglevel 0
+%define taglevel 2
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
%{python_sitelib}/sfa/nitos
%files senslab
-%{python_sitelib}/sfa/senslab
+%{python_sitelib}/sfa/iotlab
%files dummy
%{python_sitelib}/sfa/dummy
#[ "$1" -ge "1" ] && service sfa-cm restart || :
%changelog
+* Thu Oct 10 2013 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-3.0-2
+- -- core
+- Extend all versions of rspecs in order to support "links" and "channels" management methods
+- several fixes around Rspecs in order to make the code usable by 3rd-party tools
+- expose PL node tags in PGv2 and GENIv3 rspecs
+- fix default slivers mgt in sfav1 rspecs
+- fix SM and AM urn format in GetVersion
+- fix sfaadmin.py to handle extra testbed-dependent info
+- -- PlanetLab
+- Better management of external Slices/Persons/Sites
+- Importer ignores external Slices/Persons/Sites
+- -- Nitos
+- uniformize Leases management
+- address channels with urn
+- -- IotLab
+- Renaming slab folders, files and variables to iotlab, the new name of the platform.
+- New class ParsingResourcesFull in Oarrestapi file. Contains all the parsing node functions.
+- Adding a mobile field to iotlab rspec and a mobility type attribute.
+- Granularity changed to 60 sec.
+- Fixing returned rspec in GetSlices and GetLeases.
+- Corrections in import (importing nodes)
+- More tests in script files under testbeds/iotlab/tests. Creation of Rspec request models under testbeds/iotlab/tests/tests_rspec
+- Lease filtering by date added in GetLeases.
+- Commenting and cleaning
+
+* Wed Jun 19 2013 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-3.0-1
+- AM API v3-based first implementation.
+- planetlab native V3 driver
+- dummy native V3 driver
+- introduction of v2_to_v3_adapter
+- lease times in RFC3339 date format, not epoch
+- mostly in line with geni-v2 for non-v3 issues like packaging and all
+
* Tue Feb 26 2013 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-25
- sfi and sfaadmin list now share the same display code for related objs
- support for advertising alternate api urls - for other API versions - api_versions.xml
for record in records:
if record['type'] != 'user':
continue
- user = {'urn': record['geni_urn'],
- 'keys': record['keys'],
+ user = {'urn': record['reg-urn'],
+ 'keys': record['reg-keys'],
'email': record['email']}
users.append(user)
return users
from sfa.client.candidates import Candidates
-from sfa.client.common import optparse_listvalue_callback, terminal_render, filter_records
+from sfa.client.common import optparse_listvalue_callback, optparse_dictvalue_callback, terminal_render, filter_records
pprinter = PrettyPrinter(indent=4)
def _record_dict(self, xrn=None, type=None,
url=None, description=None, email='',
key=None,
- slices=[], researchers=[], pis=[]):
+ slices=[], researchers=[], pis=[], extras={}):
record_dict = {}
if xrn:
if type:
record_dict['email'] = email
if pis:
record_dict['pi'] = pis
+ if extras:
+ record_dict.update(extras)
return record_dict
@args('-p', '--pis', dest='pis', metavar='<PIs>',
help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ @args('-X','--extra',dest='extras',default={},type='str',metavar="<EXTRA_ASSIGNS>", action="callback", callback=optparse_dictvalue_callback, nargs=1, help="set extra/testbed-dependent flags, e.g. --extra enabled=true")
def register(self, xrn, type=None, url=None, description=None, key=None, slices='',
- pis='', researchers='',email=''):
+ pis='', researchers='',email='', extras={}):
"""Create a new Registry record"""
record_dict = self._record_dict(xrn=xrn, type=type, url=url, key=key,
- slices=slices, researchers=researchers, email=email, pis=pis)
+ slices=slices, researchers=researchers, email=email, pis=pis, extras=extras)
self.api.manager.Register(self.api, record_dict)
@args('-p', '--pis', dest='pis', metavar='<PIs>',
help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ @args('-X','--extra',dest='extras',default={},type='str',metavar="<EXTRA_ASSIGNS>", action="callback", callback=optparse_dictvalue_callback, nargs=1, help="set extra/testbed-dependent flags, e.g. --extra enabled=true")
def update(self, xrn, type=None, url=None, description=None, key=None, slices='',
- pis='', researchers=''):
+ pis='', researchers='', extras={}):
"""Update an existing Registry record"""
print 'incoming PIS',pis
record_dict = self._record_dict(xrn=xrn, type=type, url=url, description=description,
- key=key, slices=slices, researchers=researchers, pis=pis)
+ key=key, slices=slices, researchers=researchers, pis=pis, extras=extras)
self.api.manager.Update(self.api, record_dict)
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
"""Initialize or upgrade the db"""
from sfa.storage.dbschema import DBSchema
dbschema=DBSchema()
- dbschema.init_or_upgrade
+ dbschema.init_or_upgrade()
@args('-a', '--all', dest='all', metavar='<all>', action='store_true', default=False,
help='Remove all registry records and all files in %s area' % help_basedir)
import sys
import os,os.path
+import subprocess
from datetime import datetime
from sfa.util.xrn import Xrn
from sfa.trust.credential import Credential
from sfa.trust.gid import GID
##########
-# a helper class to implement the bootstrapping of crypto. material
+# a helper class to implement the bootstrapping of crypto material
# assuming we are starting from scratch on the client side
# what's needed to complete a full slice creation cycle
# (**) prerequisites:
# obtained at the registry with Resolve
# using the (step2) user-credential as credential
# default filename is <hrn>.<type>.cred
-
+#
+# (**) additionally, it might make sense to upgrade a GID file
+# into a pkcs12 certificate usable in a browser
+# this bundled format allows for embedding the private key
+#
########## Implementation notes
#
# for Java is documented below
# http://nam.ece.upatras.gr/fstoolkit/trac/wiki/JavaSFAClient
#
+# (*) pkcs12
+#
+# the implementation of the pkcs12 wrapping, which is a late addition,
+# is done through direct calls to openssl
+#
####################
class SfaClientException (Exception): pass
return output
+# http://trac.myslice.info/wiki/MySlice/Developer/SFALogin
+### produce a pkcs12 bundled certificate from GID and private key
+# xxx for now we put a hard-wired password that's just, well, 'password'
+# when leaving this empty on the mac, result can't seem to be loaded in keychain..
+ def my_pkcs12_produce (self, filename):
+ password=raw_input("Enter password for p12 certificate: ")
+ openssl_command=['openssl', 'pkcs12', "-export"]
+ openssl_command += [ "-password", "pass:%s"%password ]
+ openssl_command += [ "-inkey", self.private_key_filename()]
+ openssl_command += [ "-in", self.my_gid_filename()]
+ openssl_command += [ "-out", filename ]
+ if subprocess.call(openssl_command) ==0:
+ print "Successfully created %s"%filename
+ else:
+ print "Failed to create %s"%filename
+
# Returns True if credential file is valid. Otherwise return false.
def validate_credential(self, filename):
valid = True
return self.gid_filename (self.hrn, "user")
def gid_filename (self, hrn, type):
return self.fullpath ("%s.%s.gid"%(hrn,type))
-
+ def my_pkcs12_filename (self):
+ return self.fullpath ("%s.p12"%self.hrn)
# optimizing dependencies
# originally we used classes GID or Credential or Certificate
@get_or_produce (my_gid_filename, my_gid_produce)
def my_gid (self): pass
+ @get_or_produce (my_pkcs12_filename, my_pkcs12_produce)
+ def my_pkcs12 (self): pass
+
@get_or_produce (credential_filename, credential_produce, validate_credential)
def credential (self, hrn, type): pass
"authority in set of credentials for this call")
# show_credential option
- if command in ("list","resources", "describe", "provision", "allocate", "add","update","remove","slices","delete","status","renew"):
+ if command in ("list","resources", "describe", "provision", "allocate", "add","update","remove","delete","status","renew"):
parser.add_option("-C","--credential",dest='show_credential',action='store_true',default=False,
help="show credential(s) used in human-readable form")
# registy filter option
metavar="slice_hrn", help="delegate cred. for slice HRN")
parser.add_option("-a", "--auths", dest='delegate_auths',action='append',default=[],
metavar='auth_hrn', help="delegate PI cred for auth HRN")
+ parser.add_option('-d', '--delegate', dest='delegate', help="Override 'delegate' from the config file")
+ parser.add_option('-b', '--backend', dest='backend', help="Override 'backend' from the config file")
return parser
self.private_key = client_bootstrap.private_key()
self.my_credential_string = client_bootstrap.my_credential_string ()
self.my_credential = {'geni_type': 'geni_sfa',
- 'geni_version': '3.0',
+ 'geni_version': '3',
'geni_value': self.my_credential_string}
self.my_gid = client_bootstrap.my_gid ()
self.client_bootstrap = client_bootstrap
def slice_credential(self, name):
return {'geni_type': 'geni_sfa',
- 'geni_version': '3.0',
+ 'geni_version': '3',
'geni_value': self.slice_credential_string(name)}
# xxx should be supported by sfaclientbootstrap as well
# Slice-related commands
# ==================================================================
- @register_command("","")
- def slices(self, options, args):
- "list instantiated slices (ListSlices) - returns urn's"
- server = self.sliceapi()
- # creds
- creds = [self.my_credential_string]
- # options and call_id when supported
- api_options = {}
- api_options['call_id']=unique_call_id()
- if options.show_credential:
- show_credentials(creds)
- result = server.ListSlices(creds, *self.ois(server,api_options))
- value = ReturnValue.get_value(result)
- if self.options.raw:
- save_raw_to_file(result, self.options.raw, self.options.rawformat, self.options.rawbanner)
- else:
- display_list(value)
- return
-
# show rspec for named slice
@register_command("","")
def resources(self, options, args):
# just request the version the client wants
api_options['geni_rspec_version'] = version_manager.get_version(options.rspec_version).to_dict()
else:
- api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}
+ api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3'}
else:
- api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}
+ api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3'}
result = server.ListResources (creds, api_options)
value = ReturnValue.get_value(result)
if self.options.raw:
'cached': True,
'info': options.info,
'list_leases': options.list_leases,
- 'geni_rspec_version': {'type': 'geni', 'version': '3.0'},
+ 'geni_rspec_version': {'type': 'geni', 'version': '3'},
}
if options.rspec_version:
version_manager = VersionManager()
# just request the version the client wants
api_options['geni_rspec_version'] = version_manager.get_version(options.rspec_version).to_dict()
else:
- api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3.0'}
+ api_options['geni_rspec_version'] = {'type': 'geni', 'version': '3'}
urn = Xrn(args[0], type='slice').get_urn()
result = server.Describe([urn], creds, api_options)
value = ReturnValue.get_value(result)
rspec = open(rspec_file).read()
api_options = {}
api_options ['call_id'] = unique_call_id()
+ # users
+ sfa_users = []
+ geni_users = []
+ slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string])
+ if slice_records and 'reg-researchers' in slice_records[0] and slice_records[0]['reg-researchers']!=[]:
+ slice_record = slice_records[0]
+ user_hrns = slice_record['reg-researchers']
+ user_urns = [hrn_to_urn(hrn, 'user') for hrn in user_hrns]
+ user_records = self.registry().Resolve(user_urns, [self.my_credential_string])
+ sfa_users = sfa_users_arg(user_records, slice_record)
+ geni_users = pg_users_arg(user_records)
+
+ api_options['sfa_users'] = sfa_users
+ api_options['geni_users'] = geni_users
+
result = server.Allocate(slice_urn, creds, rspec, api_options)
value = ReturnValue.get_value(result)
if self.options.raw:
# set the requtested rspec version
version_manager = VersionManager()
- rspec_version = version_manager._get_version('geni', '3.0').to_dict()
+ rspec_version = version_manager._get_version('geni', '3').to_dict()
api_options['geni_rspec_version'] = rspec_version
# users
# }]
users = []
slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string])
- if slice_records and 'researcher' in slice_records[0] and slice_records[0]['researcher']!=[]:
+ if slice_records and 'reg-researchers' in slice_records[0] and slice_records[0]['reg-researchers']!=[]:
slice_record = slice_records[0]
- user_hrns = slice_record['researcher']
+ user_hrns = slice_record['reg-researchers']
user_urns = [hrn_to_urn(hrn, 'user') for hrn in user_hrns]
user_records = self.registry().Resolve(user_urns, [self.my_credential_string])
users = pg_users_arg(user_records)
$ sfi -v myslice -- or sfi -vv myslice
same but with more and more verbosity
-$ sfi m
+$ sfi m -b http://mymanifold.foo.com:7080/
is synonym to sfi myslice as no other command starts with an 'm'
+ and uses a custom backend for this one call
"""
) # register_command
def myslice (self, options, args):
if len(args)>0:
self.print_help()
sys.exit(1)
+ # enable info by default
+ self.logger.setLevelFromOptVerbose(self.options.verbose+1)
### the rough sketch goes like this
+ # (0) produce a p12 file
+ self.client_bootstrap.my_pkcs12()
+
# (a) rain check for sufficient config in sfi_config
- # we don't allow to override these settings for now
myslice_dict={}
- myslice_keys=['backend', 'delegate', 'platform', 'username']
+ myslice_keys=[ 'backend', 'delegate', 'platform', 'username']
for key in myslice_keys:
- full_key="MYSLICE_" + key.upper()
- value=getattr(self.config_instance,full_key,None)
+ value=None
+ # oct 2013 - I'm finding myself juggling with config files
+ # so a couple of command-line options can now override config
+ if hasattr(options,key) and getattr(options,key) is not None:
+ value=getattr(options,key)
+ else:
+ full_key="MYSLICE_" + key.upper()
+ value=getattr(self.config_instance,full_key,None)
if value: myslice_dict[key]=value
else: print "Unsufficient config, missing key %s in [myslice] section of sfi_config"%key
if len(myslice_dict) != len(myslice_keys):
sys.exit(1)
# (b) figure whether we are PI for the authority where we belong
- self.logger.info("Resolving our own id")
+ self.logger.info("Resolving our own id %s"%self.user)
my_records=self.registry().Resolve(self.user,self.my_credential_string)
if len(my_records)!=1: print "Cannot Resolve %s -- exiting"%self.user; sys.exit(1)
my_record=my_records[0]
my_auths = my_auths_all
if options.delegate_auths:
my_auths = list(set(my_auths_all).intersection(set(options.delegate_auths)))
+            self.logger.debug("Restricted to user-provided auths: %s"%(my_auths))
- self.logger.info("Delegate PI creds for authorities: %s"%my_auths )
# (c) get the set of slices that we are in
my_slices_all=my_record['reg-slices']
self.logger.info("Found %d slices that we are member of"%len(my_slices_all))
self.logger.debug("They are: %s"%(my_slices_all))
my_slices = my_slices_all
+ # if user provided slices, deal only with these - if they are found
if options.delegate_slices:
my_slices = list(set(my_slices_all).intersection(set(options.delegate_slices)))
-
- self.logger.info("Delegate slice creds for slices: %s"%my_slices)
+ self.logger.debug("Restricted to user-provided slices: %s"%(my_slices))
# (d) make sure we have *valid* credentials for all these
hrn_credentials=[]
hrn_delegated_credentials = []
for (hrn, htype, credential) in hrn_credentials:
delegated_credential = self.client_bootstrap.delegate_credential_string (credential, delegatee_hrn, delegatee_type)
- hrn_delegated_credentials.append ((hrn, htype, delegated_credential, ))
+ # save these so user can monitor what she's uploaded
+ filename = os.path.join ( self.options.sfi_dir,
+ "%s.%s_for_%s.%s.cred"%(hrn,htype,delegatee_hrn,delegatee_type))
+ with file(filename,'w') as f:
+ f.write(delegated_credential)
+ self.logger.debug("(Over)wrote %s"%filename)
+ hrn_delegated_credentials.append ((hrn, htype, delegated_credential, filename, ))
# (f) and finally upload them to manifold server
# xxx todo add an option so the password can be set on the command line
# (but *NOT* in the config file) so other apps can leverage this
+ self.logger.info("Uploading on backend at %s"%myslice_dict['backend'])
uploader = ManifoldUploader (logger=self.logger,
url=myslice_dict['backend'],
platform=myslice_dict['platform'],
password=options.password)
uploader.prompt_all()
(count_all,count_success)=(0,0)
- for (hrn,htype,delegated_credential) in hrn_delegated_credentials:
+ for (hrn,htype,delegated_credential,filename) in hrn_delegated_credentials:
# inspect
inspect=Credential(string=delegated_credential)
expire_datetime=inspect.get_expiration()
if uploader.upload(delegated_credential,message=message):
count_success+=1
count_all+=1
-
self.logger.info("Successfully uploaded %d/%d credentials"%(count_success,count_all))
+
# at first I thought we would want to save these,
# like 'sfi delegate does' but on second thought
# it is probably not helpful as people would not
if count_success != count_all: sys.exit(1)
return
-# Thierry: I'm turning this off as a command, no idea what it's used for
-# @register_command("cred","")
+ @register_command("cred","")
def trusted(self, options, args):
"""
return the trusted certs at this interface (get_trusted_certs)
"""
trusted_certs = self.registry().get_trusted_certs()
for trusted_cert in trusted_certs:
+ print "\n===========================================================\n"
gid = GID(string=trusted_cert)
gid.dump()
cert = Certificate(string=trusted_cert)
self.logger.debug('Sfi.trusted -> %r'%cert.get_subject())
+ print "Certificate:\n%s\n\n"%trusted_cert
return
+"""
+This API is adapted for OpenLDAP. The file contains all LDAP classes and methods
+needed to:
+- Load the LDAP connection configuration file (login, address..) with LdapConfig
+- Connect to LDAP with ldap_co
+- Create a unique LDAP login and password for a user based on his email or last
+name and first name with LoginPassword.
+- Manage entries in LDAP using SFA records with LDAPapi (Search, Add, Delete,
+Modify)
+
+"""
import random
from passlib.hash import ldap_salted_sha1 as lssha
-from sfa.util.xrn import get_authority
-import ldap
-from sfa.util.config import Config
+from sfa.util.xrn import get_authority
+from sfa.util.sfalogging import logger
+from sfa.util.config import Config
+import ldap
import ldap.modlist as modlist
-from sfa.util.sfalogging import logger
-import os.path
-#API for OpenLDAP
+import os.path
class LdapConfig():
- def __init__(self, config_file = '/etc/sfa/ldap_config.py'):
-
+ """
+ Ldap configuration class loads the configuration file and sets the
+ ldap IP address, password, people dn, web dn, group dn. All these settings
+ were defined in a separate file ldap_config.py to avoid sharing them in
+ the SFA git as it contains sensible information.
+
+ """
+ def __init__(self, config_file='/etc/sfa/ldap_config.py'):
+ """Loads configuration from file /etc/sfa/ldap_config.py and set the
+ parameters for connection to LDAP.
+
+ """
+
try:
execfile(config_file, self.__dict__)
-
+
self.config_file = config_file
# path to configuration data
self.config_path = os.path.dirname(config_file)
except IOError:
raise IOError, "Could not find or load the configuration file: %s" \
- % config_file
-
-
+ % config_file
+
+
class ldap_co:
""" Set admin login and server configuration variables."""
-
+
def __init__(self):
- #Senslab PROD LDAP parameters
+ """Fetch LdapConfig attributes (Ldap server connection parameters and
+ defines port , version and subtree scope.
+
+ """
+ #Iotlab PROD LDAP parameters
self.ldapserv = None
ldap_config = LdapConfig()
self.config = ldap_config
- self.ldapHost = ldap_config.LDAP_IP_ADDRESS
+ self.ldapHost = ldap_config.LDAP_IP_ADDRESS
self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN
self.ldapGroupDN = ldap_config.LDAP_GROUP_DN
self.ldapAdminDN = ldap_config.LDAP_WEB_DN
self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD
-
-
self.ldapPort = ldap.PORT
- self.ldapVersion = ldap.VERSION3
+ self.ldapVersion = ldap.VERSION3
self.ldapSearchScope = ldap.SCOPE_SUBTREE
+ def connect(self, bind=True):
+ """Enables connection to the LDAP server.
+
+ :param bind: Set the bind parameter to True if a bind is needed
+ (for add/modify/delete operations). Set to False otherwise.
+ :type bind: boolean
+ :returns: dictionary with status of the connection. True if Successful,
+ False if not and in this case the error
+ message( {'bool', 'message'} ).
+ :rtype: dict
- def connect(self, bind = True):
- """
- Enables connection to the LDAP server.
- :param bind : Set the bind parameter to True if a bind is needed
- (for add/modify/delete operations).
- Set to False otherwise.
- :type bind : boolean
- :return: dictionary with status of the connection. True if Successful,
- False if not and in this case the error message( {'bool', 'message'} )
- :rtype:dict
"""
try:
self.ldapserv = ldap.open(self.ldapHost)
except ldap.LDAPError, error:
- return {'bool' : False, 'message' : error }
-
+ return {'bool': False, 'message': error}
+
# Bind with authentification
- if(bind):
+ if(bind):
return self.bind()
-
- else:
- return {'bool': True}
-
+
+ else:
+ return {'bool': True}
+
def bind(self):
- """ Binding method.
- :return: dictionary with the bind status. True if Successful,
- False if not and in this case the error message( {'bool', 'message'} )
+ """ Binding method.
+
+ :returns: dictionary with the bind status. True if Successful,
+ False if not and in this case the error message({'bool','message'})
:rtype: dict
-
+
"""
try:
# Opens a connection after a call to ldap.open in connect:
self.ldapserv = ldap.initialize("ldap://" + self.ldapHost)
-
- # Bind/authenticate with a user with apropriate
+
+            # Bind/authenticate with a user with appropriate
#rights to add objects
- self.ldapserv.simple_bind_s(self.ldapAdminDN, \
- self.ldapAdminPassword)
+ self.ldapserv.simple_bind_s(self.ldapAdminDN,
+ self.ldapAdminPassword)
except ldap.LDAPError, error:
- return {'bool' : False, 'message' : error }
+ return {'bool': False, 'message': error}
return {'bool': True}
-
+
def close(self):
- """ Close the LDAP connection.
+ """Close the LDAP connection.
+
Can throw an exception if the unbinding fails.
+
+ :returns: dictionary with the bind status if the unbinding failed and
+ in this case the dict contains an error message. The dictionary keys
+ are : ({'bool','message'})
+ :rtype: dict or None
+
"""
try:
self.ldapserv.unbind_s()
except ldap.LDAPError, error:
- return {'bool' : False, 'message' : error }
-
+ return {'bool': False, 'message': error}
+
+
class LoginPassword():
"""
+
Class to handle login and password generation, using custom login generation
algorithm.
+
"""
def __init__(self):
"""
- Sets password and login maximum length, and defines the characters
- that can be found in a random generated password.
+
+ Sets password and login maximum length, and defines the characters that
+ can be found in a random generated password.
+
"""
- self.login_max_length = 8
+ self.login_max_length = 8
self.length_password = 8
- self.chars_password = [ '!', '$', '(',')', '*', '+', ',', '-', '.', \
- '0', '1', '2', '3', '4', '5', '6', '7', '8', \
- '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', \
- 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', \
- 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', \
- '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', \
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p' ,'q', \
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', \
- '\'']
-
-
-
-
+ self.chars_password = ['!', '$', '(',')', '*', '+', ',', '-', '.',
+ '0', '1', '2', '3', '4', '5', '6', '7', '8',
+ '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
+ 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
+ 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
+ '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
+ 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
+ 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+ '\'']
+
@staticmethod
def clean_user_names(record):
"""
- Removes special characters such as
- '-', '_' , '[', ']' and ' ' from the first name and last name.
+
+ Removes special characters such as '-', '_' , '[', ']' and ' ' from the
+ first name and last name.
+
:param record: user's record
- :type record:dict
- :return: lower_first_name and lower_last_name if they were found
- in the user's record. Return None, none otherwise.
+ :type record: dict
+ :returns: lower_first_name and lower_last_name if they were found
+ in the user's record. Return None, none otherwise.
:rtype: string, string or None, None.
+
"""
if 'first_name' in record and 'last_name' in record:
#Remove all special characters from first_name/last name
- lower_first_name = record['first_name'].replace('-','')\
- .replace('_','').replace('[','')\
- .replace(']','').replace(' ','')\
- .lower()
- lower_last_name = record['last_name'].replace('-','')\
- .replace('_','').replace('[','')\
- .replace(']','').replace(' ','')\
- .lower()
+ lower_first_name = record['first_name'].replace('-', '')\
+ .replace('_', '').replace('[', '')\
+ .replace(']', '').replace(' ', '')\
+ .lower()
+ lower_last_name = record['last_name'].replace('-', '')\
+ .replace('_', '').replace('[', '')\
+ .replace(']', '').replace(' ', '')\
+ .lower()
return lower_first_name, lower_last_name
else:
return None, None
-
+
@staticmethod
def extract_name_from_email(record):
"""
- When there is no valid first name and last name in the record,
+
+ When there is no valid first name and last name in the record,
the email is used to generate the login. Here, we assume the email
is firstname.lastname@something.smthg. The first name and last names
are extracted from the email, special characters are removed and
they are changed into lower case.
+
:param record: user's data
- :type record:dict
- :return: the first name and last name taken from the user's email.
- lower_first_name, lower_last_name.
+ :type record: dict
+ :returns: the first name and last name taken from the user's email.
+ lower_first_name, lower_last_name.
:rtype: string, string
+
"""
email = record['email']
lower_first_name = mail[0]
lower_last_name = mail[1]
break
-
- #Otherwise just take the part before the @ as the
+
+ #Otherwise just take the part before the @ as the
#lower_first_name and lower_last_name
if lower_first_name is None:
lower_first_name = email
- lower_last_name = email
-
+ lower_last_name = email
+
return lower_first_name, lower_last_name
def get_user_firstname_lastname(self, record):
- """Get the user first name and last name from the information
- we have in the record.
+ """
+
+ Get the user first name and last name from the information we have in
+ the record.
+
:param record: user's information
:type record: dict
- :return: the user's first name and last name.
- ..seealso: clean_user_names
- ..seealso: extract_name_from_email
+ :returns: the user's first name and last name.
+
+ .. seealso:: clean_user_names
+ .. seealso:: extract_name_from_email
+
"""
lower_first_name, lower_last_name = self.clean_user_names(record)
-
- #No first name and last name check email
+
+ #No first name and last name check email
if lower_first_name is None and lower_last_name is None:
lower_first_name, lower_last_name = \
- self.extract_name_from_email(record)
-
+ self.extract_name_from_email(record)
+
return lower_first_name, lower_last_name
-
-
+
def choose_sets_chars_for_login(self, lower_first_name, lower_last_name):
"""
- Algorithm to select sets of characters from the first name and
- last name, depending on the lenght of the last name and the
- maximum login length which in our case is set to 8 charachetrs.
+
+ Algorithm to select sets of characters from the first name and last
+ name, depending on the length of the last name and the maximum login
+ length which in our case is set to 8 characters.
+
:param lower_first_name: user's first name in lower case.
:param lower_last_name: user's last name in lower case.
- :return: user's login
- :rtype:string
+ :returns: user's login
+ :rtype: string
+
"""
- length_last_name = len(lower_last_name)
+ length_last_name = len(lower_last_name)
self.login_max_length = 8
-
+
#Try generating a unique login based on first name and last name
-
- if length_last_name >= self.login_max_length :
+
+ if length_last_name >= self.login_max_length:
login = lower_last_name[0:self.login_max_length]
index = 0
- logger.debug("login : %s index : %s" %(login, index))
- elif length_last_name >= 4 :
+ logger.debug("login : %s index : %s" % (login, index))
+ elif length_last_name >= 4:
login = lower_last_name
index = 0
- logger.debug("login : %s index : %s" %(login, index))
- elif length_last_name == 3 :
+ logger.debug("login : %s index : %s" % (login, index))
+ elif length_last_name == 3:
login = lower_first_name[0:1] + lower_last_name
index = 1
- logger.debug("login : %s index : %s" %(login, index))
+ logger.debug("login : %s index : %s" % (login, index))
elif length_last_name == 2:
- if len ( lower_first_name) >=2:
+ if len(lower_first_name) >= 2:
login = lower_first_name[0:2] + lower_last_name
index = 2
- logger.debug("login : %s index : %s" %(login, index))
+ logger.debug("login : %s index : %s" % (login, index))
else:
logger.error("LoginException : \
Generation login error with \
minimum four characters")
- else :
+ else:
logger.error("LDAP LdapGenerateUniqueLogin failed : \
- impossible to generate unique login for %s %s" \
- %(lower_first_name,lower_last_name))
+ impossible to generate unique login for %s %s"
+ % (lower_first_name, lower_last_name))
return index, login
-
-
def generate_password(self):
-
- """Generate a password upon adding a new user in LDAP Directory
- (8 characters length). The generated password is composed of characters
- from the charsPassword list
- :return: the randomly generated password
+ """
+
+ Generate a password upon adding a new user in LDAP Directory
+ (8 characters length). The generated password is composed of characters
+ from the chars_password list.
+
+ :returns: the randomly generated password
:rtype: string
-
+
"""
password = str()
-
+
length = len(self.chars_password)
for index in range(self.length_password):
- char_index = random.randint(0, length-1)
+ char_index = random.randint(0, length - 1)
password += self.chars_password[char_index]
return password
-
+
@staticmethod
def encrypt_password(password):
- """ Use passlib library to make a RFC2307 LDAP encrypted password
- salt size = 8, use sha-1 algorithm.
+ """
+
+ Use passlib library to make a RFC2307 LDAP encrypted password; salt
+ size is 8, use sha-1 algorithm.
+
:param password: password not encrypted.
:type password: string
- :return: Returns encrypted password.
- :rtype:string
+ :returns: Returns encrypted password.
+ :rtype: string
+
"""
- #Keep consistency with Java Senslab's LDAP API
+ #Keep consistency with Java Iotlab's LDAP API
#RFC2307SSHAPasswordEncryptor so set the salt size to 8 bytes
- return lssha.encrypt(password, salt_size = 8)
-
-
-
-class LDAPapi :
+ return lssha.encrypt(password, salt_size=8)
+
+
+class LDAPapi:
+ """Defines functions to insert and search entries in the LDAP.
+
+ .. note:: class supposes the unix schema is used
+
+ """
def __init__(self):
- logger.setLevelDebug()
-
+ logger.setLevelDebug()
+
#SFA related config
config = Config()
- self.login_pwd = LoginPassword()
+ self.login_pwd = LoginPassword()
self.authname = config.SFA_REGISTRY_ROOT_AUTH
-
- self.conn = ldap_co()
- self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS
- self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN
- self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER
- self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH
-
+ self.conn = ldap_co()
+ self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS
+ self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN
+ self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER
+ self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH
self.baseDN = self.conn.ldapPeopleDN
+ self.ldapShell = '/bin/bash'
-
- self.ldapShell = '/bin/bash'
-
-
-
def LdapGenerateUniqueLogin(self, record):
"""
- Generate login for adding a new user in LDAP Directory
+
+ Generate login for adding a new user in LDAP Directory
(four characters minimum length). Get proper last name and
- first name so that the user's login can be generated.
+ first name so that the user's login can be generated.
+
:param record: Record must contain first_name and last_name.
- :param record: dict
- :return: the generated login for the user described with record if the
- login generation is successful, None if it fails.
+ :type record: dict
+ :returns: the generated login for the user described with record if the
+ login generation is successful, None if it fails.
:rtype: string or None
- """
+
+ """
#For compatibility with other ldap func
if 'mail' in record and 'email' not in record:
record['email'] = record['mail']
-
+
lower_first_name, lower_last_name = \
- self.login_pwd.get_user_firstname_lastname(record)
-
-
- index, login = self.login_pwd.choose_sets_chars_for_login( \
- lower_first_name, \
- lower_last_name)
+ self.login_pwd.get_user_firstname_lastname(record)
+
+ index, login = self.login_pwd.choose_sets_chars_for_login(
+ lower_first_name, lower_last_name)
+
login_filter = '(uid=' + login + ')'
get_attrs = ['uid']
- try :
+ try:
#Check if login already in use
-
- while (len(self.LdapSearch(login_filter, get_attrs)) is not 0 ):
-
+
+ while (len(self.LdapSearch(login_filter, get_attrs)) is not 0):
+
index += 1
if index >= 9:
logger.error("LoginException : Generation login error \
else:
try:
login = \
- lower_first_name[0:index] + \
- lower_last_name[0:self.login_pwd.login_max_length-index]
- login_filter = '(uid='+ login+ ')'
+ lower_first_name[0:index] + \
+ lower_last_name[0:
+ self.login_pwd.login_max_length
+ - index]
+ login_filter = '(uid=' + login + ')'
except KeyError:
print "lower_first_name - lower_last_name too short"
-
- logger.debug("LDAP.API \t LdapGenerateUniqueLogin login %s"%(login))
+
+ logger.debug("LDAP.API \t LdapGenerateUniqueLogin login %s"
+ % (login))
return login
-
- except ldap.LDAPError, error :
- logger.log_exc("LDAP LdapGenerateUniqueLogin Error %s" %error)
- return None
+ except ldap.LDAPError, error:
+ logger.log_exc("LDAP LdapGenerateUniqueLogin Error %s" % (error))
+ return None
def find_max_uidNumber(self):
-
- """Find the LDAP max uidNumber (POSIX uid attribute) .
- Used when adding a new user in LDAP Directory
- :return: max uidNumber + 1
- :rtype:string
+ """Find the LDAP max uidNumber (POSIX uid attribute).
+
+ Used when adding a new user in LDAP Directory
+
+ :returns: max uidNumber + 1
+ :rtype: string
+
"""
#First, get all the users in the LDAP
get_attrs = "(uidNumber=*)"
login_filter = ['uidNumber']
- result_data = self.LdapSearch(get_attrs, login_filter)
+ result_data = self.LdapSearch(get_attrs, login_filter)
#It there is no user in LDAP yet, First LDAP user
if result_data == []:
max_uidnumber = self.ldapUserUidNumberMin
#Otherwise, get the highest uidNumber
else:
-
- uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data ]
+ uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data]
logger.debug("LDAPapi.py \tfind_max_uidNumber \
- uidNumberList %s " %(uidNumberList))
+ uidNumberList %s " % (uidNumberList))
max_uidnumber = max(uidNumberList) + 1
-
+
return str(max_uidnumber)
-
-
+
+
def get_ssh_pkey(self, record):
- """TODO ; Get ssh public key from sfa record
+ """TODO ; Get ssh public key from sfa record
To be filled by N. Turro ? or using GID pl way?
-
+
"""
return 'A REMPLIR '
-
+
@staticmethod
- #TODO Handle OR filtering in the ldap query when
+ #TODO Handle OR filtering in the ldap query when
#dealing with a list of records instead of doing a for loop in GetPersons
- def make_ldap_filters_from_record( record=None):
- """
- Helper function to make LDAP filter requests out of SFA records.
+ def make_ldap_filters_from_record(record=None):
+ """Helper function to make LDAP filter requests out of SFA records.
+
:param record: user's sfa record. Should contain first_name,last_name,
- email or mail, and if the record is enabled or not. If the dict
- record does not have all of these, must at least contain the user's
- email.
+ email or mail, and if the record is enabled or not. If the dict
+ record does not have all of these, must at least contain the user's
+ email.
:type record: dict
- :return: LDAP request
+ :returns: LDAP request
:rtype: string
+
"""
req_ldap = ''
req_ldapdict = {}
if record :
- if 'first_name' in record and 'last_name' in record:
- req_ldapdict['cn'] = str(record['first_name'])+" "\
- + str(record['last_name'])
- if 'email' in record :
+ if 'first_name' in record and 'last_name' in record:
+ if record['first_name'] != record['last_name']:
+ req_ldapdict['cn'] = str(record['first_name'])+" "\
+ + str(record['last_name'])
+ if 'email' in record:
req_ldapdict['mail'] = record['email']
if 'mail' in record:
req_ldapdict['mail'] = record['mail']
if 'enabled' in record:
- if record['enabled'] == True :
+ if record['enabled'] is True:
req_ldapdict['shadowExpire'] = '-1'
else:
req_ldapdict['shadowExpire'] = '0'
-
- #Hrn should not be part of the filter because the hrn
- #presented by a certificate of a SFA user not imported in
- #Senslab does not include the senslab login in it
- #Plus, the SFA user may already have an account with senslab
+
+ #Hrn should not be part of the filter because the hrn
+ #presented by a certificate of a SFA user not imported in
+ #Iotlab does not include the iotlab login in it
+ #Plus, the SFA user may already have an account with iotlab
#using another login.
-
-
logger.debug("\r\n \t LDAP.PY make_ldap_filters_from_record \
- record %s req_ldapdict %s" \
- %(record, req_ldapdict))
-
+ record %s req_ldapdict %s"
+ % (record, req_ldapdict))
+
for k in req_ldapdict:
- req_ldap += '('+ str(k)+ '=' + str(req_ldapdict[k]) + ')'
- if len(req_ldapdict.keys()) >1 :
+ req_ldap += '(' + str(k) + '=' + str(req_ldapdict[k]) + ')'
+ if len(req_ldapdict.keys()) >1 :
req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
size = len(req_ldap)
- req_ldap = req_ldap[:(size-1)] +')'+ req_ldap[(size-1):]
+ req_ldap = req_ldap[:(size-1)] + ')' + req_ldap[(size-1):]
else:
req_ldap = "(cn=*)"
-
+
return req_ldap
-
+
def make_ldap_attributes_from_record(self, record):
- """When adding a new user to Senslab's LDAP, creates an attributes
- dictionnary from the SFA record understandable by LDAP.
- Generates the user's LDAP login.
- User is automatically validated (account enabled) and described
- as a SFA USER FROM OUTSIDE SENSLAB'.
+ """
+
+ When adding a new user to Iotlab's LDAP, creates an attributes
+ dictionary from the SFA record understandable by LDAP. Generates the
+ user's LDAP login. User is automatically validated (account enabled)
+ and described as a SFA USER FROM OUTSIDE IOTLAB.
+
:param record: must contain the following keys and values:
- first_name, last_name, mail, pkey (ssh key).
+ first_name, last_name, mail, pkey (ssh key).
:type record: dict
-
- :return: dictionary of attributes using LDAP data structure
- model.
+ :returns: dictionary of attributes using LDAP data structure model.
:rtype: dict
-
+
"""
attrs = {}
- attrs['objectClass'] = ["top", "person", "inetOrgPerson", \
- "organizationalPerson", "posixAccount", \
- "shadowAccount", "systemQuotas", \
- "ldapPublicKey"]
-
-
- attrs['uid'] = self.LdapGenerateUniqueLogin(record)
+ attrs['objectClass'] = ["top", "person", "inetOrgPerson",
+ "organizationalPerson", "posixAccount",
+ "shadowAccount", "systemQuotas",
+ "ldapPublicKey"]
+
+ attrs['uid'] = self.LdapGenerateUniqueLogin(record)
try:
attrs['givenName'] = str(record['first_name']).lower().capitalize()
attrs['sn'] = str(record['last_name']).lower().capitalize()
attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']
attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']
-
- except KeyError:
+
+ except KeyError:
attrs['givenName'] = attrs['uid']
attrs['sn'] = attrs['uid']
attrs['cn'] = attrs['uid']
attrs['gecos'] = attrs['uid']
-
-
- attrs['quota'] = self.ldapUserQuotaNFS
+
+ attrs['quota'] = self.ldapUserQuotaNFS
attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']
attrs['loginShell'] = self.ldapShell
attrs['gidNumber'] = self.ldapUserGidNumber
try:
attrs['sshPublicKey'] = record['pkey']
except KeyError:
- attrs['sshPublicKey'] = self.get_ssh_pkey(record)
-
+ attrs['sshPublicKey'] = self.get_ssh_pkey(record)
+
- #Password is automatically generated because SFA user don't go
- #through the Senslab website used to register new users,
+ #Password is automatically generated because SFA users don't go
+ #through the Iotlab website used to register new users,
#There is no place in SFA where users can enter such information
#yet.
- #If the user wants to set his own password , he must go to the Senslab
+ #If the user wants to set his own password, he must go to the Iotlab
#website.
password = self.login_pwd.generate_password()
attrs['userPassword'] = self.login_pwd.encrypt_password(password)
-
+
#Account automatically validated (no mail request to admins)
#Set to 0 to disable the account, -1 to enable it,
attrs['shadowExpire'] = '-1'
- #Motivation field in Senslab
+ #Motivation field in Iotlab
attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'
attrs['ou'] = 'SFA' #Optional: organizational unit
#No info about those here:
- attrs['l'] = 'To be defined'#Optional: Locality.
+ attrs['l'] = 'To be defined'#Optional: Locality.
attrs['st'] = 'To be defined' #Optional: state or province (country).
return attrs
def LdapAddUser(self, record) :
- """Add SFA user to LDAP if it is not in LDAP yet.
- :param record: dictionnary with the user's data.
-
- :return: a dictionary with the status (Fail= False, Success= True)
- and the uid of the newly added user if successful, or the error
- meassage it is not. Dict has keys bool and message in case of failure,
- and bool uid in case of success.
+ """Add SFA user to LDAP if it is not in LDAP yet.
+
+ :param record: dictionary with the user's data.
+ :returns: a dictionary with the status (Fail= False, Success= True)
+ and the uid of the newly added user if successful, or the error
+ message it is not. Dict has keys bool and message in case of
+ failure, and bool uid in case of success.
:rtype: dict
-
- ..seealso: make_ldap_filters_from_record
-
+
+ .. seealso:: make_ldap_filters_from_record
+
"""
logger.debug(" \r\n \t LDAP LdapAddUser \r\n\r\n ================\r\n ")
user_ldap_attrs = self.make_ldap_attributes_from_record(record)
-
#Check if user already in LDAP wih email, first name and last name
filter_by = self.make_ldap_filters_from_record(user_ldap_attrs)
user_exist = self.LdapSearch(filter_by)
if user_exist:
logger.warning(" \r\n \t LDAP LdapAddUser user %s %s \
- already exists" %(user_ldap_attrs['sn'], \
- user_ldap_attrs['mail']))
+ already exists" % (user_ldap_attrs['sn'],
+ user_ldap_attrs['mail']))
return {'bool': False}
-
+
#Bind to the server
result = self.conn.connect()
-
+
if(result['bool']):
-
+
# A dict to help build the "body" of the object
-
- logger.debug(" \r\n \t LDAP LdapAddUser attrs %s " %user_ldap_attrs)
+ logger.debug(" \r\n \t LDAP LdapAddUser attrs %s "
+ % user_ldap_attrs)
# The dn of our new entry/object
- dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN
+ dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN
try:
ldif = modlist.addModlist(user_ldap_attrs)
- logger.debug("LDAPapi.py add attrs %s \r\n ldif %s"\
- %(user_ldap_attrs, ldif) )
+ logger.debug("LDAPapi.py add attrs %s \r\n ldif %s"
+ % (user_ldap_attrs, ldif))
self.conn.ldapserv.add_s(dn, ldif)
-
- logger.info("Adding user %s login %s in LDAP" \
- %(user_ldap_attrs['cn'] , user_ldap_attrs['uid']))
-
-
+
+ logger.info("Adding user %s login %s in LDAP"
+ % (user_ldap_attrs['cn'], user_ldap_attrs['uid']))
except ldap.LDAPError, error:
- logger.log_exc("LDAP Add Error %s" %error)
- return {'bool' : False, 'message' : error }
-
+ logger.log_exc("LDAP Add Error %s" % error)
+ return {'bool': False, 'message': error}
+
self.conn.close()
- return {'bool': True, 'uid':user_ldap_attrs['uid']}
- else:
+ return {'bool': True, 'uid': user_ldap_attrs['uid']}
+ else:
return result
-
def LdapDelete(self, person_dn):
- """
- Deletes a person in LDAP. Uses the dn of the user.
+ """Deletes a person in LDAP. Uses the dn of the user.
+
:param person_dn: user's ldap dn.
:type person_dn: string
- :return: dictionary with bool True if successful, bool False
- and the error if not.
- :rtype:dict
+ :returns: dictionary with bool True if successful, bool False
+ and the error if not.
+ :rtype: dict
+
"""
- #Connect and bind
+ #Connect and bind
result = self.conn.connect()
if(result['bool']):
try:
self.conn.ldapserv.delete_s(person_dn)
self.conn.close()
return {'bool': True}
-
+
except ldap.LDAPError, error:
- logger.log_exc("LDAP Delete Error %s" %error)
+ logger.log_exc("LDAP Delete Error %s" % error)
return {'bool': False, 'message': error}
-
-
- def LdapDeleteUser(self, record_filter):
- """
- Deletes a SFA person in LDAP, based on the user's hrn.
- :param record_filter: Filter to find the user to be deleted. Must
- contain at least the user's email.
+
+ def LdapDeleteUser(self, record_filter):
+ """Deletes a SFA person in LDAP, based on the user's hrn.
+
+ :param record_filter: Filter to find the user to be deleted. Must
+ contain at least the user's email.
:type record_filter: dict
- :return: dict with bool True if successful, bool False and error message
- otherwise
- :rtype:dict
- ..seealso: LdapFindUser docstring for more info on record filter.
- ..seealso: LdapDelete for user deletion
+ :returns: dict with bool True if successful, bool False and error
+ message otherwise.
+ :rtype: dict
+
+ .. seealso:: LdapFindUser docstring for more info on record filter.
+ .. seealso:: LdapDelete for user deletion
+
"""
- #Find uid of the person
+ #Find uid of the person
person = self.LdapFindUser(record_filter, [])
- logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s" \
- %(record_filter, person))
+ logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s"
+ % (record_filter, person))
if person:
- dn = 'uid=' + person['uid'] + "," + self.baseDN
+ dn = 'uid=' + person['uid'] + "," + self.baseDN
else:
return {'bool': False}
-
+
result = self.LdapDelete(dn)
return result
-
- def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
+ def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
""" Modifies a LDAP entry, replaces user's old attributes with
the new ones given.
- :param dn: user's absolute name in the LDAP hierarchy.
+
+ :param dn: user's absolute name in the LDAP hierarchy.
:param old_attributes_dict: old user's attributes. Keys must match
- the ones used in the LDAP model.
+ the ones used in the LDAP model.
:param new_attributes_dict: new user's attributes. Keys must match
- the ones used in the LDAP model.
+ the ones used in the LDAP model.
:type dn: string
:type old_attributes_dict: dict
:type new_attributes_dict: dict
- :return: dict bool True if Successful, bool False if not.
- :rtype:dict
+ :returns: dict bool True if Successful, bool False if not.
+ :rtype: dict
+
"""
-
+
ldif = modlist.modifyModlist(old_attributes_dict, new_attributes_dict)
- # Connect and bind/authenticate
- result = self.conn.connect()
- if (result['bool']):
+ # Connect and bind/authenticate
+ result = self.conn.connect()
+ if (result['bool']):
try:
self.conn.ldapserv.modify_s(dn, ldif)
self.conn.close()
- return {'bool' : True }
+ return {'bool': True}
except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapModify Error %s" %error)
- return {'bool' : False }
-
-
+ logger.log_exc("LDAP LdapModify Error %s" % error)
+ return {'bool': False}
+
+
def LdapModifyUser(self, user_record, new_attributes_dict):
"""
- Gets the record from one user based on the user sfa record
- and changes the attributes according to the specified new_attributes.
- Do not use this if we need to modify the uid. Use a ModRDN
- #operation instead ( modify relative DN )
+
+ Gets the record from one user based on the user sfa record and changes
+ the attributes according to the specified new_attributes. Do not use
+ this if we need to modify the uid. Use a ModRDN operation instead
+ ( modify relative DN ).
+
:param user_record: sfa user record.
:param new_attributes_dict: new user attributes, keys must be the
- same as the LDAP model.
+ same as the LDAP model.
:type user_record: dict
:type new_attributes_dict: dict
- :return: bool True if successful, bool False if not.
+ :returns: bool True if successful, bool False if not.
:rtype: dict
- ..seealso: make_ldap_filters_from_record for info on what is mandatory
- in the user_record.
- ..seealso: make_ldap_attributes_from_record for the LDAP objectclass.
+
+ .. seealso:: make_ldap_filters_from_record for info on what is mandatory
+ in the user_record.
+ .. seealso:: make_ldap_attributes_from_record for the LDAP objectclass.
+
"""
if user_record is None:
logger.error("LDAP \t LdapModifyUser Need user record ")
- return {'bool': False}
-
- #Get all the attributes of the user_uid_login
+ return {'bool': False}
+
+ #Get all the attributes of the user_uid_login
#person = self.LdapFindUser(record_filter,[])
req_ldap = self.make_ldap_filters_from_record(user_record)
person_list = self.LdapSearch(req_ldap, [])
- logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s" \
- %(person_list))
- if person_list and len(person_list) > 1 :
+ logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s"
+ % (person_list))
+
+ if person_list and len(person_list) > 1:
logger.error("LDAP \t LdapModifyUser Too many users returned")
return {'bool': False}
- if person_list is None :
- logger.error("LDAP \t LdapModifyUser User %s doesn't exist "\
- %(user_record))
- return {'bool': False}
-
+ if person_list is None:
+ logger.error("LDAP \t LdapModifyUser User %s doesn't exist "
+ % (user_record))
+ return {'bool': False}
+
# The dn of our existing entry/object
#One result only from ldapSearch
person = person_list[0][1]
- dn = 'uid=' + person['uid'][0] + "," + self.baseDN
-
+ dn = 'uid=' + person['uid'][0] + "," + self.baseDN
+
if new_attributes_dict:
old = {}
for k in new_attributes_dict:
if k not in person:
- old[k] = ''
- else :
+ old[k] = ''
+ else:
old[k] = person[k]
- logger.debug(" LDAPapi.py \t LdapModifyUser new_attributes %s"\
- %( new_attributes_dict))
+ logger.debug(" LDAPapi.py \t LdapModifyUser new_attributes %s"
+ % (new_attributes_dict))
result = self.LdapModify(dn, old, new_attributes_dict)
return result
else:
logger.error("LDAP \t LdapModifyUser No new attributes given. ")
- return {'bool': False}
-
-
-
-
- def LdapMarkUserAsDeleted(self, record):
- """
- Sets shadowExpire to 0, disabling the user in LDAP.
- Calls LdapModifyUser to change the shadowExpire of the user.
+ return {'bool': False}
+
+
+ def LdapMarkUserAsDeleted(self, record):
+ """
+
+ Sets shadowExpire to 0, disabling the user in LDAP. Calls LdapModifyUser
+ to change the shadowExpire of the user.
+
:param record: the record of the user who has to be disabled.
+ Should contain first_name,last_name, email or mail, and if the
+ record is enabled or not. If the dict record does not have all of
+ these, must at least contain the user's email.
:type record: dict
- :return: bool True if successful or bool False if not
- :rtype:dict
- ..seealso: LdapModifyUser
+ :returns: {bool: True} if successful or {bool: False} if not
+ :rtype: dict
+
+ .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
"""
-
+
new_attrs = {}
#Disable account
new_attrs['shadowExpire'] = '0'
ret = self.LdapModifyUser(record, new_attrs)
return ret
-
def LdapResetPassword(self, record):
- """
- Resets password for the user whose record is the parameter and changes
- the corresponding entry in the LDAP.
-
+ """Resets password for the user whose record is the parameter and
+ changes the corresponding entry in the LDAP.
+
+ :param record: user's sfa record whose Ldap password must be reset.
+ Should contain first_name,last_name,
+ email or mail, and if the record is enabled or not. If the dict
+ record does not have all of these, must at least contain the user's
+ email.
+ :type record: dict
+ :returns: return value of LdapModifyUser. True if successful, False
+ otherwise.
+
+ .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
+
"""
password = self.login_pwd.generate_password()
attrs = {}
attrs['userPassword'] = self.login_pwd.encrypt_password(password)
- logger.debug("LDAP LdapResetPassword encrypt_password %s"\
- %(attrs['userPassword']))
+ logger.debug("LDAP LdapResetPassword encrypt_password %s"
+ % (attrs['userPassword']))
result = self.LdapModifyUser(record, attrs)
return result
-
-
- def LdapSearch (self, req_ldap = None, expected_fields = None ):
- """
- Used to search directly in LDAP, by using ldap filters and
- return fields.
- When req_ldap is None, returns all the entries in the LDAP.
-
- """
- result = self.conn.connect(bind = False)
- if (result['bool']) :
-
+
+
+ def LdapSearch(self, req_ldap=None, expected_fields=None):
+ """
+ Used to search directly in LDAP, by using ldap filters and return
+ fields. When req_ldap is None, returns all the entries in the LDAP.
+
+ :param req_ldap: ldap style request, with appropriate filters,
+ example: (cn=*).
+ :param expected_fields: Fields in the user ldap entry that has to be
+ returned. If None is provided, will return 'mail', 'givenName',
+ 'sn', 'uid', 'sshPublicKey', 'shadowExpire'.
+ :type req_ldap: string
+ :type expected_fields: list
+
+ .. seealso:: make_ldap_filters_from_record for req_ldap format.
+
+ """
+ result = self.conn.connect(bind=False)
+ if (result['bool']):
+
return_fields_list = []
- if expected_fields == None :
- return_fields_list = ['mail', 'givenName', 'sn', 'uid', \
- 'sshPublicKey', 'shadowExpire']
- else :
+ if expected_fields is None:
+ return_fields_list = ['mail', 'givenName', 'sn', 'uid',
+ 'sshPublicKey', 'shadowExpire']
+ else:
return_fields_list = expected_fields
- #No specifc request specified, get the whole LDAP
- if req_ldap == None:
+ #No specific request specified, get the whole LDAP
+ if req_ldap is None:
req_ldap = '(cn=*)'
-
+
logger.debug("LDAP.PY \t LdapSearch req_ldap %s \
return_fields_list %s" \
%(req_ldap, return_fields_list))
try:
msg_id = self.conn.ldapserv.search(
- self.baseDN,ldap.SCOPE_SUBTREE,\
- req_ldap, return_fields_list)
- #Get all the results matching the search from ldap in one
+ self.baseDN, ldap.SCOPE_SUBTREE,
+ req_ldap, return_fields_list)
+ #Get all the results matching the search from ldap in one
#shot (1 value)
result_type, result_data = \
- self.conn.ldapserv.result(msg_id, 1)
+ self.conn.ldapserv.result(msg_id, 1)
self.conn.close()
- logger.debug("LDAP.PY \t LdapSearch result_data %s"\
- %(result_data))
+ logger.debug("LDAP.PY \t LdapSearch result_data %s"
+ % (result_data))
return result_data
-
- except ldap.LDAPError, error :
- logger.log_exc("LDAP LdapSearch Error %s" %error)
+
+ except ldap.LDAPError, error:
+ logger.log_exc("LDAP LdapSearch Error %s" % error)
return []
-
+
else:
- logger.error("LDAP.PY \t Connection Failed" )
- return
-
-
+ logger.error("LDAP.PY \t Connection Failed")
+ return
+
def _process_ldap_info_for_all_users(self, result_data):
- """
- Process the data of all enabled users in LDAP.
+ """Process the data of all enabled users in LDAP.
+
:param result_data: Contains information of all enabled users in LDAP
- and is coming from LdapSearch.
+ and is coming from LdapSearch.
:param result_data: list
- ..seealso: LdapSearch
+
+ .. seealso:: LdapSearch
+
"""
results = []
+ logger.debug(" LDAP.py _process_ldap_info_for_all_users result_data %s "
+ % (result_data))
for ldapentry in result_data:
- logger.debug(" LDAP.py LdapFindUser ldapentry name : %s " \
- %(ldapentry[1]['uid'][0]))
+ logger.debug(" LDAP.py _process_ldap_info_for_all_users \
+ ldapentry name : %s " % (ldapentry[1]['uid'][0]))
tmpname = ldapentry[1]['uid'][0]
hrn = self.authname + "." + tmpname
-
+
tmpemail = ldapentry[1]['mail'][0]
if ldapentry[1]['mail'][0] == "unknown":
tmpemail = None
-
try:
- results.append( {
- 'type': 'user',
- 'pkey': ldapentry[1]['sshPublicKey'][0],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname ,
- 'email':tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry[1]['givenName'][0],
- 'last_name': ldapentry[1]['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': self.authname,
- 'peer_authority': '',
- 'pointer' : -1,
- 'hrn': hrn,
- } )
+ results.append({
+ 'type': 'user',
+ 'pkey': ldapentry[1]['sshPublicKey'][0],
+ #'uid': ldapentry[1]['uid'][0],
+ 'uid': tmpname ,
+ 'email':tmpemail,
+ #'email': ldapentry[1]['mail'][0],
+ 'first_name': ldapentry[1]['givenName'][0],
+ 'last_name': ldapentry[1]['sn'][0],
+ #'phone': 'none',
+ 'serial': 'none',
+ 'authority': self.authname,
+ 'peer_authority': '',
+ 'pointer': -1,
+ 'hrn': hrn,
+ })
except KeyError, error:
- logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s" \
- %(error))
+ logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s"
+ % (error))
return
-
- return results
-
+
+ return results
+
def _process_ldap_info_for_one_user(self, record, result_data):
- """
+ """
+
Put the user's ldap data into shape. Only deals with one user
- record and one user data from ldap.
- :param record: user record
+ record and one user data from ldap.
+
+ :param record: user record
:param result_data: Raw ldap data coming from LdapSearch
- :return: user's data dict with 'type','pkey','uid', 'email',
- 'first_name' 'last_name''serial''authority''peer_authority'
- 'pointer''hrn'
+ :returns: user's data dict with 'type','pkey','uid', 'email',
+ 'first_name' 'last_name''serial''authority''peer_authority'
+ 'pointer''hrn'
:type record: dict
:type result_data: list
- :rtype :dict
+ :rtype: dict
+
"""
#One entry only in the ldap data because we used a filter
#to find one user only
ldapentry = result_data[0][1]
- logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" %(ldapentry))
+ logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" % (ldapentry))
tmpname = ldapentry['uid'][0]
tmpemail = ldapentry['mail'][0]
if ldapentry['mail'][0] == "unknown":
tmpemail = None
-
+
parent_hrn = None
- peer_authority = None
+ peer_authority = None
if 'hrn' in record:
hrn = record['hrn']
parent_hrn = get_authority(hrn)
if parent_hrn != self.authname:
peer_authority = parent_hrn
- #In case the user was not imported from Senslab LDAP
- #but from another federated site, has an account in
- #senslab but currently using his hrn from federated site
- #then the login is different from the one found in its hrn
+ #In case the user was not imported from Iotlab LDAP
+ #but from another federated site, has an account in
+ #iotlab but currently using his hrn from federated site
+ #then the login is different from the one found in its hrn
if tmpname != hrn.split('.')[1]:
hrn = None
else:
hrn = None
-
-
-
- results = {
- 'type': 'user',
- 'pkey': ldapentry['sshPublicKey'],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname ,
- 'email':tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry['givenName'][0],
- 'last_name': ldapentry['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': parent_hrn,
- 'peer_authority': peer_authority,
- 'pointer' : -1,
- 'hrn': hrn,
+
+ results = {
+ 'type': 'user',
+ 'pkey': ldapentry['sshPublicKey'],
+ #'uid': ldapentry[1]['uid'][0],
+ 'uid': tmpname,
+ 'email': tmpemail,
+ #'email': ldapentry[1]['mail'][0],
+ 'first_name': ldapentry['givenName'][0],
+ 'last_name': ldapentry['sn'][0],
+ #'phone': 'none',
+ 'serial': 'none',
+ 'authority': parent_hrn,
+ 'peer_authority': peer_authority,
+ 'pointer': -1,
+ 'hrn': hrn,
}
return results
-
-
- def LdapFindUser(self, record = None, is_user_enabled=None, \
- expected_fields = None):
+
+ def LdapFindUser(self, record=None, is_user_enabled=None,
+ expected_fields=None):
"""
- Search a SFA user with a hrn. User should be already registered
- in Senslab LDAP.
+
+ Search a SFA user with a hrn. User should be already registered
+ in Iotlab LDAP.
+
:param record: sfa user's record. Should contain first_name,last_name,
- email or mail. If no record is provided, returns all the users found
- in LDAP.
+ email or mail. If no record is provided, returns all the users found
+ in LDAP.
:type record: dict
- :param is_user_enabled: is the user's senslab account already valid.
- :type is_user_enabled: Boolean.
- :return: LDAP entries from ldap matching the filter provided. Returns
- a single entry if one filter has been given and a list of
- entries otherwise.
+ :param is_user_enabled: is the user's iotlab account already valid.
+ :type is_user_enabled: Boolean.
+ :returns: LDAP entries from ldap matching the filter provided. Returns
+ a single entry if one filter has been given and a list of
+ entries otherwise.
:rtype: dict or list
- """
+
+ """
custom_record = {}
- if is_user_enabled:
+ if is_user_enabled:
custom_record['enabled'] = is_user_enabled
- if record:
+ if record:
custom_record.update(record)
-
- req_ldap = self.make_ldap_filters_from_record(custom_record)
+ req_ldap = self.make_ldap_filters_from_record(custom_record)
return_fields_list = []
- if expected_fields == None :
- return_fields_list = ['mail', 'givenName', 'sn', 'uid', \
- 'sshPublicKey']
- else :
+ if expected_fields is None:
+ return_fields_list = ['mail', 'givenName', 'sn', 'uid',
+ 'sshPublicKey']
+ else:
return_fields_list = expected_fields
-
- result_data = self.LdapSearch(req_ldap, return_fields_list )
- logger.debug("LDAP.PY \t LdapFindUser result_data %s" %(result_data))
-
- if len(result_data) is 0:
+
+ result_data = self.LdapSearch(req_ldap, return_fields_list)
+ logger.debug("LDAP.PY \t LdapFindUser result_data %s" % (result_data))
+
+ if len(result_data) == 0:
return None
#Asked for a specific user
- if record :
+ if record is not None:
+ # NOTE(review): unlike the 'if record:' truthiness test above, this
+ # treats an empty dict record as a specific-user query -- confirm
+ # this behavior change vs the removed 'if record :' is intended.
results = self._process_ldap_info_for_one_user(record, result_data)
-
+
else:
#Asked for all users in ldap
results = self._process_ldap_info_for_all_users(result_data)
- return results
-
+ return results
\ No newline at end of file
--- /dev/null
+"""
+File providing methods to generate valid RSpecs for the Iotlab testbed.
+Contains methods to get information on slice, slivers, nodes and leases,
+formatting them and turn it into a RSpec.
+"""
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn, get_authority
+
+from sfa.rspecs.rspec import RSpec
+#from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.granularity import Granularity
+from sfa.rspecs.version_manager import VersionManager
+
+from sfa.rspecs.elements.versions.cortexlabv1Node import IotlabPosition, \
+ IotlabNode, IotlabLocation, IotlabMobility
+
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import Xrn
+
+
+def cortexlab_xrn_to_hostname(xrn):
+    """Extract and unescape the hostname (leaf) carried by a node xrn.
+
+    :param xrn: The nodes xrn identifier.
+    :type xrn: Xrn (from sfa.util.xrn)
+
+    :returns: node's hostname.
+    :rtype: string
+
+    """
+    node_xrn = Xrn(xrn=xrn, type='node')
+    return Xrn.unescape(node_xrn.get_leaf())
+
+
+def cortexlab_xrn_object(root_auth, hostname):
+    """Build a valid node Xrn from a hostname and the SFA server's
+    root authority.
+
+    :param hostname: the node's hostname.
+    :param root_auth: the SFA root authority.
+    :type hostname: string
+    :type root_auth: string
+
+    :returns: the cortexlab node's xrn
+    :rtype: Xrn
+
+    """
+    escaped_hostname = Xrn.escape(hostname)
+    return Xrn('.'.join([root_auth, escaped_hostname]), type='node')
+
+
+class CortexlabAggregate:
+ """Aggregate manager class for Iotlab. """
+
+ # NOTE(review): the class-level attributes below are never read in this
+ # file; presumably carried over from another aggregate implementation --
+ # confirm before relying on (or removing) them.
+ sites = {}
+ nodes = {}
+ api = None
+ interfaces = {}
+ links = {}
+ node_tags = {}
+
+ prepared = False
+
+ user_options = {}
+
+ def __init__(self, driver):
+ # keep a handle on the testbed driver; all queries go through
+ # self.driver.cortexlab_api
+ self.driver = driver
+
+ def get_slice_and_slivers(self, slice_xrn, login=None):
+ """
+ Get the slices and the associated leases if any, from the cortexlab
+ testbed. One slice can have multiple leases.
+ For each slice, get the nodes in the associated lease
+ and create a sliver with the necessary info and insert it into the
+ sliver dictionary, keyed on the node hostnames.
+ Returns a dict of slivers based on the sliver's node_id.
+ Called by get_rspec.
+
+
+ :param slice_xrn: xrn of the slice
+ :param login: user's login on cortexlab ldap
+
+ :type slice_xrn: string
+ :type login: string
+ :returns: a list of slices dict and a list of Sliver object
+ :rtype: (list, list)
+
+ .. note:: There is no real slivers in cortexlab, only leases. The goal
+ is to be consistent with the SFA standard.
+
+ """
+ slivers = {}
+ sfa_slice = None
+ if slice_xrn is None:
+ return (sfa_slice, slivers)
+ slice_urn = hrn_to_urn(slice_xrn, 'slice')
+ slice_hrn, _ = urn_to_hrn(slice_xrn)
+ slice_name = slice_hrn
+
+ slices = self.driver.cortexlab_api.GetSlices(slice_filter=str(slice_name),
+ slice_filter_type='slice_hrn',
+ login=login)
+
+ logger.debug("CortexlabAggregate api \tget_slice_and_slivers \
+ slice_hrn %s \r\n slices %s self.driver.hrn %s"
+ % (slice_hrn, slices, self.driver.hrn))
+
+ if slices == []:
+ return (sfa_slice, slivers)
+
+ # sort slivers by node id , if there is a job
+ #and therefore, node allocated to this slice
+ for sfa_slice in slices:
+ try:
+ node_ids_list = sfa_slice['node_ids']
+ except KeyError:
+ logger.log_exc("CortexlabAggregate \t \
+ get_slice_and_slivers No nodes in the slice \
+ - KeyError ")
+ node_ids_list = []
+ continue
+
+ for node in node_ids_list:
+ sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
+ sliver_xrn.set_authority(self.driver.hrn)
+ sliver = Sliver({'sliver_id': sliver_xrn.urn,
+ 'name': sfa_slice['hrn'],
+ 'type': 'cortexlab-node',
+ 'tags': []})
+
+ slivers[node] = sliver
+
+ #Add default sliver attribute :
+ #connection information for cortexlab
+ if get_authority(sfa_slice['hrn']) == self.driver.cortexlab_api.root_auth:
+ tmp = sfa_slice['hrn'].split('.')
+ ldap_username = tmp[1].split('_')[0]
+ ssh_access = None
+ slivers['default_sliver'] = {'ssh': ssh_access,
+ 'login': ldap_username}
+
+ #TODO get_slice_and_slivers Find the login of the external user
+
+ logger.debug("CortexlabAggregate api get_slice_and_slivers slivers %s "
+ % (slivers))
+ return (slices, slivers)
+
+
+ def get_nodes(self, slices=None, slivers=[], options=None):
+ """Returns the nodes in the slice using the rspec format, with all the
+ nodes' properties.
+
+ Fetch the nodes ids in the slices dictionary and get all the nodes
+ properties from OAR. Makes a rspec dictionary out of this and returns
+ it. If the slice does not have any job running or scheduled, that is
+ it has no reserved nodes, then returns an empty list.
+
+ :param slices: list of slices (record dictionaries)
+ :param slivers: the list of slivers in all the slices
+ :type slices: list of dicts
+ :type slivers: list of Sliver object (dictionaries)
+ :returns: An empty list if the slice has no reserved nodes, a rspec
+ list with all the nodes and their properties (a dict per node)
+ otherwise.
+ :rtype: list
+
+ .. seealso:: get_slice_and_slivers
+
+ """
+ # NOTE(review): 'slivers=[]' is a mutable default argument; harmless
+ # here because slivers is only read, but 'slivers=None' would be safer.
+ # NT: the semantic of this function is not clear to me :
+ # if slice is not defined, then all the nodes should be returned
+ # if slice is defined, we should return only the nodes that
+ # are part of this slice
+ # but what is the role of the slivers parameter ?
+ # So i assume that slice['node_ids'] will be the same as slivers for us
+ slice_nodes_list = []
+ if slices is not None:
+ for one_slice in slices:
+ try:
+ slice_nodes_list = one_slice['node_ids']
+ # if we are dealing with a slice that has no node just
+ # return an empty list. In cortexlab a slice can have multiple
+ # jobs scheduled, so it either has at least one lease or
+ # not at all.
+ except KeyError:
+ return []
+
+ # get the granularity in second for the reservation system
+ grain = self.driver.cortexlab_api.GetLeaseGranularity()
+
+ nodes = self.driver.cortexlab_api.GetNodes()
+
+ nodes_dict = {}
+
+ #if slices, this means we got to list all the nodes given to this slice
+ # Make a list of all the nodes in the slice before getting their
+ #attributes
+ rspec_nodes = []
+
+ logger.debug("CortexlabAggregate api get_nodes slices %s "
+ % (slices))
+
+ reserved_nodes = self.driver.cortexlab_api.GetNodesCurrentlyInUse()
+ logger.debug("CortexlabAggregate api get_nodes slice_nodes_list %s "
+ % (slice_nodes_list))
+ for node in nodes:
+ nodes_dict[node['node_id']] = node
+ if slice_nodes_list == [] or node['hostname'] in slice_nodes_list:
+
+ rspec_node = IotlabNode()
+ # xxx how to retrieve site['login_base']
+ #site_id=node['site_id']
+ #site=sites_dict[site_id]
+
+ # rspec_node['mobile'] = node['mobile']
+ rspec_node['archi'] = node['archi']
+ rspec_node['radio'] = node['radio']
+
+ cortexlab_xrn = cortexlab_xrn_object(self.driver.cortexlab_api.root_auth,
+ node['hostname'])
+ rspec_node['component_id'] = cortexlab_xrn.urn
+ rspec_node['component_name'] = node['hostname']
+ rspec_node['component_manager_id'] = \
+ hrn_to_urn(self.driver.cortexlab_api.root_auth,
+ 'authority+sa')
+
+ # Iotlab's nodes are federated : there is only one authority
+ # for all Iotlab sites, registered in SFA.
+ # Removing the part including the site
+ # in authority_id SA 27/07/12
+ rspec_node['authority_id'] = rspec_node['component_manager_id']
+
+ # do not include boot state (<available> element)
+ #in the manifest rspec
+
+ rspec_node['boot_state'] = node['boot_state']
+ if node['hostname'] in reserved_nodes:
+ rspec_node['boot_state'] = "Reserved"
+ rspec_node['exclusive'] = 'true'
+ rspec_node['hardware_types'] = [HardwareType({'name':
+ 'cortexlab-node'})]
+
+
+ location = IotlabLocation({'country':'France', 'site':
+ node['site']})
+ rspec_node['location'] = location
+
+ # Adding mobility of the node in the rspec
+ mobility = IotlabMobility()
+ for field in mobility:
+ try:
+ mobility[field] = node[field]
+ except KeyError, error:
+ logger.log_exc("CortexlabAggregate\t get_nodes \
+ mobility %s " % (error))
+ rspec_node['mobility'] = mobility
+
+ position = IotlabPosition()
+ for field in position:
+ try:
+ position[field] = node[field]
+ except KeyError, error:
+ logger.log_exc("CortexlabAggregate\t get_nodes \
+ position %s " % (error))
+
+ rspec_node['position'] = position
+ #rspec_node['interfaces'] = []
+
+ # Granularity
+ granularity = Granularity({'grain': grain})
+ rspec_node['granularity'] = granularity
+ rspec_node['tags'] = []
+ if node['hostname'] in slivers:
+ # add sliver info
+ sliver = slivers[node['hostname']]
+ rspec_node['sliver_id'] = sliver['sliver_id']
+ rspec_node['client_id'] = node['hostname']
+ rspec_node['slivers'] = [sliver]
+
+ # slivers always provide the ssh service
+ login = Login({'authentication': 'ssh-keys',
+ 'hostname': node['hostname'], 'port': '22',
+ 'username': sliver['name']})
+ service = Services({'login': login})
+ rspec_node['services'] = [service]
+ rspec_nodes.append(rspec_node)
+
+ return (rspec_nodes)
+
+ def get_all_leases(self, ldap_username):
+ """
+
+ Get list of lease dictionaries which all have the mandatory keys
+ ('lease_id', 'hostname', 'site_id', 'name', 'start_time', 'duration').
+ All the leases running or scheduled are returned.
+
+ :param ldap_username: if ldap uid is not None, looks for the leases
+ belonging to this user.
+ :type ldap_username: string
+ :returns: rspec lease dictionaries with keys lease_id, component_id,
+ slice_id, start_time, duration.
+ :rtype: list of dicts
+
+ .. note:: There is no filtering of leases within a given time frame.
+ All the running or scheduled leases are returned. options
+ removed SA 15/05/2013
+
+
+ """
+
+ #now = int(time.time())
+ #lease_filter = {'clip': now }
+
+ #if slice_record:
+ #lease_filter.update({'name': slice_record['name']})
+
+ #leases = self.driver.cortexlab_api.GetLeases(lease_filter)
+
+ logger.debug("CortexlabAggregate get_all_leases ldap_username %s "
+ % (ldap_username))
+ leases = self.driver.cortexlab_api.GetLeases(login=ldap_username)
+ grain = self.driver.cortexlab_api.GetLeaseGranularity()
+ # site_ids = []
+ rspec_leases = []
+ for lease in leases:
+ #as many leases as there are nodes in the job
+ for node in lease['reserved_nodes']:
+ rspec_lease = Lease()
+ rspec_lease['lease_id'] = lease['lease_id']
+ #site = node['site_id']
+ cortexlab_xrn = cortexlab_xrn_object(self.driver.cortexlab_api.root_auth,
+ node)
+ rspec_lease['component_id'] = cortexlab_xrn.urn
+ #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn,\
+ #site, node['hostname'])
+ try:
+ rspec_lease['slice_id'] = lease['slice_id']
+ except KeyError:
+ #No info on the slice used in cortexlab_xp table
+ pass
+ rspec_lease['start_time'] = lease['t_from']
+ rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
+ / grain
+ rspec_leases.append(rspec_lease)
+ return rspec_leases
+
+ def get_rspec(self, slice_xrn=None, login=None, version=None,
+ options=None):
+ """
+ Returns xml rspec:
+ - a full advertisement rspec with the testbed resources if slice_xrn is
+ not specified.If a lease option is given, also returns the leases
+ scheduled on the testbed.
+ - a manifest Rspec with the leases and nodes in slice's leases if
+ slice_xrn is not None.
+
+ :param slice_xrn: srn of the slice
+ :type slice_xrn: string
+ :param login: user's uid (ldap login) on cortexlab
+ :type login: string
+ :param version: can be set to sfa or cortexlab
+ :type version: RSpecVersion
+ :param options: used to specify if the leases should also be included in
+ the returned rspec.
+ :type options: dict
+
+ :returns: Xml Rspec.
+ :rtype: XML
+
+
+ """
+
+ ldap_username = None
+ rspec = None
+ version_manager = VersionManager()
+ version = version_manager.get_version(version)
+ logger.debug("CortexlabAggregate \t get_rspec ***version %s \
+ version.type %s version.version %s options %s \r\n"
+ % (version, version.type, version.version, options))
+
+ if slice_xrn is None:
+ rspec_version = version_manager._get_version(version.type,
+ version.version, 'ad')
+
+ else:
+ rspec_version = version_manager._get_version(
+ version.type, version.version, 'manifest')
+
+ slices, slivers = self.get_slice_and_slivers(slice_xrn, login)
+ if slice_xrn and slices is not None:
+ #Get user associated with this slice
+ #for one_slice in slices :
+ ldap_username = slices[0]['reg_researchers'][0].__dict__['hrn']
+ # ldap_username = slices[0]['user']
+ tmp = ldap_username.split('.')
+ ldap_username = tmp[1]
+ logger.debug("CortexlabAggregate \tget_rspec **** \
+ LDAP USERNAME %s \r\n" \
+ % (ldap_username))
+ #at this point sliver may be empty if no cortexlab job
+ #is running for this user/slice.
+ rspec = RSpec(version=rspec_version, user_options=options)
+
+ logger.debug("\r\n \r\n CortexlabAggregate \tget_rspec *** \
+ slice_xrn %s slices %s\r\n \r\n"
+ % (slice_xrn, slices))
+
+ if options is not None and 'list_leases' in options:
+ lease_option = options['list_leases']
+ else:
+ #If no options are specified, at least print the resources
+ lease_option = 'all'
+ #if slice_xrn :
+ #lease_option = 'all'
+
+ if lease_option in ['all', 'resources']:
+ #if not options.get('list_leases') or options.get('list_leases')
+ #and options['list_leases'] != 'leases':
+ nodes = self.get_nodes(slices, slivers)
+ if slice_xrn and slices is None:
+ nodes = []
+ logger.debug("\r\n")
+ logger.debug("CortexlabAggregate \t lease_option %s \
+ get rspec ******* nodes %s"
+ % (lease_option, nodes))
+
+ sites_set = set([node['location']['site'] for node in nodes])
+
+ #In case creating a job, slice_xrn is not set to None
+ rspec.version.add_nodes(nodes)
+ if slice_xrn and slices is not None:
+ # #Get user associated with this slice
+ # #for one_slice in slices :
+ # ldap_username = slices[0]['reg_researchers']
+ # # ldap_username = slices[0]['user']
+ # tmp = ldap_username.split('.')
+ # ldap_username = tmp[1]
+ # # ldap_username = tmp[1].split('_')[0]
+
+ logger.debug("CortexlabAggregate \tget_rspec **** \
+ version type %s ldap_ user %s \r\n" \
+ % (version.type, ldap_username))
+ if version.type == "Iotlab":
+ rspec.version.add_connection_information(
+ ldap_username, sites_set)
+
+ default_sliver = slivers.get('default_sliver', [])
+ # fixed: use '!= 0' rather than 'is not 0' -- integer identity
+ # is a CPython implementation detail, not a language guarantee
+ if default_sliver and len(nodes) != 0:
+ #default_sliver_attribs = default_sliver.get('tags', [])
+ logger.debug("CortexlabAggregate \tget_rspec **** \
+ default_sliver%s \r\n" % (default_sliver))
+ for attrib in default_sliver:
+ rspec.version.add_default_sliver_attribute(
+ attrib, default_sliver[attrib])
+
+ if lease_option in ['all', 'leases']:
+ leases = self.get_all_leases(ldap_username)
+ rspec.version.add_leases(leases)
+ logger.debug("CortexlabAggregate \tget_rspec **** \
+ FINAL RSPEC %s \r\n" % (rspec.toxml()))
+ return rspec.toxml()
--- /dev/null
+"""
+File containing the IotlabTestbedAPI, used to interact with nodes, users,
+slices, leases and keys, as well as the dedicated iotlab database and table,
+holding information about which slice is running which job.
+
+"""
+from datetime import datetime
+
+from sfa.util.sfalogging import logger
+
+from sfa.storage.alchemy import dbsession
+from sqlalchemy.orm import joinedload
+from sfa.storage.model import RegRecord, RegUser, RegSlice, RegKey
+
+from sfa.iotlab.iotlabpostgres import TestbedAdditionalSfaDB, LeaseTableXP
+from sfa.cortexlab.LDAPapi import LDAPapi
+
+from sfa.util.xrn import Xrn, hrn_to_urn, get_authority
+
+from sfa.trust.certificate import Keypair, convert_public_key
+from sfa.trust.gid import create_uuid
+from sfa.trust.hierarchy import Hierarchy
+
+from sfa.iotlab.iotlabaggregate import iotlab_xrn_object
+
+class CortexlabTestbedAPI():
+ """ Class enabled to use LDAP and OAR api calls. """
+
+ # minimum experiment duration, in units of the lease granularity
+ # (60 s each, so 10 units = 10 minutes)
+ _MINIMUM_DURATION = 10  # 10 units of granularity 60 s, 10 mins
+
+ def __init__(self, config):
+ """Creates an instance of OARrestapi and LDAPapi which will be used to
+ issue calls to OAR or LDAP methods.
+ Set the time format and the testbed granularity used for OAR
+ reservation and leases.
+
+ :param config: configuration object from sfa.util.config
+ :type config: Config object
+ """
+ self.cortexlab_leases_db = TestbedAdditionalSfaDB(config)
+ # NOTE(review): CortexlabQueryNodes is never imported in this file --
+ # this line will raise NameError at instantiation; add the missing
+ # import at the top of the file.
+ self.query_sites = CortexlabQueryNodes()
+ self.ldap = LDAPapi()
+ self.time_format = "%Y-%m-%d %H:%M:%S"
+ self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ self.grain = 60  # 10 mins lease minimum, 60 sec granularity
+ #import logging, logging.handlers
+ #from sfa.util.sfalogging import _SfaLogger
+ #sql_logger = _SfaLogger(loggername = 'sqlalchemy.engine', \
+ #level=logging.DEBUG)
+ return
+
+ @staticmethod
+ def GetMinExperimentDurationInGranularity():
+ """ Returns the minimum allowed duration for an experiment on the
+ testbed, expressed in units of the lease granularity (not seconds).
+
+ """
+ return CortexlabTestbedAPI._MINIMUM_DURATION
+
+ @staticmethod
+ def GetPeers(peer_filter=None ):
+ """ Gathers registered authorities in SFA DB and looks for specific peer
+ if peer_filter is specified.
+ :param peer_filter: name of the site authority looked for.
+ :type peer_filter: string
+ :returns: list of records.
+ :rtype: list of RegRecord
+
+ """
+
+ existing_records = {}
+ existing_hrns_by_types = {}
+ logger.debug("CORTEXLAB_API \tGetPeers peer_filter %s " % (peer_filter))
+ # fetch every authority-typed record registered in the SFA db
+ all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
+
+ # index records by (hrn, type) and group hrns per type
+ for record in all_records:
+ existing_records[(record.hrn, record.type)] = record
+ if record.type not in existing_hrns_by_types:
+ existing_hrns_by_types[record.type] = [record.hrn]
+ else:
+ existing_hrns_by_types[record.type].append(record.hrn)
+
+ logger.debug("CORTEXLAB_API \tGetPeer\texisting_hrns_by_types %s "
+ % (existing_hrns_by_types))
+ records_list = []
+
+ try:
+ if peer_filter:
+ records_list.append(existing_records[(peer_filter,
+ 'authority')])
+ else:
+ for hrn in existing_hrns_by_types['authority']:
+ records_list.append(existing_records[(hrn, 'authority')])
+
+ logger.debug("CORTEXLAB_API \tGetPeer \trecords_list %s "
+ % (records_list))
+
+ except KeyError:
+ # unknown peer_filter or no authority records at all:
+ # fall through and return whatever was collected (possibly empty)
+ pass
+
+ return_records = records_list
+ logger.debug("CORTEXLAB_API \tGetPeer return_records %s "
+ % (return_records))
+ return return_records
+
+ #TODO : Handling OR request in make_ldap_filters_from_records
+ #instead of the for loop
+ #over the records' list
+ def GetPersons(self, person_filter=None):
+ """
+ Get the enabled users and their properties from Cortexlab LDAP.
+ If a filter is specified, looks for the user whose properties match
+ the filter, otherwise returns the whole enabled users'list.
+
+ :param person_filter: Must be a list of dictionnaries with users
+ properties when not set to None.
+ :type person_filter: list of dict
+
+ :returns: Returns a list of users whose accounts are enabled
+ found in ldap.
+ :rtype: list of dicts
+
+ """
+ logger.debug("CORTEXLAB_API \tGetPersons person_filter %s"
+ % (person_filter))
+ person_list = []
+ if person_filter and isinstance(person_filter, list):
+ #If we are looking for a list of users (list of dict records)
+ #Usually the list contains only one user record
+ for searched_attributes in person_filter:
+
+ #Get only enabled user accounts in iotlab LDAP :
+ #add a filter for make_ldap_filters_from_record
+ person = self.ldap.LdapFindUser(searched_attributes,
+ is_user_enabled=True)
+ #If a person was found, append it to the list
+ if person:
+ person_list.append(person)
+
+ #If the list is empty, return None
+ # fixed: '== 0' rather than 'is 0' -- integer identity is a
+ # CPython implementation detail, not a language guarantee
+ if len(person_list) == 0:
+ person_list = None
+
+ else:
+ #Get only enabled user accounts in iotlab LDAP :
+ #add a filter for make_ldap_filters_from_record
+ person_list = self.ldap.LdapFindUser(is_user_enabled=True)
+
+ return person_list
+
+
+
+ def DeleteOneLease(self, lease_id, username):
+ """
+
+ Deletes the lease with the specified lease_id and username on OAR by
+ posting a delete request to OAR.
+
+ :param lease_id: Reservation identifier.
+ :param username: user's iotlab login in LDAP.
+ :type lease_id: Depends on what tou are using, could be integer or
+ string
+ :type username: string
+
+ :returns: dictionary with the lease id and if delete has been successful
+ (True) or no (False)
+ :rtype: dict
+
+ """
+
+ # Here delete the lease specified
+ answer = self.query_sites.delete_experiment(lease_id, username)
+
+ # If the username is not necessary to delete the lease, then you can
+ # remove it from the parameters, given that you propagate the changes
+ # Return delete status so that you know if the delete has been
+ # successuf or not
+
+
+ if answer['status'] is True:
+ ret = {lease_id: True}
+ else:
+ ret = {lease_id: False}
+ # fixed log message: '\D' was a stray backslash; siblings use '\t'
+ logger.debug("CORTEXLAB_API \tDeleteOneLease lease_id %s \r\n answer %s \
+ username %s" % (lease_id, answer, username))
+ return ret
+
+
+
+ def GetNodesCurrentlyInUse(self):
+ """Return the hostnames of every node involved in a currently running
+ experiment (and only the nodes unavailable at the moment this call
+ is issued).
+
+ Placeholder implementation: always reports no node in use.
+
+ :rtype: list of nodes hostnames.
+ """
+ return []
+
+
+ def GetReservedNodes(self, username=None):
+ """ Get list of leases. Get the leases for the username if specified,
+ otherwise get all the leases. Finds the nodes hostnames for each
+ OAR node identifier.
+ :param username: user's LDAP login
+ :type username: string
+ :returns: list of reservations dict
+ :rtype: dict list
+ :raises KeyError: if a reservation lacks one of the mandatory keys
+ 'reserved_nodes' or 'lease_id'.
+ """
+
+ #Get the nodes in use and the reserved nodes
+ mandatory_sfa_keys = ['reserved_nodes','lease_id']
+ reservation_dict_list = \
+ self.query_sites.get_reserved_nodes(username = username)
+
+ if len(reservation_dict_list) == 0:
+ return []
+
+ else:
+ # Ensure mandatory keys are in the dict
+ if not self.ensure_format_is_valid(reservation_dict_list,
+ mandatory_sfa_keys):
+ raise KeyError, "GetReservedNodes : Missing SFA mandatory keys"
+
+
+ return reservation_dict_list
+
+ @staticmethod
+ def ensure_format_is_valid(list_dictionary_to_check, mandatory_keys_list):
+ """Check that every dict in the list defines all the mandatory keys.
+
+ :param list_dictionary_to_check: list of dicts to inspect.
+ :param mandatory_keys_list: keys each entry must define.
+ :returns: True if every entry has all the keys, False otherwise.
+ :rtype: bool
+ """
+ for entry in list_dictionary_to_check:
+ if not all (key in entry for key in mandatory_keys_list):
+ return False
+ return True
+
+ def GetNodes(self, node_filter_dict=None, return_fields_list=None):
+ """
+
+ Make a list of cortexlab nodes and their properties from information
+ given by ?. Search for specific nodes if some filters are
+ specified. Nodes properties returned if no return_fields_list given:
+ 'hrn','archi','mobile','hostname','site','boot_state','node_id',
+ 'radio','posx','posy,'posz'.
+
+ :param node_filter_dict: dictionary of lists with node properties. For
+ instance, if you want to look for a specific node with its hrn,
+ the node_filter_dict should be {'hrn': [hrn_of_the_node]}
+ :type node_filter_dict: dict
+ :param return_fields_list: list of specific fields the user wants to be
+ returned.
+ :type return_fields_list: list
+ :returns: list of dictionaries with node properties. Mandatory
+ properties hrn, site, hostname. Complete list (iotlab) ['hrn',
+ 'archi', 'mobile', 'hostname', 'site', 'mobility_type',
+ 'boot_state', 'node_id','radio', 'posx', 'posy', 'oar_id', 'posz']
+ Radio, archi, mobile and position are useful to help users choose
+ the appropriate nodes.
+ :rtype: list
+ :raises KeyError: if a node entry lacks one of the mandatory keys
+ 'hrn', 'site' or 'hostname'.
+
+ :TODO: FILL IN THE BLANKS
+ """
+
+ # Here get full dict of nodes with all their properties.
+ mandatory_sfa_keys = ['hrn', 'site', 'hostname']
+ node_list_dict = self.query_sites.get_all_nodes(node_filter_dict,
+ return_fields_list)
+
+ if len(node_list_dict) == 0:
+ return_node_list = []
+
+ else:
+ # Ensure mandatory keys are in the dict
+ if not self.ensure_format_is_valid(node_list_dict,
+ mandatory_sfa_keys):
+ raise KeyError, "GetNodes : Missing SFA mandatory keys"
+
+
+ return_node_list = node_list_dict
+ return return_node_list
+
+
+
+ @staticmethod
+ def AddSlice(slice_record, user_record):
+ """
+
+ Add slice to the local cortexlab sfa tables if the slice comes
+ from a federated site and is not yet in the cortexlab sfa DB,
+ although the user has already a LDAP login.
+ Called by verify_slice during lease/sliver creation.
+
+ :param slice_record: record of slice, must contain hrn, gid, slice_id
+ and authority of the slice.
+ :type slice_record: dictionary
+ :param user_record: record of the user
+ :type user_record: RegUser
+
+ """
+
+ sfa_record = RegSlice(hrn=slice_record['hrn'],
+ gid=slice_record['gid'],
+ pointer=slice_record['slice_id'],
+ authority=slice_record['authority'])
+ logger.debug("CORTEXLAB_API.PY AddSlice sfa_record %s user_record %s"
+ % (sfa_record, user_record))
+ sfa_record.just_created()
+ dbsession.add(sfa_record)
+ dbsession.commit()
+ # Update the reg-researchers dependency table: associate the slice
+ # with its owner (second commit persists the association)
+ sfa_record.reg_researchers = [user_record]
+ dbsession.commit()
+
+ return
+
+ def GetSites(self, site_filter_name_list=None, return_fields_list=None):
+ """Returns the list of Cortexlab's sites with the associated nodes and
+ the sites' properties as dictionaries. Used in import.
+
+ Site properties:
+ ['address_ids', 'slice_ids', 'name', 'node_ids', 'url', 'person_ids',
+ 'site_tag_ids', 'enabled', 'site', 'longitude', 'pcu_ids',
+ 'max_slivers', 'max_slices', 'ext_consortium_id', 'date_created',
+ 'latitude', 'is_public', 'peer_site_id', 'peer_id', 'abbreviated_name']
+ can be empty ( []): address_ids, slice_ids, pcu_ids, person_ids,
+ site_tag_ids
+
+ :param site_filter_name_list: used to specify specific sites
+ :param return_fields_list: field that has to be returned
+ :type site_filter_name_list: list
+ :type return_fields_list: list
+ :rtype: list of dicts
+ :raises KeyError: if a site entry lacks one of the mandatory keys
+ 'name', 'node_ids', 'longitude' or 'site'.
+
+ """
+ site_list_dict = self.query_sites.get_sites(site_filter_name_list,
+ return_fields_list)
+
+ mandatory_sfa_keys = ['name', 'node_ids', 'longitude','site' ]
+
+ if len(site_list_dict) == 0:
+ return_site_list = []
+
+ else:
+ # Ensure mandatory keys are in the dict
+ if not self.ensure_format_is_valid(site_list_dict,
+ mandatory_sfa_keys):
+ raise KeyError, "GetSites : Missing sfa mandatory keys"
+
+ return_site_list = site_list_dict
+ return return_site_list
+
+
+ #TODO : Check rights to delete person
+ def DeletePerson(self, person_record):
+ """Disable an existing account in cortexlab LDAP.
+
+ Users and techs can only delete themselves. PIs can only
+ delete themselves and other non-PIs at their sites.
+ Admins can delete anyone.
+
+ :param person_record: user's record
+ :type person_record: dict
+ :returns: True if successful, False otherwise.
+ :rtype: boolean
+
+ .. todo:: CHECK THAT ONLY THE USER OR ADMIN CAN DEL HIMSELF.
+ """
+ #Disable user account in iotlab LDAP
+ ret = self.ldap.LdapMarkUserAsDeleted(person_record)
+ logger.warning("CORTEXLAB_API DeletePerson %s " % (person_record))
+ return ret['bool']
+
+ def DeleteSlice(self, slice_record):
+ """Deletes the specified slice and kills the jobs associated with
+ the slice if any, using DeleteSliceFromNodes.
+
+ :param slice_record: record of the slice, must contain experiment_id, user
+ :type slice_record: dict
+ :returns: True if all the jobs in the slice have been deleted,
+ or the list of jobs that could not be deleted otherwise.
+ :rtype: list or boolean
+
+ .. seealso:: DeleteSliceFromNodes
+
+ """
+ ret = self.DeleteSliceFromNodes(slice_record)
+ delete_failed = None
+ # collect the ids of experiments for which at least one node
+ # deletion reported a failure
+ for experiment_id in ret:
+ if False in ret[experiment_id]:
+ if delete_failed is None:
+ delete_failed = []
+ delete_failed.append(experiment_id)
+
+ logger.info("CORTEXLAB_API DeleteSlice %s answer %s"%(slice_record, \
+ delete_failed))
+ return delete_failed or True
+
+    @staticmethod
+    def __add_person_to_db(user_dict):
+        """
+        Add a federated user straight to db when the user issues a lease
+        request with iotlab nodes and that he has not registered with cortexlab
+        yet (that is he does not have a LDAP entry yet).
+        Uses parts of the routines in CortexlabImport when importing user
+        from LDAP.
+        Called by AddPerson, right after LdapAddUser.
+
+        :param user_dict: Must contain email, hrn and pkey to get a GID
+            and be added to the SFA db.
+        :type user_dict: dict
+        """
+        # Look the account up by email in the SFA registry; nothing to do
+        # if it is already registered.
+        check_if_exists = \
+            dbsession.query(RegUser).filter_by(email = user_dict['email']).first()
+        #user doesn't exist yet
+        if not check_if_exists:
+            logger.debug("__add_person_to_db \t Adding %s \r\n \r\n \
+            " %(user_dict))
+            hrn = user_dict['hrn']
+            person_urn = hrn_to_urn(hrn, 'user')
+            pubkey = user_dict['pkey']
+            try:
+                pkey = convert_public_key(pubkey)
+            except TypeError:
+                #key not good. create another pkey
+                # The supplied ssh key could not be converted to an SFA
+                # keypair; fall back on a freshly generated one so a GID
+                # can still be built.
+                logger.warn('__add_person_to_db: unable to convert public \
+                key for %s' %(hrn ))
+                pkey = Keypair(create=True)
+
+            # Build the user's GID (attaching the email when present) and
+            # persist the registry record together with its ssh key.
+            if pubkey is not None and pkey is not None :
+                hierarchy = Hierarchy()
+                person_gid = hierarchy.create_gid(person_urn, create_uuid(), \
+                                                  pkey)
+                if user_dict['email']:
+                    logger.debug("__add_person_to_db \r\n \r\n \
+                    IOTLAB IMPORTER PERSON EMAIL OK email %s "\
+                    %(user_dict['email']))
+                    person_gid.set_email(user_dict['email'])
+
+                user_record = RegUser(hrn=hrn , pointer= '-1', \
+                                      authority=get_authority(hrn), \
+                                      email=user_dict['email'], gid = person_gid)
+                user_record.reg_keys = [RegKey(user_dict['pkey'])]
+                user_record.just_created()
+                dbsession.add (user_record)
+                dbsession.commit()
+        return
+
+ def AddPerson(self, record):
+ """
+
+ Adds a new account. Any fields specified in records are used,
+ otherwise defaults are used. Creates an appropriate login by calling
+ LdapAddUser.
+
+ :param record: dictionary with the sfa user's properties.
+ :returns: a dicitonary with the status. If successful, the dictionary
+ boolean is set to True and there is a 'uid' key with the new login
+ added to LDAP, otherwise the bool is set to False and a key
+ 'message' is in the dictionary, with the error message.
+ :rtype: dict
+
+ """
+ ret = self.ldap.LdapAddUser(record)
+
+ if ret['bool'] is True:
+ record['hrn'] = self.root_auth + '.' + ret['uid']
+ logger.debug("CORTEXLAB_API AddPerson return code %s record %s "
+ % (ret, record))
+ self.__add_person_to_db(record)
+ return ret
+
+
+
+
+
+ #TODO AddPersonKey 04/07/2012 SA
+ def AddPersonKey(self, person_uid, old_attributes_dict, new_key_dict):
+ """Adds a new key to the specified account. Adds the key to the
+ iotlab ldap, provided that the person_uid is valid.
+
+ Non-admins can only modify their own keys.
+
+ :param person_uid: user's iotlab login in LDAP
+ :param old_attributes_dict: dict with the user's old sshPublicKey
+ :param new_key_dict: dict with the user's new sshPublicKey
+ :type person_uid: string
+
+
+ :rtype: Boolean
+ :returns: True if the key has been modified, False otherwise.
+
+ """
+ ret = self.ldap.LdapModify(person_uid, old_attributes_dict, \
+ new_key_dict)
+ logger.warning("CORTEXLAB_API AddPersonKey EMPTY - DO NOTHING \r\n ")
+ return ret['bool']
+
+
+ @staticmethod
+ def _process_walltime(duration):
+ """ Calculates the walltime in seconds from the duration in H:M:S
+ specified in the RSpec.
+
+ """
+ if duration:
+ # Fixing the walltime by adding a few delays.
+ # First put the walltime in seconds oarAdditionalDelay = 20;
+ # additional delay for /bin/sleep command to
+ # take in account prologue and epilogue scripts execution
+ # int walltimeAdditionalDelay = 240; additional delay
+ #for prologue/epilogue execution = $SERVER_PROLOGUE_EPILOGUE_TIMEOUT
+ #in oar.conf
+ # Put the duration in seconds first
+ #desired_walltime = duration * 60
+ desired_walltime = duration
+ total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
+ sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
+ walltime = []
+ #Put the walltime back in str form
+ #First get the hours
+ walltime.append(str(total_walltime / 3600))
+ total_walltime = total_walltime - 3600 * int(walltime[0])
+ #Get the remaining minutes
+ walltime.append(str(total_walltime / 60))
+ total_walltime = total_walltime - 60 * int(walltime[1])
+ #Get the seconds
+ walltime.append(str(total_walltime))
+
+ else:
+ logger.log_exc(" __process_walltime duration null")
+
+ return walltime, sleep_walltime
+
+ @staticmethod
+ def _create_job_structure_request_for_OAR(lease_dict):
+ """ Creates the structure needed for a correct POST on OAR.
+ Makes the timestamp transformation into the appropriate format.
+ Sends the POST request to create the job with the resources in
+ added_nodes.
+
+ """
+
+ nodeid_list = []
+ reqdict = {}
+
+
+ reqdict['workdir'] = '/tmp'
+ reqdict['resource'] = "{network_address in ("
+
+ for node in lease_dict['added_nodes']:
+ logger.debug("\r\n \r\n OARrestapi \t \
+ __create_job_structure_request_for_OAR node %s" %(node))
+
+ # Get the ID of the node
+ nodeid = node
+ reqdict['resource'] += "'" + nodeid + "', "
+ nodeid_list.append(nodeid)
+
+ custom_length = len(reqdict['resource'])- 2
+ reqdict['resource'] = reqdict['resource'][0:custom_length] + \
+ ")}/nodes=" + str(len(nodeid_list))
+
+
+ walltime, sleep_walltime = \
+ IotlabTestbedAPI._process_walltime(\
+ int(lease_dict['lease_duration']))
+
+
+ reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
+ ":" + str(walltime[1]) + ":" + str(walltime[2])
+ reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
+
+ #In case of a scheduled experiment (not immediate)
+ #To run an XP immediately, don't specify date and time in RSpec
+ #They will be set to None.
+ if lease_dict['lease_start_time'] is not '0':
+ #Readable time accepted by OAR
+ start_time = datetime.fromtimestamp( \
+ int(lease_dict['lease_start_time'])).\
+ strftime(lease_dict['time_format'])
+ reqdict['reservation'] = start_time
+ #If there is not start time, Immediate XP. No need to add special
+ # OAR parameters
+
+
+ reqdict['type'] = "deploy"
+ reqdict['directory'] = ""
+ reqdict['name'] = "SFA_" + lease_dict['slice_user']
+
+ return reqdict
+
+
+    def LaunchExperimentOnTestbed(self, added_nodes, slice_name, \
+        lease_start_time, lease_duration, slice_user=None):
+
+        """
+        Create an experiment request structure based on the information provided
+        and schedule/run the experiment on the testbed by reserving the nodes.
+
+        :param added_nodes: list of nodes that belong to the described lease.
+        :param slice_name: the slice hrn associated to the lease.
+        :param lease_start_time: timestamp of the lease starting time.
+        :param lease_duration: lease duration in minutes
+        :param slice_user: login of the user the experiment runs as.
+        :returns: the experiment id handed back by the testbed, or None
+            when the scheduling request failed.
+        """
+        lease_dict = {}
+        # Add in the dict whatever is necessary to create the experiment on
+        # the testbed
+        lease_dict['lease_start_time'] = lease_start_time
+        lease_dict['lease_duration'] = lease_duration
+        lease_dict['added_nodes'] = added_nodes
+        lease_dict['slice_name'] = slice_name
+        lease_dict['slice_user'] = slice_user
+        lease_dict['grain'] = self.GetLeaseGranularity()
+        # NOTE(review): _create_job_structure_request_for_OAR also expects
+        # a 'time_format' key that is never set here - confirm which
+        # request-building path query_sites.schedule_experiment uses.
+
+        answer = self.query_sites.schedule_experiment(lease_dict)
+        try:
+            experiment_id = answer['id']
+        except KeyError:
+            # The testbed refused the experiment: log the raw answer and
+            # signal failure to the caller with None.
+            logger.log_exc("CORTEXLAB_API \tLaunchExperimentOnTestbed \
+                             Impossible to create xp %s " %(answer))
+            return None
+
+        if experiment_id :
+            logger.debug("CORTEXLAB_API \tLaunchExperimentOnTestbed \
+                experiment_id %s added_nodes %s slice_user %s"
+                %(experiment_id, added_nodes, slice_user))
+
+        return experiment_id
+
+
+    def AddLeases(self, hostname_list, slice_record,
+                  lease_start_time, lease_duration):
+
+        """Creates an experiment on the testbed corresponding to the information
+        provided as parameters. Adds the experiment id and the slice hrn in the
+        lease table on the additional sfa database so that we are able to know
+        which slice has which nodes.
+
+        :param hostname_list: list of nodes' OAR hostnames.
+        :param slice_record: sfa slice record, must contain login and hrn.
+        :param lease_start_time: starting time , unix timestamp format
+        :param lease_duration: duration in minutes
+
+        :type hostname_list: list
+        :type slice_record: dict
+        :type lease_start_time: integer
+        :type lease_duration: integer
+        """
+        logger.debug("CORTEXLAB_API \r\n \r\n \t AddLeases hostname_list %s \
+                slice_record %s lease_start_time %s lease_duration %s "\
+                %( hostname_list, slice_record , lease_start_time, \
+                lease_duration))
+
+        # Schedule the experiment on the testbed under the slice owner's
+        # LDAP login.
+        username = slice_record['login']
+
+        experiment_id = self.LaunchExperimentOnTestbed(hostname_list, \
+                                        slice_record['hrn'], \
+                                        lease_start_time, lease_duration, \
+                                        username)
+        # start_time is only used for the trace at the end of the method.
+        start_time = \
+                datetime.fromtimestamp(int(lease_start_time)).\
+                strftime(self.time_format)
+        # NOTE(review): this adds a duration documented as minutes to a
+        # unix timestamp in seconds - confirm both operands use the same
+        # unit.
+        end_time = lease_start_time + lease_duration
+
+        logger.debug("CORTEXLAB_API \r\n \r\n \t AddLeases TURN ON LOGGING SQL \
+                    %s %s %s "%(slice_record['hrn'], experiment_id, end_time))
+
+        logger.debug("CORTEXLAB_API \r\n \r\n \t AddLeases %s %s %s " \
+                %(type(slice_record['hrn']), type(experiment_id),
+                type(end_time)))
+
+        # Record the (slice, experiment) association in the local leases
+        # database.
+        testbed_xp_row = LeaseTableXP(slice_hrn=slice_record['hrn'],
+                experiment_id=experiment_id, end_time=end_time)
+
+        logger.debug("CORTEXLAB_API \r\n \r\n \t AddLeases testbed_xp_row %s" \
+                %(testbed_xp_row))
+        self.cortexlab_leases_db.testbed_session.add(testbed_xp_row)
+        self.cortexlab_leases_db.testbed_session.commit()
+
+        logger.debug("CORTEXLAB_API \t AddLeases hostname_list start_time %s " \
+                %(start_time))
+
+        return
+
+ def DeleteLeases(self, leases_id_list, slice_hrn):
+ """
+
+ Deletes several leases, based on their experiment ids and the slice
+ they are associated with. Uses DeleteOneLease to delete the
+ experiment on the testbed. Note that one slice can contain multiple
+ experiments, and in this
+ case all the experiments in the leases_id_list MUST belong to this
+ same slice, since there is only one slice hrn provided here.
+
+ :param leases_id_list: list of job ids that belong to the slice whose
+ slice hrn is provided.
+ :param slice_hrn: the slice hrn.
+ :type slice_hrn: string
+
+ .. warning:: Does not have a return value since there was no easy
+ way to handle failure when dealing with multiple job delete. Plus,
+ there was no easy way to report it to the user.
+
+ """
+ logger.debug("CORTEXLAB_API DeleteLeases leases_id_list %s slice_hrn %s \
+ \r\n " %(leases_id_list, slice_hrn))
+ for experiment_id in leases_id_list:
+ self.DeleteOneLease(experiment_id, slice_hrn)
+
+ return
+
+ #Delete the jobs from job_iotlab table
+ def DeleteSliceFromNodes(self, slice_record):
+ """
+ Deletes all the running or scheduled jobs of a given slice
+ given its record.
+
+ :param slice_record: record of the slice, must contain experiment_id,
+ user
+ :type slice_record: dict
+ :returns: dict of the jobs'deletion status. Success= True, Failure=
+ False, for each job id.
+ :rtype: dict
+
+ .. note: used in driver delete_sliver
+
+ """
+ logger.debug("CORTEXLAB_API \t DeleteSliceFromNodes %s "
+ % (slice_record))
+
+ if isinstance(slice_record['experiment_id'], list):
+ experiment_bool_answer = {}
+ for experiment_id in slice_record['experiment_id']:
+ ret = self.DeleteOneLease(experiment_id, slice_record['user'])
+
+ experiment_bool_answer.update(ret)
+
+ else:
+ experiment_bool_answer = [self.DeleteOneLease(
+ slice_record['experiment_id'],
+ slice_record['user'])]
+
+ return experiment_bool_answer
+
+
+
+    def GetLeaseGranularity(self):
+        """ Returns the granularity of an experiment in the Iotlab testbed.
+        OAR uses seconds for experiments duration, the granularity is also
+        defined in seconds.
+        Experiments which last less than 10 min (600 sec) are invalid.
+
+        :returns: experiment granularity, in seconds.
+        """
+        # self.grain is set elsewhere (presumably at init time from the
+        # configuration - confirm against the constructor, outside this hunk).
+        return self.grain
+
+
+ # @staticmethod
+ # def update_experiments_in_additional_sfa_db( job_oar_list, jobs_psql):
+ # """ Cleans the iotlab db by deleting expired and cancelled jobs.
+ # Compares the list of job ids given by OAR with the job ids that
+ # are already in the database, deletes the jobs that are no longer in
+ # the OAR job id list.
+ # :param job_oar_list: list of job ids coming from OAR
+ # :type job_oar_list: list
+    # :param jobs_psql: list of job ids from the database.
+ # type job_psql: list
+ # """
+ # #Turn the list into a set
+ # set_jobs_psql = set(jobs_psql)
+
+ # kept_jobs = set(job_oar_list).intersection(set_jobs_psql)
+ # logger.debug ( "\r\n \t\ update_experiments_in_additional_sfa_db jobs_psql %s \r\n \t \
+ # job_oar_list %s kept_jobs %s "%(set_jobs_psql, job_oar_list, kept_jobs))
+ # deleted_jobs = set_jobs_psql.difference(kept_jobs)
+ # deleted_jobs = list(deleted_jobs)
+ # if len(deleted_jobs) > 0:
+ # self.cortexlab_leases_db.testbed_session.query(LeaseTableXP).filter(LeaseTableXP.job_id.in_(deleted_jobs)).delete(synchronize_session='fetch')
+ # self.cortexlab_leases_db.testbed_session.commit()
+
+ # return
+
+ @staticmethod
+ def filter_lease_name(reservation_list, filter_value):
+ filtered_reservation_list = list(reservation_list)
+ logger.debug("CORTEXLAB_API \t filter_lease_name reservation_list %s" \
+ % (reservation_list))
+ for reservation in reservation_list:
+ if 'slice_hrn' in reservation and \
+ reservation['slice_hrn'] != filter_value:
+ filtered_reservation_list.remove(reservation)
+
+ logger.debug("CORTEXLAB_API \t filter_lease_name filtered_reservation_list %s" \
+ % (filtered_reservation_list))
+ return filtered_reservation_list
+
+ @staticmethod
+ def filter_lease_start_time(reservation_list, filter_value):
+ filtered_reservation_list = list(reservation_list)
+
+ for reservation in reservation_list:
+ if 't_from' in reservation and \
+ reservation['t_from'] > filter_value:
+ filtered_reservation_list.remove(reservation)
+
+ return filtered_reservation_list
+
+    def complete_leases_info(self, unfiltered_reservation_list, db_xp_dict):
+
+        """Check that the leases list of dictionaries contains the appropriate
+        fields and pieces of information.
+
+        :param unfiltered_reservation_list: list of leases to be completed.
+        :param db_xp_dict: leases information in the lease_sfa table
+        :returns local_unfiltered_reservation_list: list of leases completed.
+            list of dictionaries describing the leases, with all the needed
+            information (sfa,ldap,nodes) to identify one particular lease.
+        :returns testbed_xp_list: list of experiments' ids running or scheduled
+            on the testbed.
+        :rtype local_unfiltered_reservation_list: list of dict
+        :rtype testbed_xp_list: list
+        """
+        testbed_xp_list = []
+        # NOTE: list() only copies the outer list - the lease dicts
+        # themselves are shared with the caller and are completed in place
+        # below.
+        local_unfiltered_reservation_list = list(unfiltered_reservation_list)
+        # slice_hrn and lease_id are in the lease_table,
+        # so they are in the db_xp_dict.
+        # component_id_list : list of nodes xrns
+        # reserved_nodes : list of nodes' hostnames
+        # slice_id : slice urn, can be made from the slice hrn using hrn_to_urn
+        for resa in local_unfiltered_reservation_list:
+
+            #Construct list of scheduled experiments (runing, waiting..)
+            testbed_xp_list.append(resa['lease_id'])
+            #If there is information on the experiment in the lease table
+            #(slice used and experiment id), meaning the experiment was created
+            # using sfa
+            if resa['lease_id'] in db_xp_dict:
+                xp_info = db_xp_dict[resa['lease_id']]
+                logger.debug("CORTEXLAB_API \tGetLeases xp_info %s"
+                             % (xp_info))
+                resa['slice_hrn'] = xp_info['slice_hrn']
+                resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
+
+            #otherwise, assume it is a cortexlab slice, created via the
+            # cortexlab portal
+            else:
+                resa['slice_id'] = hrn_to_urn(self.root_auth + '.' +
+                                              resa['user'] + "_slice", 'slice')
+                resa['slice_hrn'] = Xrn(resa['slice_id']).get_hrn()
+
+            resa['component_id_list'] = []
+            #Transform the hostnames into urns (component ids)
+            for node in resa['reserved_nodes']:
+                iotlab_xrn = iotlab_xrn_object(self.root_auth, node)
+                resa['component_id_list'].append(iotlab_xrn.urn)
+
+        return local_unfiltered_reservation_list, testbed_xp_list
+
+    def GetLeases(self, lease_filter_dict=None, login=None):
+        """
+        Get the list of leases from the testbed with complete information
+        about in which slice is running which experiment and which nodes are
+        involved.
+        Two purposes:
+        -Fetch all the experiments from the testbed (running, waiting..)
+        complete the reservation information with slice hrn
+        found in testbed_xp table. If not available in the table,
+        assume it is a cortexlab slice.
+        -Updates the cortexlab table, deleting jobs when necessary.
+
+        :param lease_filter_dict: optional filters; supported keys are
+            'slice_hrn' and 't_from' (see filter_lease_name and
+            filter_lease_start_time).
+        :param login: optional LDAP login restricting the reserved nodes
+            query.
+        :returns: reservation_list, list of dictionaries with 'lease_id',
+            'reserved_nodes','slice_id','user', 'component_id_list',
+            'slice_hrn', 'resource_ids', 't_from', 't_until'. Other
+            keys can be returned if necessary, such as the 'state' of the
+            lease, if the information has been added in GetReservedNodes.
+        :rtype: list
+        :raises KeyError: when a mandatory lease field is missing.
+        """
+
+        unfiltered_reservation_list = self.GetReservedNodes(login)
+
+        reservation_list = []
+        #Find the slice associated with this user ldap uid
+        logger.debug(" CORTEXLAB_API.PY \tGetLeases login %s\
+                        unfiltered_reservation_list %s "
+                     % (login, unfiltered_reservation_list))
+        #Create user dict first to avoid looking several times for
+        #the same user in LDAP SA 27/07/12
+
+        # Index the local leases table by experiment id for fast lookups.
+        db_xp_query = self.cortexlab_leases_db.testbed_session.query(LeaseTableXP).all()
+        db_xp_dict = dict([(row.experiment_id, row.__dict__)
+                           for row in db_xp_query])
+
+        logger.debug("CORTEXLAB_API \tGetLeases db_xp_dict %s"
+                     % (db_xp_dict))
+        db_xp_id_list = [row.experiment_id for row in db_xp_query]
+
+        # ('fiels' sic - variable name kept unchanged in this doc pass.)
+        required_fiels_in_leases = ['lease_id',
+                        'reserved_nodes','slice_id', 'user', 'component_id_list',
+                        'slice_hrn', 'resource_ids', 't_from', 't_until']
+
+        # Add any missing information on the leases with complete_leases_info
+        unfiltered_reservation_list, testbed_xp_list = \
+            self.complete_leases_info(unfiltered_reservation_list,
+                                      db_xp_dict)
+        # Check that the list of leases is complete and has the mandatory
+        # information
+        format_status = self.ensure_format_is_valid(unfiltered_reservation_list,
+                                                    required_fiels_in_leases)
+
+        if not format_status:
+            logger.log_exc("\tCortexlabapi \t GetLeases : Missing fields in \
+                           reservation list")
+            raise KeyError, "GetLeases : Missing fields in reservation list "
+
+        if lease_filter_dict:
+            logger.debug("CORTEXLAB_API \tGetLeases \
+                    \r\n leasefilter %s" % ( lease_filter_dict))
+
+            # Dispatch each supported filter key to its filtering helper.
+            filter_dict_functions = {
+                'slice_hrn' : CortexlabTestbedAPI.filter_lease_name,
+                't_from' : CortexlabTestbedAPI.filter_lease_start_time
+            }
+
+            reservation_list = list(unfiltered_reservation_list)
+            for filter_type in lease_filter_dict:
+                logger.debug("CORTEXLAB_API \tGetLeases reservation_list %s" \
+                    % (reservation_list))
+                reservation_list = filter_dict_functions[filter_type](\
+                    reservation_list,lease_filter_dict[filter_type] )
+
+        # NOTE(review): when lease_filter_dict is an *empty* dict (falsy
+        # but not None) the function returns [] - confirm that is intended.
+        if lease_filter_dict is None:
+            reservation_list = unfiltered_reservation_list
+
+        # Prune local db entries whose experiment no longer exists on the
+        # testbed.
+        self.cortexlab_leases_db.update_experiments_in_additional_sfa_db(
+            testbed_xp_list, db_xp_id_list)
+
+        logger.debug(" CORTEXLAB_API.PY \tGetLeases reservation_list %s"
+                     % (reservation_list))
+        return reservation_list
+
+
+
+
+#TODO FUNCTIONS SECTION 04/07/2012 SA
+
+ ##TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
+ ##04/07/2012 SA
+ #@staticmethod
+ #def UnBindObjectFromPeer( auth, object_type, object_id, shortname):
+ #""" This method is a hopefully temporary hack to let the sfa correctly
+ #detach the objects it creates from a remote peer object. This is
+ #needed so that the sfa federation link can work in parallel with
+ #RefreshPeer, as RefreshPeer depends on remote objects being correctly
+ #marked.
+ #Parameters:
+ #auth : struct, API authentication structure
+ #AuthMethod : string, Authentication method to use
+ #object_type : string, Object type, among 'site','person','slice',
+ #'node','key'
+ #object_id : int, object_id
+ #shortname : string, peer shortname
+ #FROM PLC DOC
+
+ #"""
+ #logger.warning("CORTEXLAB_API \tUnBindObjectFromPeer EMPTY-\
+ #DO NOTHING \r\n ")
+ #return
+
+ ##TODO Is BindObjectToPeer still necessary ? Currently does nothing
+ ##04/07/2012 SA
+ #|| Commented out 28/05/13 SA
+ #def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
+ #remote_object_id=None):
+ #"""This method is a hopefully temporary hack to let the sfa correctly
+ #attach the objects it creates to a remote peer object. This is needed
+ #so that the sfa federation link can work in parallel with RefreshPeer,
+ #as RefreshPeer depends on remote objects being correctly marked.
+ #Parameters:
+ #shortname : string, peer shortname
+ #remote_object_id : int, remote object_id, set to 0 if unknown
+ #FROM PLC API DOC
+
+ #"""
+ #logger.warning("CORTEXLAB_API \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
+ #return
+
+ ##TODO UpdateSlice 04/07/2012 SA || Commented out 28/05/13 SA
+    ##Function should delete and create another job since in iotlab slice=job
+ #def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
+ #"""Updates the parameters of an existing slice with the values in
+ #slice_fields.
+ #Users may only update slices of which they are members.
+ #PIs may update any of the slices at their sites, or any slices of
+ #which they are members. Admins may update any slice.
+ #Only PIs and admins may update max_nodes. Slices cannot be renewed
+ #(by updating the expires parameter) more than 8 weeks into the future.
+ #Returns 1 if successful, faults otherwise.
+ #FROM PLC API DOC
+
+ #"""
+ #logger.warning("CORTEXLAB_API UpdateSlice EMPTY - DO NOTHING \r\n ")
+ #return
+
+ #Unused SA 30/05/13, we only update the user's key or we delete it.
+ ##TODO UpdatePerson 04/07/2012 SA
+ #def UpdatePerson(self, iotlab_hrn, federated_hrn, person_fields=None):
+ #"""Updates a person. Only the fields specified in person_fields
+ #are updated, all other fields are left untouched.
+ #Users and techs can only update themselves. PIs can only update
+ #themselves and other non-PIs at their sites.
+ #Returns 1 if successful, faults otherwise.
+ #FROM PLC API DOC
+
+ #"""
+ ##new_row = FederatedToIotlab(iotlab_hrn, federated_hrn)
+ ##self.cortexlab_leases_db.testbed_session.add(new_row)
+ ##self.cortexlab_leases_db.testbed_session.commit()
+
+ #logger.debug("CORTEXLAB_API UpdatePerson EMPTY - DO NOTHING \r\n ")
+ #return
+
+ @staticmethod
+ def GetKeys(key_filter=None):
+ """Returns a dict of dict based on the key string. Each dict entry
+ contains the key id, the ssh key, the user's email and the
+ user's hrn.
+ If key_filter is specified and is an array of key identifiers,
+ only keys matching the filter will be returned.
+
+ Admin may query all keys. Non-admins may only query their own keys.
+ FROM PLC API DOC
+
+ :returns: dict with ssh key as key and dicts as value.
+ :rtype: dict
+ """
+ if key_filter is None:
+ keys = dbsession.query(RegKey).options(joinedload('reg_user')).all()
+ else:
+ keys = dbsession.query(RegKey).options(joinedload('reg_user')).filter(RegKey.key.in_(key_filter)).all()
+
+ key_dict = {}
+ for key in keys:
+ key_dict[key.key] = {'key_id': key.key_id, 'key': key.key,
+ 'email': key.reg_user.email,
+ 'hrn': key.reg_user.hrn}
+
+ #ldap_rslt = self.ldap.LdapSearch({'enabled']=True})
+ #user_by_email = dict((user[1]['mail'][0], user[1]['sshPublicKey']) \
+ #for user in ldap_rslt)
+
+ logger.debug("CORTEXLAB_API GetKeys -key_dict %s \r\n " % (key_dict))
+ return key_dict
+
+ #TODO : test
+ def DeleteKey(self, user_record, key_string):
+ """Deletes a key in the LDAP entry of the specified user.
+
+ Removes the key_string from the user's key list and updates the LDAP
+ user's entry with the new key attributes.
+
+ :param key_string: The ssh key to remove
+ :param user_record: User's record
+ :type key_string: string
+ :type user_record: dict
+ :returns: True if sucessful, False if not.
+ :rtype: Boolean
+
+ """
+ all_user_keys = user_record['keys']
+ all_user_keys.remove(key_string)
+ new_attributes = {'sshPublicKey':all_user_keys}
+ ret = self.ldap.LdapModifyUser(user_record, new_attributes)
+ logger.debug("CORTEXLAB_API DeleteKey %s- " % (ret))
+ return ret['bool']
+
+
+
+
+    @staticmethod
+    def _sql_get_slice_info(slice_filter):
+        """
+        Get the slice record based on the slice hrn. Fetch the record of the
+        user associated with the slice by using joinedload based on the
+        reg_researcher relationship.
+
+        :param slice_filter: the slice hrn we are looking for
+        :type slice_filter: string
+        :returns: the slice record enhanced with the user's information if
+            the slice was found, None if it wasn't.
+        :rtype: dict or None.
+        """
+        #DO NOT USE RegSlice - reg_researchers to get the hrn
+        #of the user otherwise will mess up the RegRecord in
+        #Resolve, don't know why - SA 08/08/2012
+
+        #Only one entry for one user = one slice in testbed_xp table
+        # reg_researchers is eager-loaded so the user rows come back in the
+        # same query.
+        raw_slicerec = dbsession.query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn=slice_filter).first()
+        if raw_slicerec:
+            # Work on the instance's attribute dict, which includes the
+            # loaded reg_researchers relationship.
+            raw_slicerec = raw_slicerec.__dict__
+            logger.debug(" CORTEXLAB_API \t _sql_get_slice_info slice_filter %s \
+                    raw_slicerec %s" % (slice_filter, raw_slicerec))
+            slicerec = raw_slicerec
+            #only one researcher per slice so take the first one
+            return slicerec
+
+        else:
+            return None
+
+    @staticmethod
+    def _sql_get_slice_info_from_user(slice_filter):
+        """
+        Get the slice record based on the user record id by using a joinedload
+        on the relationship reg_slices_as_researcher. Format the sql record
+        into a dict with the mandatory fields for user and slice.
+
+        :param slice_filter: the user's record id.
+        :returns: dict with slice record and user record if the record was
+            found based on the user's id, None if not.
+        :rtype: dict or None.
+        """
+        raw_slicerec = dbsession.query(RegUser).options(joinedload('reg_slices_as_researcher')).filter_by(record_id=slice_filter).first()
+        #Put it in correct order
+        user_needed_fields = ['peer_authority', 'hrn', 'last_updated',
+                              'classtype', 'authority', 'gid', 'record_id',
+                              'date_created', 'type', 'email', 'pointer']
+        slice_needed_fields = ['peer_authority', 'hrn', 'last_updated',
+                               'classtype', 'authority', 'gid', 'record_id',
+                               'date_created', 'type', 'pointer']
+        if raw_slicerec:
+            raw_slicerec = raw_slicerec.__dict__
+            slicerec = {}
+            # Keep only the expected columns; only the user's FIRST slice
+            # is considered.
+            slicerec = \
+                dict([(k, raw_slicerec[
+                    'reg_slices_as_researcher'][0].__dict__[k])
+                    for k in slice_needed_fields])
+            slicerec['reg_researchers'] = dict([(k, raw_slicerec[k])
+                                                for k in user_needed_fields])
+            #TODO Handle multiple slices for one user SA 10/12/12
+            #for now only take the first slice record associated to the rec
+            #user
+            return slicerec
+
+        else:
+            return None
+
+ def _get_slice_records(self, slice_filter=None,
+ slice_filter_type=None):
+ """
+ Get the slice record depending on the slice filter and its type.
+ :param slice_filter: Can be either the slice hrn or the user's record
+ id.
+ :type slice_filter: string
+ :param slice_filter_type: describes the slice filter type used, can be
+ slice_hrn or record_id_user
+ :type: string
+ :returns: the slice record
+ :rtype:dict
+ .. seealso::_sql_get_slice_info_from_user
+ .. seealso:: _sql_get_slice_info
+ """
+
+ #Get list of slices based on the slice hrn
+ if slice_filter_type == 'slice_hrn':
+
+ #if get_authority(slice_filter) == self.root_auth:
+ #login = slice_filter.split(".")[1].split("_")[0]
+
+ slicerec = self._sql_get_slice_info(slice_filter)
+
+ if slicerec is None:
+ return None
+ #return login, None
+
+ #Get slice based on user id
+ if slice_filter_type == 'record_id_user':
+
+ slicerec = self._sql_get_slice_info_from_user(slice_filter)
+
+ if slicerec:
+ fixed_slicerec_dict = slicerec
+ #At this point if there is no login it means
+ #record_id_user filter has been used for filtering
+ #if login is None :
+ ##If theslice record is from iotlab
+ #if fixed_slicerec_dict['peer_authority'] is None:
+ #login = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
+ #return login, fixed_slicerec_dict
+ return fixed_slicerec_dict
+ else:
+ return None
+
+
+    def GetSlices(self, slice_filter=None, slice_filter_type=None,
+                  login=None):
+        """Get the slice records from the sfa db and add lease information
+        if any.
+
+        :param slice_filter: can be the slice hrn or slice record id in the db
+            depending on the slice_filter_type.
+        :param slice_filter_type: defines the type of the filtering used, Can be
+            either 'slice_hrn' or 'record_id'.
+        :type slice_filter: string
+        :type slice_filter_type: string
+        :returns: a slice dict if slice_filter and slice_filter_type
+            are specified and a matching entry is found in the db. The result
+            is put into a list. Or a list of slice dictionaries if no filters
+            are specified.
+        :rtype: list
+        """
+        #login = None
+        authorized_filter_types_list = ['slice_hrn', 'record_id_user']
+        return_slicerec_dictlist = []
+
+        #First try to get information on the slice based on the filter provided
+        if slice_filter_type in authorized_filter_types_list:
+            fixed_slicerec_dict = self._get_slice_records(slice_filter,
+                                                          slice_filter_type)
+            # if the slice was not found in the sfa db
+            if fixed_slicerec_dict is None:
+                return return_slicerec_dictlist
+
+            slice_hrn = fixed_slicerec_dict['hrn']
+
+            logger.debug(" CORTEXLAB_API \tGetSlices login %s \
+                            slice record %s slice_filter %s \
+                            slice_filter_type %s " % (login,
+                            fixed_slicerec_dict, slice_filter,
+                            slice_filter_type))
+
+            #Now we have the slice record fixed_slicerec_dict, get the
+            #jobs associated to this slice
+            leases_list = []
+            leases_list = self.GetLeases(login=login)
+            #If no job is running or no job scheduled
+            #return only the slice record
+            if leases_list == [] and fixed_slicerec_dict:
+                return_slicerec_dictlist.append(fixed_slicerec_dict)
+
+            # if the jobs running don't belong to the user/slice we are
+            # looking for
+            # NOTE(review): when leases_list is empty, leases_hrn is empty
+            # too, so this appends fixed_slicerec_dict a SECOND time after
+            # the append just above - confirm the duplicate is intended.
+            leases_hrn = [lease['slice_hrn'] for lease in leases_list]
+            if slice_hrn not in leases_hrn:
+                return_slicerec_dictlist.append(fixed_slicerec_dict)
+            #If several experiments for one slice, put the slice record into
+            # each lease information dict
+            for lease in leases_list:
+                slicerec_dict = {}
+                logger.debug("CORTEXLAB_API.PY \tGetSlices slice_filter %s \
+                        \t lease['slice_hrn'] %s"
+                             % (slice_filter, lease['slice_hrn']))
+                if lease['slice_hrn'] == slice_hrn:
+                    slicerec_dict['experiment_id'] = lease['lease_id']
+                    #Update lease dict with the slice record
+                    if fixed_slicerec_dict:
+                        fixed_slicerec_dict['experiment_id'] = []
+                        fixed_slicerec_dict['experiment_id'].append(
+                            slicerec_dict['experiment_id'])
+                        slicerec_dict.update(fixed_slicerec_dict)
+
+                    slicerec_dict['slice_hrn'] = lease['slice_hrn']
+                    slicerec_dict['hrn'] = lease['slice_hrn']
+                    slicerec_dict['user'] = lease['user']
+                    slicerec_dict.update(
+                        {'list_node_ids':
+                        {'hostname': lease['reserved_nodes']}})
+                    slicerec_dict.update({'node_ids': lease['reserved_nodes']})
+
+                    return_slicerec_dictlist.append(slicerec_dict)
+
+                    logger.debug("CORTEXLAB_API.PY \tGetSlices \
+                        slicerec_dict %s return_slicerec_dictlist %s \
+                        lease['reserved_nodes'] \
+                        %s" % (slicerec_dict, return_slicerec_dictlist,
+                               lease['reserved_nodes']))
+
+            logger.debug("CORTEXLAB_API.PY \tGetSlices RETURN \
+                          return_slicerec_dictlist %s"
+                         % (return_slicerec_dictlist))
+
+            return return_slicerec_dictlist
+
+        else:
+            #Get all slices from the cortexlab sfa database, get the user
+            #info as well at the same time put them in dict format
+            query_slice_list = \
+                dbsession.query(RegSlice).options(joinedload('reg_researchers')).all()
+
+            for record in query_slice_list:
+                tmp = record.__dict__
+                tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
+                return_slicerec_dictlist.append(tmp)
+
+            #Get all the experiments reserved nodes
+            leases_list = self.GetReservedNodes()
+
+            for fixed_slicerec_dict in return_slicerec_dictlist:
+                slicerec_dict = {}
+                #Check if the slice belongs to a cortexlab user
+                if fixed_slicerec_dict['peer_authority'] is None:
+                    owner = fixed_slicerec_dict['hrn'].split(
+                        ".")[1].split("_")[0]
+                else:
+                    owner = None
+                for lease in leases_list:
+                    if owner == lease['user']:
+                        slicerec_dict['experiment_id'] = lease['lease_id']
+
+                        #for reserved_node in lease['reserved_nodes']:
+                        logger.debug("CORTEXLAB_API.PY \tGetSlices lease %s "
+                                     % (lease))
+                        slicerec_dict.update(fixed_slicerec_dict)
+                        slicerec_dict.update({'node_ids':
+                                              lease['reserved_nodes']})
+                        slicerec_dict.update({'list_node_ids':
+                                             {'hostname':
+                                              lease['reserved_nodes']}})
+
+                        fixed_slicerec_dict.update(slicerec_dict)
+
+            logger.debug("CORTEXLAB_API.PY \tGetSlices RETURN \
+                          return_slicerec_dictlist %s \slice_filter %s " \
+                          %(return_slicerec_dictlist, slice_filter))
+
+            return return_slicerec_dictlist
+
+
+
+ #Update slice unused, therefore sfa_fields_to_iotlab_fields unused
+ #SA 30/05/13
+ #@staticmethod
+ #def sfa_fields_to_iotlab_fields(sfa_type, hrn, record):
+ #"""
+ #"""
+
+ #iotlab_record = {}
+ ##for field in record:
+ ## iotlab_record[field] = record[field]
+
+ #if sfa_type == "slice":
+ ##instantion used in get_slivers ?
+ #if not "instantiation" in iotlab_record:
+ #iotlab_record["instantiation"] = "iotlab-instantiated"
+ ##iotlab_record["hrn"] = hrn_to_pl_slicename(hrn)
+ ##Unused hrn_to_pl_slicename because Iotlab's hrn already
+ ##in the appropriate form SA 23/07/12
+ #iotlab_record["hrn"] = hrn
+ #logger.debug("CORTEXLAB_API.PY sfa_fields_to_iotlab_fields \
+ #iotlab_record %s " %(iotlab_record['hrn']))
+ #if "url" in record:
+ #iotlab_record["url"] = record["url"]
+ #if "description" in record:
+ #iotlab_record["description"] = record["description"]
+ #if "expires" in record:
+ #iotlab_record["expires"] = int(record["expires"])
+
+ ##nodes added by OAR only and then imported to SFA
+ ##elif type == "node":
+ ##if not "hostname" in iotlab_record:
+ ##if not "hostname" in record:
+ ##raise MissingSfaInfo("hostname")
+ ##iotlab_record["hostname"] = record["hostname"]
+ ##if not "model" in iotlab_record:
+ ##iotlab_record["model"] = "geni"
+
+ ##One authority only
+ ##elif type == "authority":
+ ##iotlab_record["login_base"] = hrn_to_iotlab_login_base(hrn)
+
+ ##if not "name" in iotlab_record:
+ ##iotlab_record["name"] = hrn
+
+ ##if not "abbreviated_name" in iotlab_record:
+ ##iotlab_record["abbreviated_name"] = hrn
+
+ ##if not "enabled" in iotlab_record:
+ ##iotlab_record["enabled"] = True
+
+ ##if not "is_public" in iotlab_record:
+ ##iotlab_record["is_public"] = True
+
+ #return iotlab_record
+
+
+
+
+
+
+
+
+
+
--- /dev/null
+"""
+Implements what a driver should provide for SFA to work.
+"""
+from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
+from sfa.util.sfalogging import logger
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
+
+from sfa.managers.driver import Driver
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+from sfa.util.xrn import Xrn, hrn_to_urn, get_authority
+
+from sfa.iotlab.iotlabaggregate import IotlabAggregate, iotlab_xrn_to_hostname
+from sfa.iotlab.iotlabslices import IotlabSlices
+
+
+from sfa.iotlab.iotlabapi import IotlabTestbedAPI
+
+
+class CortexlabDriver(Driver):
+ """ Cortexlab Driver class inherited from Driver generic class.
+
+ Contains methods compliant with the SFA standard and the testbed
+ infrastructure (calls to LDAP and scheduler to book the nodes).
+
+ .. seealso::: Driver class
+
+ """
+ def __init__(self, config):
+ """
+
+ Sets the iotlab SFA config parameters,
+        instantiates the testbed api and the iotlab database.
+
+ :param config: iotlab SFA configuration object
+ :type config: Config object
+
+ """
+ Driver.__init__(self, config)
+ self.config = config
+ self.iotlab_api = IotlabTestbedAPI(config)
+ self.cache = None
+
+ def augment_records_with_testbed_info(self, record_list):
+ """
+
+ Adds specific testbed info to the records.
+
+ :param record_list: list of sfa dictionaries records
+ :type record_list: list
+ :returns: list of records with extended information in each record
+ :rtype: list
+
+ """
+ return self.fill_record_info(record_list)
+
+ def fill_record_info(self, record_list):
+ """
+
+ For each SFA record, fill in the iotlab specific and SFA specific
+ fields in the record.
+
+ :param record_list: list of sfa dictionaries records
+ :type record_list: list
+ :returns: list of records with extended information in each record
+ :rtype: list
+
+        .. warning:: Should not be modifying record_list directly because
+            modifications are kept outside the method's scope. However, there
+            is no other way to do it given the way it's called in registry manager.
+
+ """
+
+ logger.debug("IOTLABDRIVER \tfill_record_info records %s "
+ % (record_list))
+ if not isinstance(record_list, list):
+ record_list = [record_list]
+
+ try:
+ for record in record_list:
+
+ if str(record['type']) == 'node':
+ # look for node info using GetNodes
+ # the record is about one node only
+ filter_dict = {'hrn': [record['hrn']]}
+ node_info = self.iotlab_api.GetNodes(filter_dict)
+ # the node_info is about one node only, but it is formatted
+ # as a list
+ record.update(node_info[0])
+ logger.debug("IOTLABDRIVER.PY \t \
+ fill_record_info NODE" % (record))
+
+ #If the record is a SFA slice record, then add information
+ #about the user of this slice. This kind of
+ #information is in the Iotlab's DB.
+ if str(record['type']) == 'slice':
+ if 'reg_researchers' in record and isinstance(record
+ ['reg_researchers'],
+ list):
+ record['reg_researchers'] = \
+ record['reg_researchers'][0].__dict__
+ record.update(
+ {'PI': [record['reg_researchers']['hrn']],
+ 'researcher': [record['reg_researchers']['hrn']],
+ 'name': record['hrn'],
+ 'oar_job_id': [],
+ 'node_ids': [],
+ 'person_ids': [record['reg_researchers']
+ ['record_id']],
+ # For client_helper.py compatibility
+ 'geni_urn': '',
+ # For client_helper.py compatibility
+ 'keys': '',
+ # For client_helper.py compatibility
+ 'key_ids': ''})
+
+ #Get iotlab slice record and oar job id if any.
+ recslice_list = self.iotlab_api.GetSlices(
+ slice_filter=str(record['hrn']),
+ slice_filter_type='slice_hrn')
+
+ logger.debug("IOTLABDRIVER \tfill_record_info \
+ TYPE SLICE RECUSER record['hrn'] %s record['oar_job_id']\
+ %s " % (record['hrn'], record['oar_job_id']))
+ del record['reg_researchers']
+ try:
+ for rec in recslice_list:
+ logger.debug("IOTLABDRIVER\r\n \t \
+ fill_record_info oar_job_id %s "
+ % (rec['oar_job_id']))
+
+ record['node_ids'] = [self.iotlab_api.root_auth +
+ '.' + hostname for hostname
+ in rec['node_ids']]
+ except KeyError:
+ pass
+
+ logger.debug("IOTLABDRIVER.PY \t fill_record_info SLICE \
+ recslice_list %s \r\n \t RECORD %s \r\n \
+ \r\n" % (recslice_list, record))
+
+ if str(record['type']) == 'user':
+ #The record is a SFA user record.
+ #Get the information about his slice from Iotlab's DB
+ #and add it to the user record.
+ recslice_list = self.iotlab_api.GetSlices(
+ slice_filter=record['record_id'],
+ slice_filter_type='record_id_user')
+
+ logger.debug("IOTLABDRIVER.PY \t fill_record_info \
+ TYPE USER recslice_list %s \r\n \t RECORD %s \r\n"
+ % (recslice_list, record))
+ #Append slice record in records list,
+ #therefore fetches user and slice info again(one more loop)
+ #Will update PIs and researcher for the slice
+
+ recuser = recslice_list[0]['reg_researchers']
+ logger.debug("IOTLABDRIVER.PY \t fill_record_info USER \
+ recuser %s \r\n \r\n" % (recuser))
+ recslice = {}
+ recslice = recslice_list[0]
+ recslice.update(
+ {'PI': [recuser['hrn']],
+ 'researcher': [recuser['hrn']],
+ 'name': record['hrn'],
+ 'node_ids': [],
+ 'oar_job_id': [],
+ 'person_ids': [recuser['record_id']]})
+ try:
+ for rec in recslice_list:
+ recslice['oar_job_id'].append(rec['oar_job_id'])
+ except KeyError:
+ pass
+
+ recslice.update({'type': 'slice',
+ 'hrn': recslice_list[0]['hrn']})
+
+ #GetPersons takes [] as filters
+ user_iotlab = self.iotlab_api.GetPersons([record])
+
+ record.update(user_iotlab[0])
+ #For client_helper.py compatibility
+ record.update(
+ {'geni_urn': '',
+ 'keys': '',
+ 'key_ids': ''})
+ record_list.append(recslice)
+
+ logger.debug("IOTLABDRIVER.PY \t \
+ fill_record_info ADDING SLICE\
+ INFO TO USER records %s" % (record_list))
+
+ except TypeError, error:
+ logger.log_exc("IOTLABDRIVER \t fill_record_info EXCEPTION %s"
+ % (error))
+
+ return record_list
+
+ def sliver_status(self, slice_urn, slice_hrn):
+ """
+ Receive a status request for slice named urn/hrn
+ urn:publicid:IDN+iotlab+nturro_slice hrn iotlab.nturro_slice
+ shall return a structure as described in
+ http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
+ NT : not sure if we should implement this or not, but used by sface.
+
+ :param slice_urn: slice urn
+ :type slice_urn: string
+ :param slice_hrn: slice hrn
+ :type slice_hrn: string
+
+ """
+
+ #First get the slice with the slice hrn
+ slice_list = self.iotlab_api.GetSlices(slice_filter=slice_hrn,
+ slice_filter_type='slice_hrn')
+
+ if len(slice_list) == 0:
+ raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
+
+        #Used for fetching the user info which comes along the slice info
+ one_slice = slice_list[0]
+
+ #Make a list of all the nodes hostnames in use for this slice
+ slice_nodes_list = []
+ slice_nodes_list = one_slice['node_ids']
+ #Get all the corresponding nodes details
+ nodes_all = self.iotlab_api.GetNodes(
+ {'hostname': slice_nodes_list},
+ ['node_id', 'hostname', 'site', 'boot_state'])
+ nodeall_byhostname = dict([(one_node['hostname'], one_node)
+ for one_node in nodes_all])
+
+ for single_slice in slice_list:
+ #For compatibility
+ top_level_status = 'empty'
+ result = {}
+ result.fromkeys(
+ ['geni_urn', 'geni_error', 'iotlab_login', 'geni_status',
+ 'geni_resources'], None)
+ # result.fromkeys(\
+ # ['geni_urn','geni_error', 'pl_login','geni_status',
+ # 'geni_resources'], None)
+ # result['pl_login'] = one_slice['reg_researchers'][0].hrn
+ result['iotlab_login'] = one_slice['user']
+ logger.debug("Slabdriver - sliver_status Sliver status \
+ urn %s hrn %s single_slice %s \r\n "
+ % (slice_urn, slice_hrn, single_slice))
+
+ if 'node_ids' not in single_slice:
+ #No job in the slice
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = []
+ return result
+
+ top_level_status = 'ready'
+
+ #A job is running on Iotlab for this slice
+ # report about the local nodes that are in the slice only
+
+ result['geni_urn'] = slice_urn
+
+ resources = []
+ for node_hostname in single_slice['node_ids']:
+ res = {}
+ res['iotlab_hostname'] = node_hostname
+ res['iotlab_boot_state'] = \
+ nodeall_byhostname[node_hostname]['boot_state']
+
+ #res['pl_hostname'] = node['hostname']
+ #res['pl_boot_state'] = \
+ #nodeall_byhostname[node['hostname']]['boot_state']
+ #res['pl_last_contact'] = strftime(self.time_format, \
+ #gmtime(float(timestamp)))
+ sliver_id = Xrn(
+ slice_urn, type='slice',
+ id=nodeall_byhostname[node_hostname]['node_id']).urn
+
+ res['geni_urn'] = sliver_id
+ #node_name = node['hostname']
+ if nodeall_byhostname[node_hostname]['boot_state'] == 'Alive':
+
+ res['geni_status'] = 'ready'
+ else:
+ res['geni_status'] = 'failed'
+ top_level_status = 'failed'
+
+ res['geni_error'] = ''
+
+ resources.append(res)
+
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = resources
+ logger.debug("IOTLABDRIVER \tsliver_statusresources %s res %s "
+ % (resources, res))
+ return result
+
+ @staticmethod
+ def get_user_record(hrn):
+ """
+
+ Returns the user record based on the hrn from the SFA DB .
+
+ :param hrn: user's hrn
+ :type hrn: string
+ :returns: user record from SFA database
+ :rtype: RegUser
+
+ """
+ return dbsession.query(RegRecord).filter_by(hrn=hrn).first()
+
+ def testbed_name(self):
+ """
+
+ Returns testbed's name.
+ :returns: testbed authority name.
+ :rtype: string
+
+ """
+ return self.hrn
+
+ # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+ def aggregate_version(self):
+ """
+
+ Returns the testbed's supported rspec advertisement and request
+ versions.
+        :returns: rspec versions supported as a dictionary.
+ :rtype: dict
+
+ """
+ version_manager = VersionManager()
+ ad_rspec_versions = []
+ request_rspec_versions = []
+ for rspec_version in version_manager.versions:
+ if rspec_version.content_type in ['*', 'ad']:
+ ad_rspec_versions.append(rspec_version.to_dict())
+ if rspec_version.content_type in ['*', 'request']:
+ request_rspec_versions.append(rspec_version.to_dict())
+ return {
+ 'testbed': self.testbed_name(),
+ 'geni_request_rspec_versions': request_rspec_versions,
+ 'geni_ad_rspec_versions': ad_rspec_versions}
+
+ def _get_requested_leases_list(self, rspec):
+ """
+ Process leases in rspec depending on the rspec version (format)
+ type. Find the lease requests in the rspec and creates
+ a lease request list with the mandatory information ( nodes,
+ start time and duration) of the valid leases (duration above or
+ equal to the iotlab experiment minimum duration).
+
+ :param rspec: rspec request received.
+ :type rspec: RSpec
+ :returns: list of lease requests found in the rspec
+ :rtype: list
+ """
+ requested_lease_list = []
+ for lease in rspec.version.get_leases():
+ single_requested_lease = {}
+ logger.debug("IOTLABDRIVER.PY \t \
+ _get_requested_leases_list lease %s " % (lease))
+
+ if not lease.get('lease_id'):
+ if get_authority(lease['component_id']) == \
+ self.iotlab_api.root_auth:
+ single_requested_lease['hostname'] = \
+ iotlab_xrn_to_hostname(\
+ lease.get('component_id').strip())
+ single_requested_lease['start_time'] = \
+ lease.get('start_time')
+ single_requested_lease['duration'] = lease.get('duration')
+ #Check the experiment's duration is valid before adding
+ #the lease to the requested leases list
+ duration_in_seconds = \
+ int(single_requested_lease['duration'])
+ if duration_in_seconds >= self.iotlab_api.GetMinExperimentDurationInGranularity():
+ requested_lease_list.append(single_requested_lease)
+
+ return requested_lease_list
+
+ @staticmethod
+ def _group_leases_by_start_time(requested_lease_list):
+ """
+ Create dict of leases by start_time, regrouping nodes reserved
+ at the same time, for the same amount of time so as to
+ define one job on OAR.
+
+ :param requested_lease_list: list of leases
+ :type requested_lease_list: list
+ :returns: Dictionary with key = start time, value = list of leases
+ with the same start time.
+ :rtype: dictionary
+
+ """
+
+ requested_job_dict = {}
+ for lease in requested_lease_list:
+
+ #In case it is an asap experiment start_time is empty
+ if lease['start_time'] == '':
+ lease['start_time'] = '0'
+
+ if lease['start_time'] not in requested_job_dict:
+ if isinstance(lease['hostname'], str):
+ lease['hostname'] = [lease['hostname']]
+
+ requested_job_dict[lease['start_time']] = lease
+
+ else:
+ job_lease = requested_job_dict[lease['start_time']]
+ if lease['duration'] == job_lease['duration']:
+ job_lease['hostname'].append(lease['hostname'])
+
+ return requested_job_dict
+
+ def _process_requested_jobs(self, rspec):
+ """
+ Turns the requested leases and information into a dictionary
+ of requested jobs, grouped by starting time.
+
+ :param rspec: RSpec received
+ :type rspec : RSpec
+ :rtype: dictionary
+
+ """
+ requested_lease_list = self._get_requested_leases_list(rspec)
+ logger.debug("IOTLABDRIVER _process_requested_jobs \
+ requested_lease_list %s" % (requested_lease_list))
+ job_dict = self._group_leases_by_start_time(requested_lease_list)
+ logger.debug("IOTLABDRIVER _process_requested_jobs job_dict\
+ %s" % (job_dict))
+
+ return job_dict
+
+ def create_sliver(self, slice_urn, slice_hrn, creds, rspec_string,
+ users, options):
+ """Answer to CreateSliver.
+
+ Creates the leases and slivers for the users from the information
+ found in the rspec string.
+ Launch experiment on OAR if the requested leases is valid. Delete
+ no longer requested leases.
+
+
+ :param creds: user's credentials
+ :type creds: string
+ :param users: user record list
+ :type users: list
+ :param options:
+ :type options:
+
+ :returns: a valid Rspec for the slice which has just been
+ modified.
+ :rtype: RSpec
+
+
+ """
+ aggregate = IotlabAggregate(self)
+
+ slices = IotlabSlices(self)
+ peer = slices.get_peer(slice_hrn)
+ sfa_peer = slices.get_sfa_peer(slice_hrn)
+ slice_record = None
+
+ if not isinstance(creds, list):
+ creds = [creds]
+
+ if users:
+ slice_record = users[0].get('slice_record', {})
+ logger.debug("IOTLABDRIVER.PY \t ===============create_sliver \t\
+ creds %s \r\n \r\n users %s"
+ % (creds, users))
+ slice_record['user'] = {'keys': users[0]['keys'],
+ 'email': users[0]['email'],
+ 'hrn': slice_record['reg-researchers'][0]}
+ # parse rspec
+ rspec = RSpec(rspec_string)
+ logger.debug("IOTLABDRIVER.PY \t create_sliver \trspec.version \
+ %s slice_record %s users %s"
+ % (rspec.version, slice_record, users))
+
+ # ensure site record exists?
+ # ensure slice record exists
+ #Removed options in verify_slice SA 14/08/12
+ #Removed peer record in verify_slice SA 18/07/13
+ sfa_slice = slices.verify_slice(slice_hrn, slice_record, sfa_peer)
+
+ # ensure person records exists
+ #verify_persons returns added persons but the return value
+ #is not used
+ #Removed peer record and sfa_peer in verify_persons SA 18/07/13
+ slices.verify_persons(slice_hrn, sfa_slice, users, options=options)
+ #requested_attributes returned by rspec.version.get_slice_attributes()
+ #unused, removed SA 13/08/12
+ #rspec.version.get_slice_attributes()
+
+ logger.debug("IOTLABDRIVER.PY create_sliver slice %s " % (sfa_slice))
+
+ # add/remove slice from nodes
+
+ #requested_slivers = [node.get('component_id') \
+ #for node in rspec.version.get_nodes_with_slivers()\
+ #if node.get('authority_id') is self.iotlab_api.root_auth]
+ #l = [ node for node in rspec.version.get_nodes_with_slivers() ]
+ #logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
+ #requested_slivers %s listnodes %s" \
+ #%(requested_slivers,l))
+ #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
+ #slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)
+
+ requested_job_dict = self._process_requested_jobs(rspec)
+
+ logger.debug("IOTLABDRIVER.PY \tcreate_sliver requested_job_dict %s "
+ % (requested_job_dict))
+ #verify_slice_leases returns the leases , but the return value is unused
+ #here. Removed SA 13/08/12
+ slices.verify_slice_leases(sfa_slice,
+ requested_job_dict, peer)
+
+ return aggregate.get_rspec(slice_xrn=slice_urn,
+ login=sfa_slice['login'],
+ version=rspec.version)
+
+ def delete_sliver(self, slice_urn, slice_hrn, creds, options):
+ """
+ Deletes the lease associated with the slice hrn and the credentials
+ if the slice belongs to iotlab. Answer to DeleteSliver.
+
+ :param slice_urn: urn of the slice
+ :param slice_hrn: name of the slice
+        :param creds: slice credentials
+ :type slice_urn: string
+ :type slice_hrn: string
+ :type creds: ? unused
+
+ :returns: 1 if the slice to delete was not found on iotlab,
+            True if the deletion was successful, False otherwise.
+
+ .. note:: Should really be named delete_leases because iotlab does
+ not have any slivers, but only deals with leases. However,
+ SFA api only have delete_sliver define so far. SA 13/05/2013
+ .. note:: creds are unused, and are not used either in the dummy driver
+ delete_sliver .
+ """
+
+ sfa_slice_list = self.iotlab_api.GetSlices(
+ slice_filter=slice_hrn,
+ slice_filter_type='slice_hrn')
+
+ if not sfa_slice_list:
+ return 1
+
+ #Delete all leases in the slice
+ for sfa_slice in sfa_slice_list:
+ logger.debug("IOTLABDRIVER.PY delete_sliver slice %s" % (sfa_slice))
+ slices = IotlabSlices(self)
+ # determine if this is a peer slice
+
+ peer = slices.get_peer(slice_hrn)
+
+ logger.debug("IOTLABDRIVER.PY delete_sliver peer %s \
+ \r\n \t sfa_slice %s " % (peer, sfa_slice))
+ try:
+ self.iotlab_api.DeleteSliceFromNodes(sfa_slice)
+ return True
+ except:
+ return False
+
+ def list_resources (self, slice_urn, slice_hrn, creds, options):
+ """
+
+ List resources from the iotlab aggregate and returns a Rspec
+ advertisement with resources found when slice_urn and slice_hrn are
+ None (in case of resource discovery).
+ If a slice hrn and urn are provided, list experiment's slice
+ nodes in a rspec format. Answer to ListResources.
+ Caching unused.
+
+ :param slice_urn: urn of the slice
+ :param slice_hrn: name of the slice
+        :param creds: slice credentials
+ :type slice_urn: string
+ :type slice_hrn: string
+ :type creds: ? unused
+ :param options: options used when listing resources (list_leases, info,
+ geni_available)
+ :returns: rspec string in xml
+ :rtype: string
+
+ .. note:: creds are unused
+ """
+
+ #cached_requested = options.get('cached', True)
+
+ version_manager = VersionManager()
+ # get the rspec's return format from options
+ rspec_version = \
+ version_manager.get_version(options.get('geni_rspec_version'))
+ version_string = "rspec_%s" % (rspec_version)
+
+ #panos adding the info option to the caching key (can be improved)
+ if options.get('info'):
+ version_string = version_string + "_" + \
+ options.get('info', 'default')
+
+ # Adding the list_leases option to the caching key
+ if options.get('list_leases'):
+ version_string = version_string + "_" + \
+ options.get('list_leases', 'default')
+
+ # Adding geni_available to caching key
+ if options.get('geni_available'):
+ version_string = version_string + "_" + \
+ str(options.get('geni_available'))
+
+ # look in cache first
+ #if cached_requested and self.cache and not slice_hrn:
+ #rspec = self.cache.get(version_string)
+ #if rspec:
+ #logger.debug("IotlabDriver.ListResources: \
+ #returning cached advertisement")
+ #return rspec
+
+ #panos: passing user-defined options
+ aggregate = IotlabAggregate(self)
+
+ rspec = aggregate.get_rspec(slice_xrn=slice_urn,
+ version=rspec_version, options=options)
+
+ # cache the result
+ #if self.cache and not slice_hrn:
+ #logger.debug("Iotlab.ListResources: stores advertisement in cache")
+ #self.cache.add(version_string, rspec)
+
+ return rspec
+
+
+ def list_slices(self, creds, options):
+ """Answer to ListSlices.
+
+ List slices belonging to iotlab, returns slice urns list.
+ No caching used. Options unused but are defined in the SFA method
+ api prototype.
+
+ :returns: slice urns list
+ :rtype: list
+
+ .. note:: creds are unused
+ """
+ # look in cache first
+ #if self.cache:
+ #slices = self.cache.get('slices')
+ #if slices:
+ #logger.debug("PlDriver.list_slices returns from cache")
+ #return slices
+
+ # get data from db
+
+ slices = self.iotlab_api.GetSlices()
+ logger.debug("IOTLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n"
+ % (slices))
+ slice_hrns = [iotlab_slice['hrn'] for iotlab_slice in slices]
+
+ slice_urns = [hrn_to_urn(slice_hrn, 'slice')
+ for slice_hrn in slice_hrns]
+
+ # cache the result
+ #if self.cache:
+ #logger.debug ("IotlabDriver.list_slices stores value in cache")
+ #self.cache.add('slices', slice_urns)
+
+ return slice_urns
+
+
+ def register(self, sfa_record, hrn, pub_key):
+ """
+ Adding new user, slice, node or site should not be handled
+ by SFA.
+
+ ..warnings:: should not be used. Different components are in charge of
+ doing this task. Adding nodes = OAR
+ Adding users = LDAP Iotlab
+ Adding slice = Import from LDAP users
+ Adding site = OAR
+
+ :param sfa_record: record provided by the client of the
+ Register API call.
+ :type sfa_record: dict
+ :param pub_key: public key of the user
+ :type pub_key: string
+
+ .. note:: DOES NOTHING. Returns -1.
+
+ """
+ return -1
+
+
+ def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
+ """
+ No site or node record update allowed in Iotlab. The only modifications
+ authorized here are key deletion/addition on an existing user and
+ password change. On an existing user, CAN NOT BE MODIFIED: 'first_name',
+ 'last_name', 'email'. DOES NOT EXIST IN SENSLAB: 'phone', 'url', 'bio',
+ 'title', 'accepted_aup'. A slice is bound to its user, so modifying the
+        user's ssh key should modify the slice's GID after an import procedure.
+
+ :param old_sfa_record: what is in the db for this hrn
+ :param new_sfa_record: what was passed to the update call
+ :param new_key: the new user's public key
+ :param hrn: the user's sfa hrn
+ :type old_sfa_record: dict
+ :type new_sfa_record: dict
+ :type new_key: string
+ :type hrn: string
+
+ TODO: needs review
+ .. seealso:: update in driver.py.
+
+ """
+ pointer = old_sfa_record['pointer']
+ old_sfa_record_type = old_sfa_record['type']
+
+ # new_key implemented for users only
+ if new_key and old_sfa_record_type not in ['user']:
+ raise UnknownSfaType(old_sfa_record_type)
+
+ if old_sfa_record_type == "user":
+ update_fields = {}
+ all_fields = new_sfa_record
+ for key in all_fields.keys():
+ if key in ['key', 'password']:
+ update_fields[key] = all_fields[key]
+
+ if new_key:
+ # must check this key against the previous one if it exists
+ persons = self.iotlab_api.GetPersons([old_sfa_record])
+ person = persons[0]
+ keys = [person['pkey']]
+ #Get all the person's keys
+ keys_dict = self.iotlab_api.GetKeys(keys)
+
+ # Delete all stale keys, meaning the user has only one key
+ #at a time
+ #TODO: do we really want to delete all the other keys?
+ #Is this a problem with the GID generation to have multiple
+ #keys? SA 30/05/13
+ key_exists = False
+ if key in keys_dict:
+ key_exists = True
+ else:
+ #remove all the other keys
+ for key in keys_dict:
+ self.iotlab_api.DeleteKey(person, key)
+ self.iotlab_api.AddPersonKey(
+ person, {'sshPublicKey': person['pkey']},
+ {'sshPublicKey': new_key})
+ return True
+
+ def remove(self, sfa_record):
+ """
+
+ Removes users only. Mark the user as disabled in LDAP. The user and his
+ slice are then deleted from the db by running an import on the registry.
+
+ :param sfa_record: record is the existing sfa record in the db
+ :type sfa_record: dict
+
+        ..warning::As far as the slice is concerned, here only the leases are
+            removed from the slice. The slice record itself is not removed
+            from the db.
+
+ TODO: needs review
+
+ TODO : REMOVE SLICE FROM THE DB AS WELL? SA 14/05/2013,
+
+ TODO: return boolean for the slice part
+ """
+ sfa_record_type = sfa_record['type']
+ hrn = sfa_record['hrn']
+ if sfa_record_type == 'user':
+
+ #get user from iotlab ldap
+ person = self.iotlab_api.GetPersons(sfa_record)
+ #No registering at a given site in Iotlab.
+ #Once registered to the LDAP, all iotlab sites are
+        #accessible.
+ if person:
+ #Mark account as disabled in ldap
+ return self.iotlab_api.DeletePerson(sfa_record)
+
+ elif sfa_record_type == 'slice':
+ if self.iotlab_api.GetSlices(slice_filter=hrn,
+ slice_filter_type='slice_hrn'):
+ ret = self.iotlab_api.DeleteSlice(sfa_record)
+ return True
--- /dev/null
+"""
+File defining classes to handle the table in the iotlab dedicated database.
+"""
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+# from sfa.util.config import Config
+from sfa.util.sfalogging import logger
+
+from sqlalchemy import Column, Integer, String
+from sqlalchemy import Table, MetaData
+from sqlalchemy.ext.declarative import declarative_base
+
+# from sqlalchemy.dialects import postgresql
+
+from sqlalchemy.exc import NoSuchTableError
+
+
+#Dict holding the columns names of the table as keys
+#and their type, used for creation of the table
+slice_table = {'record_id_user': 'integer PRIMARY KEY references X ON DELETE \
+ CASCADE ON UPDATE CASCADE', 'oar_job_id': 'integer DEFAULT -1',
+ 'record_id_slice': 'integer', 'slice_hrn': 'text NOT NULL'}
+
+#Dict with all the specific iotlab tables
+tablenames_dict = {'iotlab_xp': slice_table}
+
+
+IotlabBase = declarative_base()
+
+
+class IotlabXP (IotlabBase):
+ """ SQL alchemy class to manipulate the rows of the slice_iotlab table in
+ iotlab_sfa database. Handles the records representation and creates the
+ table if it does not exist yet.
+
+ """
+ __tablename__ = 'iotlab_xp'
+
+ slice_hrn = Column(String)
+ job_id = Column(Integer, primary_key=True)
+ end_time = Column(Integer, nullable=False)
+
+ def __init__(self, slice_hrn=None, job_id=None, end_time=None):
+ """
+ Defines a row of the slice_iotlab table
+ """
+ if slice_hrn:
+ self.slice_hrn = slice_hrn
+ if job_id:
+ self.job_id = job_id
+ if end_time:
+ self.end_time = end_time
+
+ def __repr__(self):
+ """Prints the SQLAlchemy record to the format defined
+ by the function.
+ """
+ result = "<iotlab_xp : slice_hrn = %s , job_id %s end_time = %s" \
+ % (self.slice_hrn, self.job_id, self.end_time)
+ result += ">"
+ return result
+
+
+class IotlabDB(object):
+ """ SQL Alchemy connection class.
+ From alchemy.py
+ """
+ # Stores the unique Singleton instance-
+ _connection_singleton = None
+ # defines the database name
+ dbname = "iotlab_sfa"
+
+ class Singleton:
+ """
+ Class used with this Python singleton design pattern to allow the
+ definition of one single instance of iotlab db session in the whole
+        code. Wherever a connection to the database is needed, this class
+        returns the same instance every time. Removes the need for global
+        variables throughout the code.
+ """
+
+ def __init__(self, config, debug=False):
+ self.iotlab_engine = None
+ self.iotlab_session = None
+ self.url = None
+ self.create_iotlab_engine(config, debug)
+ self.session()
+
+ def create_iotlab_engine(self, config, debug=False):
+ """Creates the SQLAlchemy engine, which is the starting point for
+ any SQLAlchemy application.
+ :param config: configuration object created by SFA based on the
+ configuration file in /etc
+ :param debug: if set to true, echo and echo pool will be set to true
+ as well. If echo is True, all statements as well as a repr() of
+ their parameter lists to the engines logger, which defaults to
+ sys.stdout. If echo_pool is True, the connection pool will log all
+ checkouts/checkins to the logging stream. A python logger can be
+ used to configure this logging directly but so far it has not been
+ configured. Refer to sql alchemy engine documentation.
+
+ :type config: Config instance (sfa.util.config)
+ :type debug: bool
+
+ """
+
+ if debug is True:
+ l_echo_pool = True
+ l_echo = True
+ else:
+ l_echo_pool = False
+ l_echo = False
+ # the former PostgreSQL.py used the psycopg2 directly and was doing
+ #self.connection.set_client_encoding("UNICODE")
+ # it's unclear how to achieve this in sqlalchemy, nor if it's needed
+ # at all
+ # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
+ # we indeed have /var/lib/pgsql/data/postgresql.conf where
+ # this setting is unset, it might be an angle to tweak that if need
+ # be try a unix socket first
+ # - omitting the hostname does the trick
+ unix_url = "postgresql+psycopg2://%s:%s@:%s/%s" \
+ % (config.SFA_DB_USER, config.SFA_DB_PASSWORD,
+ config.SFA_DB_PORT, IotlabDB.dbname)
+
+ # the TCP fallback method
+ tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s" \
+ % (config.SFA_DB_USER, config.SFA_DB_PASSWORD,
+ config.SFA_DB_HOST, config.SFA_DB_PORT, IotlabDB.dbname)
+
+ for url in [unix_url, tcp_url]:
+ try:
+ self.iotlab_engine = create_engine(
+ url, echo_pool=l_echo_pool, echo=l_echo)
+ self.check()
+ self.url = url
+ return
+ except:
+ pass
+ self.iotlab_engine = None
+
+ raise Exception("Could not connect to database")
+
    def check(self):
        """ Check that the database is reachable by trying a trivial
        "select 1" selection; propagates the engine's exception on failure.

        """
        self.iotlab_engine.execute("select 1").scalar()
+
+
+ def session(self):
+ """
+ Creates a SQLalchemy session. Once the session object is created
+ it should be used throughout the code for all the operations on
+ tables for this given database.
+
+ """
+ if self.iotlab_session is None:
+ Session = sessionmaker()
+ self.iotlab_session = Session(bind=self.iotlab_engine)
+ return self.iotlab_session
+
+ def close_session(self):
+ """
+ Closes connection to database.
+
+ """
+ if self.iotlab_session is None:
+ return
+ self.iotlab_session.close()
+ self.iotlab_session = None
+
+
    def update_jobs_in_iotlabdb(self, job_oar_list, jobs_psql):
        """ Cleans the iotlab db by deleting expired and cancelled jobs.

        Compares the list of job ids given by OAR with the job ids that
        are already in the database, deletes the jobs that are no longer in
        the OAR job id list.

        :param job_oar_list: list of job ids coming from OAR
        :type job_oar_list: list
        :param jobs_psql: list of job ids from the database.
        :type jobs_psql: list

        :returns: None
        """
        #Turn the list into a set
        set_jobs_psql = set(jobs_psql)

        # jobs to keep are the ones known on both sides
        kept_jobs = set(job_oar_list).intersection(set_jobs_psql)
        logger.debug("\r\n \t update_jobs_in_iotlabdb jobs_psql %s \r\n \
                        job_oar_list %s kept_jobs %s "
                     % (set_jobs_psql, job_oar_list, kept_jobs))
        # jobs in the DB but unknown to OAR are stale (expired/cancelled)
        deleted_jobs = set_jobs_psql.difference(kept_jobs)
        deleted_jobs = list(deleted_jobs)
        if len(deleted_jobs) > 0:
            # synchronize_session='fetch': fetch matching rows first so the
            # in-memory session stays consistent with the bulk delete
            self.iotlab_session.query(IotlabXP).filter(IotlabXP.job_id.in_(deleted_jobs)).delete(synchronize_session='fetch')
            self.iotlab_session.commit()
        return
+
    def __init__(self, config, debug=False):
        """Create (or reuse) the unique database handle.

        Singleton-by-delegation: the first instantiation builds the inner
        Singleton (engine + session); later instantiations only keep a
        reference to it, and attribute access is delegated via __getattr__.

        :param config: SFA configuration object, forwarded to the Singleton.
        :param debug: forwarded to the Singleton / engine creation.
        :type debug: bool
        """
        # NOTE(review): 'sl_base' looks like a leftover of the former 'slab'
        # naming -- confirm whether anything still reads it
        self.sl_base = IotlabBase

        # Check whether we already have an instance
        if IotlabDB._connection_singleton is None:
            IotlabDB._connection_singleton = IotlabDB.Singleton(config, debug)

        # Store instance reference as the only member in the handle
        self._EventHandler_singleton = IotlabDB._connection_singleton
+
    def __getattr__(self, aAttr):
        """
        Delegate attribute access to the Singleton implementation, so every
        handle behaves like the one shared connection object.

        :param aAttr: Attribute wanted.
        :returns: Attribute
        """
        # _connection_singleton is a class attribute, shared by all handles
        return getattr(self._connection_singleton, aAttr)
+
+
+
+ # def __setattr__(self, aAttr, aValue):
+ # """Delegate access to implementation.
+
+ # :param attr: Attribute wanted.
+    # :param value: Value to be set.
+ # :return: Result of operation.
+ # """
+ # return setattr(self._connection_singleton, aAttr, aValue)
+
    def exists(self, tablename):
        """
        Checks if the table specified as tablename exists.

        :param tablename: name of the table in the db that has to be checked.
        :type tablename: string
        :returns: True if the table exists, False otherwise.
        :rtype: bool

        """
        metadata = MetaData(bind=self.iotlab_engine)
        try:
            # autoload=True forces reflection against the live database:
            # raises NoSuchTableError when the table is missing
            table = Table(tablename, metadata, autoload=True)
            return True

        except NoSuchTableError:
            logger.log_exc("IOTLABPOSTGRES tablename %s does not exist"
                           % (tablename))
            return False
+
    def createtable(self):
        """
        Creates all the tables of the engine.
        Uses the global dictionary holding the tablenames and the table
        schema.

        """

        logger.debug("IOTLABPOSTGRES createtable \
                    IotlabBase.metadata.sorted_tables %s \r\n engine %s"
                     % (IotlabBase.metadata.sorted_tables, self.iotlab_engine))
        # create_all is idempotent: tables that already exist are skipped
        IotlabBase.metadata.create_all(self.iotlab_engine)
        return
--- /dev/null
+"""
+This file defines the IotlabSlices class by which all the slice checkings
+upon lease creation are done.
+"""
+from sfa.util.xrn import get_authority, urn_to_hrn
+from sfa.util.sfalogging import logger
+
+MAXINT = 2L**31-1
+
+
+class IotlabSlices:
+ """
+ This class is responsible for checking the slice when creating a
+ lease or a sliver. Those checks include verifying that the user is valid,
+ that the slice is known from the testbed or from our peers, that the list
+ of nodes involved has not changed (in this case the lease is modified
+ accordingly).
+ """
+ rspec_to_slice_tag = {'max_rate': 'net_max_rate'}
+
    def __init__(self, driver):
        """
        Get the reference to the driver here.

        :param driver: the IotlabDriver instance this helper works for;
            gives access to iotlab_api for all the testbed operations below.
        """
        self.driver = driver
+
+ def get_peer(self, xrn):
+ """
+ Finds the authority of a resource based on its xrn.
+ If the authority is Iotlab (local) return None,
+ Otherwise, look up in the DB if Iotlab is federated with this site
+ authority and returns its DB record if it is the case.
+
+ :param xrn: resource's xrn
+ :type xrn: string
+ :returns: peer record
+ :rtype: dict
+
+ """
+ hrn, hrn_type = urn_to_hrn(xrn)
+ #Does this slice belong to a local site or a peer iotlab site?
+ peer = None
+
+ # get this slice's authority (site)
+ slice_authority = get_authority(hrn)
+ #Iotlab stuff
+ #This slice belongs to the current site
+ if slice_authority == self.driver.iotlab_api.root_auth:
+ site_authority = slice_authority
+ return None
+
+ site_authority = get_authority(slice_authority).lower()
+ # get this site's authority (sfa root authority or sub authority)
+
+ logger.debug("IOTLABSLICES \t get_peer slice_authority %s \
+ site_authority %s hrn %s"
+ % (slice_authority, site_authority, hrn))
+
+ # check if we are already peered with this site_authority
+ #if so find the peer record
+ peers = self.driver.iotlab_api.GetPeers(peer_filter=site_authority)
+ for peer_record in peers:
+ if site_authority == peer_record.hrn:
+ peer = peer_record
+ logger.debug(" IOTLABSLICES \tget_peer peer %s " % (peer))
+ return peer
+
+ def get_sfa_peer(self, xrn):
+ """Returns the authority name for the xrn or None if the local site
+ is the authority.
+
+ :param xrn: the xrn of the resource we are looking the authority for.
+ :type xrn: string
+ :returns: the resources's authority name.
+ :rtype: string
+
+ """
+ hrn, hrn_type = urn_to_hrn(xrn)
+
+ # return the authority for this hrn or None if we are the authority
+ sfa_peer = None
+ slice_authority = get_authority(hrn)
+ site_authority = get_authority(slice_authority)
+
+ if site_authority != self.driver.hrn:
+ sfa_peer = site_authority
+
+ return sfa_peer
+
+ def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
+ """
+ Compare requested leases with the leases already scheduled/
+ running in OAR. If necessary, delete and recreate modified leases,
+ and delete no longer requested ones.
+
+ :param sfa_slice: sfa slice record
+ :param requested_jobs_dict: dictionary of requested leases
+ :param peer: sfa peer record
+
+ :type sfa_slice: dict
+ :type requested_jobs_dict: dict
+ :type peer: dict
+ :returns: leases list of dictionary
+ :rtype: list
+
+ """
+
+ logger.debug("IOTLABSLICES verify_slice_leases sfa_slice %s "
+ % (sfa_slice))
+ #First get the list of current leases from OAR
+ leases = self.driver.iotlab_api.GetLeases({'slice_hrn': sfa_slice['hrn']})
+ logger.debug("IOTLABSLICES verify_slice_leases requested_jobs_dict %s \
+ leases %s " % (requested_jobs_dict, leases))
+
+ current_nodes_reserved_by_start_time = {}
+ requested_nodes_by_start_time = {}
+ leases_by_start_time = {}
+ reschedule_jobs_dict = {}
+
+ #Create reduced dictionary with key start_time and value
+ # the list of nodes
+ #-for the leases already registered by OAR first
+ # then for the new leases requested by the user
+
+ #Leases already scheduled/running in OAR
+ for lease in leases:
+ current_nodes_reserved_by_start_time[lease['t_from']] = \
+ lease['reserved_nodes']
+ leases_by_start_time[lease['t_from']] = lease
+
+ #First remove job whose duration is too short
+ for job in requested_jobs_dict.values():
+ job['duration'] = \
+ str(int(job['duration']) \
+ * self.driver.iotlab_api.GetLeaseGranularity())
+ if job['duration'] < self.driver.iotlab_api.GetLeaseGranularity():
+ del requested_jobs_dict[job['start_time']]
+
+ #Requested jobs
+ for start_time in requested_jobs_dict:
+ requested_nodes_by_start_time[int(start_time)] = \
+ requested_jobs_dict[start_time]['hostname']
+ #Check if there is any difference between the leases already
+ #registered in OAR and the requested jobs.
+ #Difference could be:
+ #-Lease deleted in the requested jobs
+ #-Added/removed nodes
+ #-Newly added lease
+
+ logger.debug("IOTLABSLICES verify_slice_leases \
+ requested_nodes_by_start_time %s \
+ "% (requested_nodes_by_start_time))
+ #Find all deleted leases
+ start_time_list = \
+ list(set(leases_by_start_time.keys()).\
+ difference(requested_nodes_by_start_time.keys()))
+ deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
+ for start_time in start_time_list]
+
+
+ #Find added or removed nodes in exisiting leases
+ for start_time in requested_nodes_by_start_time:
+ logger.debug("IOTLABSLICES verify_slice_leases start_time %s \
+ "%( start_time))
+ if start_time in current_nodes_reserved_by_start_time:
+
+ if requested_nodes_by_start_time[start_time] == \
+ current_nodes_reserved_by_start_time[start_time]:
+ continue
+
+ else:
+ update_node_set = \
+ set(requested_nodes_by_start_time[start_time])
+ added_nodes = \
+ update_node_set.difference(\
+ current_nodes_reserved_by_start_time[start_time])
+ shared_nodes = \
+ update_node_set.intersection(\
+ current_nodes_reserved_by_start_time[start_time])
+ old_nodes_set = \
+ set(\
+ current_nodes_reserved_by_start_time[start_time])
+ removed_nodes = \
+ old_nodes_set.difference(\
+ requested_nodes_by_start_time[start_time])
+ logger.debug("IOTLABSLICES verify_slice_leases \
+ shared_nodes %s added_nodes %s removed_nodes %s"\
+ %(shared_nodes, added_nodes,removed_nodes ))
+ #If the lease is modified, delete it before
+ #creating it again.
+ #Add the deleted lease job id in the list
+ #WARNING :rescheduling does not work if there is already
+ # 2 running/scheduled jobs because deleting a job
+ #takes time SA 18/10/2012
+ if added_nodes or removed_nodes:
+ deleted_leases.append(\
+ leases_by_start_time[start_time]['lease_id'])
+ #Reschedule the job
+ if added_nodes or shared_nodes:
+ reschedule_jobs_dict[str(start_time)] = \
+ requested_jobs_dict[str(start_time)]
+
+ else:
+ #New lease
+
+ job = requested_jobs_dict[str(start_time)]
+ logger.debug("IOTLABSLICES \
+ NEWLEASE slice %s job %s"
+ % (sfa_slice, job))
+ self.driver.iotlab_api.AddLeases(
+ job['hostname'],
+ sfa_slice, int(job['start_time']),
+ int(job['duration']))
+
+ #Deleted leases are the ones with lease id not declared in the Rspec
+ if deleted_leases:
+ self.driver.iotlab_api.DeleteLeases(deleted_leases,
+ sfa_slice['user']['uid'])
+ logger.debug("IOTLABSLICES \
+ verify_slice_leases slice %s deleted_leases %s"
+ % (sfa_slice, deleted_leases))
+
+ if reschedule_jobs_dict:
+ for start_time in reschedule_jobs_dict:
+ job = reschedule_jobs_dict[start_time]
+ self.driver.iotlab_api.AddLeases(
+ job['hostname'],
+ sfa_slice, int(job['start_time']),
+ int(job['duration']))
+ return leases
+
+ def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
+ """Check for wanted and unwanted nodes in the slice.
+
+ Removes nodes and associated leases that the user does not want anymore
+ by deleteing the associated job in OAR (DeleteSliceFromNodes).
+ Returns the nodes' hostnames that are going to be in the slice.
+
+ :param sfa_slice: slice record. Must contain node_ids and list_node_ids.
+
+ :param requested_slivers: list of requested nodes' hostnames.
+ :param peer: unused so far.
+
+ :type sfa_slice: dict
+ :type requested_slivers: list
+ :type peer: string
+
+ :returns: list requested nodes hostnames
+ :rtype: list
+
+ .. warning:: UNUSED SQA 24/07/13
+ .. seealso:: DeleteSliceFromNodes
+ .. todo:: check what to do with the peer? Can not remove peer nodes from
+ slice here. Anyway, in this case, the peer should have gotten the
+ remove request too.
+
+ """
+ current_slivers = []
+ deleted_nodes = []
+
+ if 'node_ids' in sfa_slice:
+ nodes = self.driver.iotlab_api.GetNodes(
+ sfa_slice['list_node_ids'],
+ ['hostname'])
+ current_slivers = [node['hostname'] for node in nodes]
+
+ # remove nodes not in rspec
+ deleted_nodes = list(set(current_slivers).
+ difference(requested_slivers))
+
+ logger.debug("IOTLABSLICES \tverify_slice_nodes slice %s\
+ \r\n \r\n deleted_nodes %s"
+ % (sfa_slice, deleted_nodes))
+
+ if deleted_nodes:
+ #Delete the entire experience
+ self.driver.iotlab_api.DeleteSliceFromNodes(sfa_slice)
+ return nodes
+
    def verify_slice(self, slice_hrn, slice_record, sfa_peer):
        """Ensures slice record exists.

        The slice record must exist either in Iotlab or in the other
        federated testbed (sfa_peer). If the slice does not belong to Iotlab,
        check if the user already exists in LDAP. In this case, adds the slice
        to the sfa DB and associates its LDAP user.

        :param slice_hrn: slice's name
        :param slice_record: sfa record of the slice
        :param sfa_peer: name of the peer authority if any.(not Iotlab).

        :type slice_hrn: string
        :type slice_record: dictionary
        :type sfa_peer: string
        :returns: the local slice record, refreshed from slice_record when
            already known, or freshly built (and possibly added) otherwise.
        :rtype: dict

        .. seealso:: AddSlice


        """

        slicename = slice_hrn
        # check if slice belongs to Iotlab
        slices_list = self.driver.iotlab_api.GetSlices(
            slice_filter=slicename, slice_filter_type='slice_hrn')

        sfa_slice = None

        if slices_list:
            # slice already known: refresh it with the incoming record
            # (if several are returned, the last one wins)
            for sl in slices_list:

                logger.debug("IOTLABSLICES \t verify_slice slicename %s \
                        slices_list %s sl %s \r slice_record %s"
                             % (slicename, slices_list, sl, slice_record))
                sfa_slice = sl
                sfa_slice.update(slice_record)

        else:
            #Search for user in ldap based on email SA 14/11/12
            ldap_user = self.driver.iotlab_api.ldap.LdapFindUser(\
                slice_record['user'])
            logger.debug(" IOTLABSLICES \tverify_slice Oups \
                        slice_record %s sfa_peer %s ldap_user %s"
                         % (slice_record, sfa_peer, ldap_user))
            #User already registered in ldap, meaning user should be in SFA db
            #and hrn = sfa_auth+ uid
            sfa_slice = {'hrn': slicename,
                         'node_list': [],
                         'authority': slice_record['authority'],
                         'gid': slice_record['gid'],
                         'slice_id': slice_record['record_id'],
                         'reg-researchers': slice_record['reg-researchers'],
                         'peer_authority': str(sfa_peer)
                         }

            # NOTE(review): nesting reconstructed -- ldap_user is only bound
            # in this else branch, so this test must live here
            if ldap_user:
                hrn = self.driver.iotlab_api.root_auth + '.' + ldap_user['uid']
                user = self.driver.get_user_record(hrn)

                logger.debug(" IOTLABSLICES \tverify_slice hrn %s USER %s"
                             % (hrn, user))

                # add the external slice to the local SFA iotlab DB
                if sfa_slice:
                    self.driver.iotlab_api.AddSlice(sfa_slice, user)

            logger.debug("IOTLABSLICES \tverify_slice ADDSLICE OK")
        return sfa_slice
+
+
    def verify_persons(self, slice_hrn, slice_record, users, options={}):
        """Ensures the users in users list exist and are enabled in LDAP. Adds
        person if needed.

        Checking that a user exist is based on the user's email. If the user is
        still not found in the LDAP, it means that the user comes from another
        federated testbed. In this case an account has to be created in LDAP
        so as to enable the user to use the testbed, since we trust the testbed
        he comes from. This is done by calling AddPerson.

        :param slice_hrn: slice name
        :param slice_record: record of the slice_hrn
        :param users: users is a record list. Records can either be
            local records or users records from known and trusted federated
            sites.If the user is from another site that iotlab doesn't trust
            yet, then Resolve will raise an error before getting to
            create_sliver.
        :param options: unused for now. NOTE(review): mutable default
            argument -- harmless only while never mutated in here.

        :type slice_hrn: string
        :type slice_record: string
        :type users: list
        :returns: list of persons added to LDAP, each enriched with its
            'uid' when AddPerson succeeded.
        :rtype: list

        .. seealso:: AddPerson
        .. note:: Removed unused peer and sfa_peer parameters. SA 18/07/13.


        """
        #TODO SA 21/08/12 verify_persons Needs review

        logger.debug("IOTLABSLICES \tverify_persons \tslice_hrn %s \
                    \t slice_record %s\r\n users %s \t "
                     % (slice_hrn, slice_record, users))
        users_by_id = {}

        users_by_email = {}
        #users_dict : dict whose keys can either be the user's hrn or its id.
        #Values contains only id and hrn
        users_dict = {}

        #First create dicts by hrn and id for each user in the user record list:
        for info in users:
            if 'slice_record' in info:
                slice_rec = info['slice_record']
                if 'user' in slice_rec :
                    user = slice_rec['user']

                    # email is the only key we can later match against LDAP
                    # NOTE(review): nesting reconstructed -- 'user' is only
                    # bound in this branch, so the email test must live here
                    if 'email' in user:
                        users_by_email[user['email']] = user
                        users_dict[user['email']] = user

        logger.debug("IOTLABSLICES.PY \t verify_person \
                        users_dict %s \r\n user_by_email %s \r\n \
                        \tusers_by_id %s "
                     % (users_dict, users_by_email, users_by_id))

        existing_user_ids = []
        existing_user_emails = []
        existing_users = []
        # Check if user is in Iotlab LDAP using its hrn.
        # Assuming Iotlab is centralised : one LDAP for all sites,
        # user's record_id unknown from LDAP
        # LDAP does not provide users id, therefore we rely on email to find
        # the user in LDAP

        if users_by_email:
            #Construct the list of filters (list of dicts) for GetPersons
            filter_user = [users_by_email[email] for email in users_by_email]
            #Check user i in LDAP with GetPersons
            #Needed because what if the user has been deleted in LDAP but
            #is still in SFA?
            existing_users = self.driver.iotlab_api.GetPersons(filter_user)
            logger.debug(" \r\n IOTLABSLICES.PY \tverify_person filter_user \
                        %s existing_users %s "
                         % (filter_user, existing_users))
            #User is in iotlab LDAP
            if existing_users:
                for user in existing_users:
                    # merge the LDAP data into our local view of the user
                    users_dict[user['email']].update(user)
                    existing_user_emails.append(
                        users_dict[user['email']]['email'])


            # User from another known trusted federated site. Check
            # if a iotlab account matching the email has already been created.
            else:
                req = 'mail='
                if isinstance(users, list):
                    req += users[0]['email']
                else:
                    req += users['email']
                ldap_reslt = self.driver.iotlab_api.ldap.LdapSearch(req)

                if ldap_reslt:
                    logger.debug(" IOTLABSLICES.PY \tverify_person users \
                                USER already in Iotlab \t ldap_reslt %s \
                                " % (ldap_reslt))
                    existing_users.append(ldap_reslt[1])

                else:
                    #User not existing in LDAP
                    logger.debug("IOTLABSLICES.PY \tverify_person users \
                                not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t \
                                ldap_reslt %s " % (users, ldap_reslt))

        requested_user_emails = users_by_email.keys()
        requested_user_hrns = \
            [users_by_email[user]['hrn'] for user in users_by_email]
        logger.debug("IOTLABSLICES.PY \tverify_person \
                       users_by_email %s " % (users_by_email))

        #Check that the user of the slice in the slice record
        #matches one of the existing users
        try:
            if slice_record['PI'][0] in requested_user_hrns:
                logger.debug(" IOTLABSLICES \tverify_person ['PI']\
                            slice_record %s" % (slice_record))

        except KeyError:
            pass

        # users to be added, removed or updated
        #One user in one iotlab slice : there should be no need
        #to remove/ add any user from/to a slice.
        #However a user from SFA which is not registered in Iotlab yet
        #should be added to the LDAP.
        added_user_emails = set(requested_user_emails).\
            difference(set(existing_user_emails))


        #self.verify_keys(existing_slice_users, updated_users_list, \
        #peer, append)

        added_persons = []
        # add new users
        #requested_user_email is in existing_user_emails
        if len(added_user_emails) == 0:
            # everybody already known: record the (single) user's LDAP login
            # on the slice record.
            # NOTE(review): requested_user_emails is dict.keys() -- indexing
            # [0] only works on python 2; confirm before porting to python 3
            slice_record['login'] = users_dict[requested_user_emails[0]]['uid']
            logger.debug(" IOTLABSLICES \tverify_person QUICK DIRTY %s"
                         % (slice_record))

        for added_user_email in added_user_emails:
            added_user = users_dict[added_user_email]
            logger.debug(" IOTLABSLICES \r\n \r\n \t verify_person \
                         added_user %s" % (added_user))
            person = {}
            person['peer_person_id'] = None
            k_list = ['first_name', 'last_name', 'person_id']
            for k in k_list:
                if k in added_user:
                    person[k] = added_user[k]

            # the LDAP account gets the user's first ssh key as primary key
            person['pkey'] = added_user['keys'][0]
            person['mail'] = added_user['email']
            person['email'] = added_user['email']
            person['key_ids'] = added_user.get('key_ids', [])

            ret = self.driver.iotlab_api.AddPerson(person)
            if 'uid' in ret:
                # meaning bool is True and the AddPerson was successful
                person['uid'] = ret['uid']
                slice_record['login'] = person['uid']
            else:
                # error message in ret
                logger.debug(" IOTLABSLICES ret message %s" %(ret))

            logger.debug(" IOTLABSLICES \r\n \r\n \t THE SECOND verify_person\
                          person %s" % (person))
            #Update slice_Record with the id now known to LDAP


            added_persons.append(person)
        return added_persons
+
+
    def verify_keys(self, persons, users, peer, options={}):
        """
        Registers any requested ssh key not already known (AddPersonKey),
        and deletes no-longer-requested keys unless options['append'] is
        True (the default). The peer-related bookkeeping is commented out.

        :param persons: existing person records, carrying 'key_ids'.
        :param users: requested user records, carrying 'keys'.
        :param peer: peer record (currently unused -- see commented code).
        :param options: 'append' (default True) keeps existing keys.

        .. warning:: unused
        """
        # existing keys
        key_ids = []
        for person in persons:
            key_ids.extend(person['key_ids'])
        keylist = self.driver.iotlab_api.GetKeys(key_ids, ['key_id', 'key'])

        keydict = {}
        for key in keylist:
            keydict[key['key']] = key['key_id']
        existing_keys = keydict.keys()

        persondict = {}
        for person in persons:
            persondict[person['email']] = person

        # add new keys
        requested_keys = []
        updated_persons = []
        users_by_key_string = {}
        for user in users:
            user_keys = user.get('keys', [])
            updated_persons.append(user)
            for key_string in user_keys:
                users_by_key_string[key_string] = user
                requested_keys.append(key_string)
                if key_string not in existing_keys:
                    key = {'key': key_string, 'key_type': 'ssh'}
                    #try:
                    ##if peer:
                    #person = persondict[user['email']]
                    #self.driver.iotlab_api.UnBindObjectFromPeer(
                    # 'person',person['person_id'],
                    # peer['shortname'])
                    ret = self.driver.iotlab_api.AddPersonKey(
                        user['email'], key)
                    #if peer:
                    #key_index = user_keys.index(key['key'])
                    #remote_key_id = user['key_ids'][key_index]
                    #self.driver.iotlab_api.BindObjectToPeer('key', \
                    #key['key_id'], peer['shortname'], \
                    #remote_key_id)

                    #finally:
                    #if peer:
                    #self.driver.iotlab_api.BindObjectToPeer('person', \
                    #person['person_id'], peer['shortname'], \
                    #user['person_id'])

        # remove old keys (only if we are not appending)
        append = options.get('append', True)
        if append is False:
            removed_keys = set(existing_keys).difference(requested_keys)
            for key in removed_keys:
                #if peer:
                #self.driver.iotlab_api.UnBindObjectFromPeer('key', \
                #key, peer['shortname'])

                user = users_by_key_string[key]
                self.driver.iotlab_api.DeleteKey(user, key)

        return
class iotlab (Generic):
    """Generic-flavour configuration for the iotlab testbed: binds the
    standard SFA api and manager classes to the iotlab-specific importer
    and driver. iotlab has no component manager/driver yet."""

    # use the standard api class
    def api_class (self):
        return sfa.server.sfaapi.SfaApi

    # the importer class
    def importer_class (self):
        import sfa.importer.iotlabimporter
        return sfa.importer.iotlabimporter.IotlabImporter

    # the manager classes for the server-side services
    def registry_manager_class (self) :
        import sfa.managers.registry_manager
        return sfa.managers.registry_manager.RegistryManager

    def slicemgr_manager_class (self) :
        import sfa.managers.slice_manager
        return sfa.managers.slice_manager.SliceManager

    def aggregate_manager_class (self) :
        import sfa.managers.aggregate_manager
        return sfa.managers.aggregate_manager.AggregateManager

    # driver class for server-side services, talk to the whole testbed
    def driver_class (self):
        import sfa.iotlab.iotlabdriver
        return sfa.iotlab.iotlabdriver.IotlabDriver

    # iotlab does not have a component manager yet
    # manager class
    def component_manager_class (self):
        return None
    # driver_class
    def component_driver_class (self):
        return None
--- /dev/null
+""" File defining the importer class and all the methods needed to import
+the nodes, users and slices from OAR and LDAP to the SFA database.
+Also creates the iotlab specific database and table to keep track
+of which slice hrn contains which job.
+"""
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_authority, hrn_to_urn
+
+from sfa.iotlab.iotlabdriver import IotlabDriver
+from sfa.iotlab.iotlabpostgres import TestbedAdditionalSfaDB
+from sfa.trust.certificate import Keypair, convert_public_key
+from sfa.trust.gid import create_uuid
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, \
+ RegUser, RegKey
+
+
+from sqlalchemy.exc import SQLAlchemyError
+
+
+class IotlabImporter:
+ """
+ IotlabImporter class, generic importer_class. Used to populate the SFA DB
+ with iotlab resources' records.
+ Used to update records when new resources, users or nodes, are added
+ or deleted.
+ """
+
    def __init__(self, auth_hierarchy, loc_logger):
        """
        Sets and defines import logger and the authority name. Gathers all the
        records already registered in the SFA DB, breaks them into 3 dicts,
        by type and hrn, by email and by type and pointer.

        :param auth_hierarchy: authority name
        :type auth_hierarchy: string
        :param loc_logger: local logger
        :type loc_logger: _SfaLogger

        """
        self.auth_hierarchy = auth_hierarchy
        self.logger = loc_logger
        self.logger.setLevelDebug()
        #retrieve all existing SFA objects
        self.all_records = dbsession.query(RegRecord).all()

        # initialize record.stale to True by default,
        # then mark stale=False on the ones that are in use
        for record in self.all_records:
            record.stale = True
        #create hash by (type,hrn)
        #used to know if a given record is already known to SFA
        self.records_by_type_hrn = \
            dict([((record.type, record.hrn), record)
                  for record in self.all_records])

        # user records indexed by email, for LDAP-based lookups
        self.users_rec_by_email = \
            dict([(record.email, record)
                  for record in self.all_records if record.type == 'user'])

        # create hash by (type,pointer)
        # pointer == -1 means "no pointer into the testbed db": skip those
        self.records_by_type_pointer = \
            dict([((str(record.type), record.pointer), record)
                  for record in self.all_records if record.pointer != -1])
+
+
+ @staticmethod
+ def hostname_to_hrn_escaped(root_auth, hostname):
+ """
+
+ Returns a node's hrn based on its hostname and the root authority and by
+ removing special caracters from the hostname.
+
+ :param root_auth: root authority name
+ :param hostname: nodes's hostname
+ :type root_auth: string
+ :type hostname: string
+ :rtype: string
+ """
+ return '.'.join([root_auth, Xrn.escape(hostname)])
+
+
+ @staticmethod
+ def slicename_to_hrn(person_hrn):
+ """
+
+ Returns the slicename associated to a given person's hrn.
+
+ :param person_hrn: user's hrn
+ :type person_hrn: string
+ :rtype: string
+ """
+ return (person_hrn + '_slice')
+
    def add_options(self, parser):
        """
        Hook for importer command-line options.

        :param parser: option parser; deliberately left untouched.

        .. warning:: not used
        """
        # we don't have any options for now
        pass
+
+ def find_record_by_type_hrn(self, record_type, hrn):
+ """
+ Finds the record associated with the hrn and its type given in parameter
+ if the tuple (hrn, type hrn) is an existing key in the dictionary.
+
+ :param record_type: the record's type (slice, node, authority...)
+ :type record_type: string
+ :param hrn: Human readable name of the object's record
+ :type hrn: string
+ :returns: Returns the record associated with a given hrn and hrn type.
+ Returns None if the key tuple is not in the dictionary.
+ :rtype: RegUser if user, RegSlice if slice, RegNode if node...or None if
+ record does not exist.
+
+ """
+ return self.records_by_type_hrn.get((record_type, hrn), None)
+
+ def locate_by_type_pointer(self, record_type, pointer):
+ """
+
+ Returns the record corresponding to the key pointer and record
+ type. Returns None if the record does not exist and is not in the
+ records_by_type_pointer dictionnary.
+
+ :param record_type: the record's type (slice, node, authority...)
+ :type record_type: string
+ :param pointer:Pointer to where the record is in the origin db,
+ used in case the record comes from a trusted authority.
+ :type pointer: integer
+ :rtype: RegUser if user, RegSlice if slice, RegNode if node...
+ or None if record does not exist.
+ """
+ return self.records_by_type_pointer.get((record_type, pointer), None)
+
+
+ def update_just_added_records_dict(self, record):
+ """
+
+ Updates the records_by_type_hrn dictionnary if the record has
+ just been created.
+
+ :param record: Record to add in the records_by_type_hrn dict.
+ :type record: dictionary
+ """
+ rec_tuple = (record.type, record.hrn)
+ if rec_tuple in self.records_by_type_hrn:
+ self.logger.warning("IotlabImporter.update_just_added_records_dict:\
+ duplicate (%s,%s)" % rec_tuple)
+ return
+ self.records_by_type_hrn[rec_tuple] = record
+
+
+ def import_nodes(self, site_node_ids, nodes_by_id, iotlabdriver):
+ """
+
+ Creates appropriate hostnames and RegNode records for each node in
+ site_node_ids, based on the information given by the dict nodes_by_id
+ that was made from data from OAR. Saves the records to the DB.
+
+ :param site_node_ids: site's node ids
+ :type site_node_ids: list of integers
+ :param nodes_by_id: dictionary , key is the node id, value is the a dict
+ with node information.
+ :type nodes_by_id: dictionary
+ :param iotlabdriver: IotlabDriver object, used to have access to
+ iotlabdriver attributes.
+ :type iotlabdriver: IotlabDriver
+
+ :returns: None
+ :rtype: None
+
+ """
+
+ for node_id in site_node_ids:
+ try:
+ node = nodes_by_id[node_id]
+ except KeyError:
+ self.logger.warning("IotlabImporter: cannot find node_id %s \
+ - ignored" % (node_id))
+ continue
+ escaped_hrn = \
+ self.hostname_to_hrn_escaped(iotlabdriver.iotlab_api.root_auth,
+ node['hostname'])
+ self.logger.info("IOTLABIMPORTER node %s " % (node))
+ hrn = node['hrn']
+
+ # xxx this sounds suspicious
+ if len(hrn) > 64:
+ hrn = hrn[:64]
+ node_record = self.find_record_by_type_hrn('node', hrn)
+ if not node_record:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(escaped_hrn, 'node')
+ node_gid = \
+ self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+
+ def iotlab_get_authority(hrn):
+ """ Gets the authority part in the hrn.
+ :param hrn: hrn whose authority we are looking for.
+ :type hrn: string
+ :returns: splits the hrn using the '.' separator and returns
+ the authority part of the hrn.
+ :rtype: string
+
+ """
+ return hrn.split(".")[0]
+
+ node_record = RegNode(hrn=hrn, gid=node_gid,
+ pointer='-1',
+ authority=iotlab_get_authority(hrn))
+ try:
+
+ node_record.just_created()
+ dbsession.add(node_record)
+ dbsession.commit()
+ self.logger.info("IotlabImporter: imported node: %s"
+ % node_record)
+ self.update_just_added_records_dict(node_record)
+ except SQLAlchemyError:
+ self.logger.log_exc("IotlabImporter: failed to import node")
+ else:
+ #TODO: xxx update the record ...
+ pass
+ node_record.stale = False
+
    def import_sites_and_nodes(self, iotlabdriver):
        """
        Gets all the sites and nodes from OAR, process the information,
        creates hrns and RegAuthority for sites, and feed them to the database.
        For each site, import the site's nodes to the DB by calling
        import_nodes.

        :param iotlabdriver: IotlabDriver object, used to have access to
            iotlabdriver methods and fetching info on sites and nodes.
        :type iotlabdriver: IotlabDriver
        """

        sites_listdict = iotlabdriver.iotlab_api.GetSites()
        nodes_listdict = iotlabdriver.iotlab_api.GetNodes()
        # index nodes by their OAR node id for the per-site import below
        nodes_by_id = dict([(node['node_id'], node) for node in nodes_listdict])
        for site in sites_listdict:
            site_hrn = site['name']
            site_record = self.find_record_by_type_hrn ('authority', site_hrn)
            self.logger.info("IotlabImporter: import_sites_and_nodes \
                                (site) %s \r\n " % site_record)
            if not site_record:
                try:
                    urn = hrn_to_urn(site_hrn, 'authority')
                    if not self.auth_hierarchy.auth_exists(urn):
                        self.auth_hierarchy.create_auth(urn)

                    auth_info = self.auth_hierarchy.get_auth_info(urn)
                    site_record = \
                        RegAuthority(hrn=site_hrn,
                                     gid=auth_info.get_gid_object(),
                                     pointer='-1',
                                     authority=get_authority(site_hrn))
                    site_record.just_created()
                    dbsession.add(site_record)
                    dbsession.commit()
                    self.logger.info("IotlabImporter: imported authority \
                                    (site) %s" % site_record)
                    self.update_just_added_records_dict(site_record)
                except SQLAlchemyError:
                    # if the site import fails then there is no point in
                    # trying to import the
                    # site's child records(node, slices, persons), so skip them.
                    self.logger.log_exc("IotlabImporter: failed to import \
                        site. Skipping child records")
                    continue
            else:
                # xxx update the record ...
                pass

            # the site was seen in this run: keep its record
            site_record.stale = False
            self.import_nodes(site['node_ids'], nodes_by_id, iotlabdriver)

        return
+
+
+
+ def init_person_key(self, person, iotlab_key):
+ """
+
+ Returns a tuple pubkey and pkey.
+
+ :param person: Person's data.
+ :type person: dict
+ :param iotlab_key: SSH public key, from LDAP user's data.
+ RSA type supported.
+ :type iotlab_key: string
+ :returns: (pubkey, pkey) where pubkey is the SSH public key kept for
+ the user (None when the user has no key) and pkey is the Keypair
+ derived from it (a freshly generated Keypair when the public key
+ is missing or cannot be converted).
+ :rtype: (string, Keypair)
+
+ """
+ pubkey = None
+ if person['pkey']:
+ # randomly pick first key in set
+ pubkey = iotlab_key
+
+ try:
+ pkey = convert_public_key(pubkey)
+ except TypeError:
+ # key not good: fall back on a randomly generated keypair so
+ # the user's gid can still be built
+ self.logger.warn("IotlabImporter: \
+ unable to convert public \
+ key for %s" % person['hrn'])
+ pkey = Keypair(create=True)
+
+ else:
+ # the user has no keys.
+ # Creating a random keypair for the user's gid
+ self.logger.warn("IotlabImporter: person %s does not have a \
+ public key" % (person['hrn']))
+ pkey = Keypair(create=True)
+ return (pubkey, pkey)
+
+ def import_persons_and_slices(self, iotlabdriver):
+ """
+
+ Gets user data from LDAP, process the information.
+ Creates hrn for the user's slice, the user's gid, creates
+ the RegUser record associated with user. Creates the RegKey record
+ associated nwith the user's key.
+ Saves those records into the SFA DB.
+ import the user's slice onto the database as well by calling
+ import_slice.
+
+ :param iotlabdriver: IotlabDriver object, used to have access to
+ iotlabdriver attributes.
+ :type iotlabdriver: IotlabDriver
+ """
+ ldap_person_listdict = iotlabdriver.iotlab_api.GetPersons()
+ self.logger.info("IOTLABIMPORT \t ldap_person_listdict %s \r\n"
+ % (ldap_person_listdict))
+
+ # import persons
+ for person in ldap_person_listdict:
+
+ self.logger.info("IotlabImporter: person :" % (person))
+ if 'ssh-rsa' not in person['pkey']:
+ #people with invalid ssh key (ssh-dss, empty, bullshit keys...)
+ #won't be imported
+ continue
+ person_hrn = person['hrn']
+ slice_hrn = self.slicename_to_hrn(person['hrn'])
+
+ # xxx suspicious again
+ if len(person_hrn) > 64:
+ person_hrn = person_hrn[:64]
+ person_urn = hrn_to_urn(person_hrn, 'user')
+
+
+ self.logger.info("IotlabImporter: users_rec_by_email %s "
+ % (self.users_rec_by_email))
+
+ #Check if user using person['email'] from LDAP is already registered
+ #in SFA. One email = one person. In this case, do not create another
+ #record for this person
+ #person_hrn returned by GetPerson based on iotlab root auth +
+ #uid ldap
+ user_record = self.find_record_by_type_hrn('user', person_hrn)
+
+ if not user_record and person['email'] in self.users_rec_by_email:
+ user_record = self.users_rec_by_email[person['email']]
+ person_hrn = user_record.hrn
+ person_urn = hrn_to_urn(person_hrn, 'user')
+
+
+ slice_record = self.find_record_by_type_hrn('slice', slice_hrn)
+
+ iotlab_key = person['pkey']
+ # new person
+ if not user_record:
+ (pubkey, pkey) = self.init_person_key(person, iotlab_key)
+ if pubkey is not None and pkey is not None:
+ person_gid = \
+ self.auth_hierarchy.create_gid(person_urn,
+ create_uuid(), pkey)
+ if person['email']:
+ self.logger.debug("IOTLAB IMPORTER \
+ PERSON EMAIL OK email %s " % (person['email']))
+ person_gid.set_email(person['email'])
+ user_record = \
+ RegUser(hrn=person_hrn,
+ gid=person_gid,
+ pointer='-1',
+ authority=get_authority(person_hrn),
+ email=person['email'])
+ else:
+ user_record = \
+ RegUser(hrn=person_hrn,
+ gid=person_gid,
+ pointer='-1',
+ authority=get_authority(person_hrn))
+
+ if pubkey:
+ user_record.reg_keys = [RegKey(pubkey)]
+ else:
+ self.logger.warning("No key found for user %s"
+ % (user_record))
+
+ try:
+ user_record.just_created()
+ dbsession.add (user_record)
+ dbsession.commit()
+ self.logger.info("IotlabImporter: imported person \
+ %s" % (user_record))
+ self.update_just_added_records_dict(user_record)
+
+ except SQLAlchemyError:
+ self.logger.log_exc("IotlabImporter: \
+ failed to import person %s" % (person))
+ else:
+ # update the record ?
+ # if user's primary key has changed then we need to update
+ # the users gid by forcing an update here
+ sfa_keys = user_record.reg_keys
+
+ new_key = False
+ if iotlab_key is not sfa_keys:
+ new_key = True
+ if new_key:
+ self.logger.info("IotlabImporter: \t \t USER UPDATE \
+ person: %s" % (person['hrn']))
+ (pubkey, pkey) = self.init_person_key(person, iotlab_key)
+ person_gid = \
+ self.auth_hierarchy.create_gid(person_urn,
+ create_uuid(), pkey)
+ if not pubkey:
+ user_record.reg_keys = []
+ else:
+ user_record.reg_keys = [RegKey(pubkey)]
+ self.logger.info("IotlabImporter: updated person: %s"
+ % (user_record))
+
+ if person['email']:
+ user_record.email = person['email']
+
+ try:
+ dbsession.commit()
+ user_record.stale = False
+ except SQLAlchemyError:
+ self.logger.log_exc("IotlabImporter: \
+ failed to update person %s"% (person))
+
+ self.import_slice(slice_hrn, slice_record, user_record)
+
+
+ def import_slice(self, slice_hrn, slice_record, user_record):
+ """
+
+ Create RegSlice record according to the slice hrn if the slice
+ does not exist yet. Creates a relationship with the user record
+ associated with the slice.
+ Commit the record to the database.
+
+
+ :param slice_hrn: Human readable name of the slice.
+ :type slice_hrn: string
+ :param slice_record: record of the slice found in the DB, if any.
+ :type slice_record: RegSlice or None
+ :param user_record: user record found in the DB if any.
+ :type user_record: RegUser
+
+ .. todo::Update the record if a slice record already exists.
+ """
+ if not slice_record:
+ # unknown slice: build a gid from a freshly generated keypair,
+ # then the RegSlice record itself
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(slice_hrn, 'slice')
+ slice_gid = \
+ self.auth_hierarchy.create_gid(urn,
+ create_uuid(), pkey)
+ slice_record = RegSlice(hrn=slice_hrn, gid=slice_gid,
+ pointer='-1',
+ authority=get_authority(slice_hrn))
+ try:
+ slice_record.just_created()
+ dbsession.add(slice_record)
+ dbsession.commit()
+
+
+ self.update_just_added_records_dict(slice_record)
+
+ except SQLAlchemyError:
+ self.logger.log_exc("IotlabImporter: failed to import slice")
+
+ #No slice update upon import in iotlab
+ else:
+ # xxx update the record ...
+ self.logger.warning("Slice update not yet implemented")
+ pass
+ # record current users affiliated with the slice
+
+
+ # NOTE(review): this overwrites the researcher list with this single
+ # user -- confirm a slice can never have several researchers here
+ slice_record.reg_researchers = [user_record]
+ try:
+ dbsession.commit()
+ slice_record.stale = False
+ except SQLAlchemyError:
+ self.logger.log_exc("IotlabImporter: failed to update slice")
+
+
+ def run(self, options):
+ """
+ Create the special iotlab table, testbed_xp, in the iotlab database.
+ Import everything (users, slices, nodes and sites from OAR
+ and LDAP) into the SFA database.
+ Delete stale records that are no longer in OAR or LDAP.
+
+ :param options: command-line options; not used by this importer.
+ """
+ config = Config()
+
+ iotlabdriver = IotlabDriver(config)
+ iotlab_db = TestbedAdditionalSfaDB(config)
+ #Create special slice table for iotlab
+
+ if not iotlab_db.exists('testbed_xp'):
+ iotlab_db.createtable()
+ self.logger.info("IotlabImporter.run: testbed_xp table created ")
+
+ # import site and node records in site into the SFA db.
+ self.import_sites_and_nodes(iotlabdriver)
+ #import users and slice into the SFA DB.
+ self.import_persons_and_slices(iotlabdriver)
+
+ ### remove stale records
+ # special records must be preserved
+ system_hrns = [iotlabdriver.hrn, iotlabdriver.iotlab_api.root_auth,
+ iotlabdriver.hrn + '.slicemanager']
+ for record in self.all_records:
+ if record.hrn in system_hrns:
+ record.stale = False
+ # records mirrored from a peer authority are not ours to delete
+ if record.peer_authority:
+ record.stale = False
+
+ for record in self.all_records:
+ if record.type == 'user':
+ self.logger.info("IotlabImporter: stale records: hrn %s %s"
+ % (record.hrn, record.stale))
+ try:
+ stale = record.stale
+ except:
+ # NOTE(review): bare except -- a record lacking the transient
+ # 'stale' attribute is treated as stale and gets deleted
+ stale = True
+ self.logger.warning("stale not found with %s" % record)
+ if stale:
+ self.logger.info("IotlabImporter: deleting stale record: %s"
+ % (record))
+
+ try:
+ dbsession.delete(record)
+ dbsession.commit()
+ except SQLAlchemyError:
+ self.logger.log_exc("IotlabImporter: failed to delete \
+ stale record %s" % (record))
# Get all plc sites
# retrieve only required stuf
sites = shell.GetSites({'peer_id': None, 'enabled' : True},
- ['site_id','login_base','node_ids','slice_ids','person_ids',])
+ ['site_id','login_base','node_ids','slice_ids','person_ids', 'name'])
# create a hash of sites by login_base
# sites_by_login_base = dict ( [ ( site['login_base'], site ) for site in sites ] )
# Get all plc users
# start importing
for site in sites:
+ if site['name'].startswith('sfa.'):
+ continue
+
site_hrn = _get_site_hrn(interface_hrn, site)
# import if hrn is not in list of existing hrns or if the hrn exists
# but its not a site record
pass
node_record.stale=False
- site_pis=set()
+ site_pis=[]
# import persons
for person_id in site['person_ids']:
proceed=False
# this is valid for all sites she is in..
# PI is coded with role_id==20
if 20 in person['role_ids']:
- site_pis.add (user_record)
+ site_pis.append (user_record)
except:
self.logger.log_exc("PlImporter: failed to import person %d %s"%(person['person_id'],person['email']))
# being improperly handled, and where the whole loop on persons
# could be performed twice with the same person...
# so hopefully we do not need to eliminate duplicates explicitly here anymore
- site_record.reg_pis = list(site_pis)
+ site_record.reg_pis = list(set(site_pis))
dbsession.commit()
# import slices
except:
self.logger.log_exc("PlImporter: failed to import slice %s (%s)"%(slice_hrn,slice['name']))
else:
- # update the pointer if it has changed
- if slice_id != slice_record.pointer:
- self.logger.info("updating record (slice) pointer")
- slice_record.pointer = slice_id
- dbsession.commit()
# xxx update the record ...
# given that we record the current set of users anyways, there does not seem to be much left to do here
# self.logger.warning ("Slice update not yet implemented on slice %s (%s)"%(slice_hrn,slice['name']))
- #pass
+ pass
# record current users affiliated with the slice
slice_record.reg_researchers = \
[ self.locate_by_type_pointer ('user',user_id) for user_id in slice['person_ids'] ]
+++ /dev/null
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_authority, hrn_to_urn
-
-from sfa.senslab.slabdriver import SlabDriver
-
-from sfa.trust.certificate import Keypair, convert_public_key
-from sfa.trust.gid import create_uuid
-
-from sfa.storage.alchemy import dbsession
-from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, \
- RegUser, RegKey
-
-
-from sqlalchemy.exc import SQLAlchemyError
-
-
-
-class SlabImporter:
- """
- SlabImporter class, generic importer_class. Used to populate the SFA DB
- with senslab resources' records.
- Used to update records when new resources, users or nodes, are added
- or deleted.
- """
-
- def __init__ (self, auth_hierarchy, loc_logger):
- """
- Sets and defines import logger and the authority name. Gathers all the
- records already registerd in the SFA DB, broke them into 3 dicts,
- by type and hrn, by email and by type and pointer.
-
- :param auth_hierarchy: authority name
- :type auth_hierarchy: string
- :param loc_logger: local logger
- :type loc_logger: _SfaLogger
-
- """
- self.auth_hierarchy = auth_hierarchy
- self.logger = loc_logger
- self.logger.setLevelDebug()
- #retrieve all existing SFA objects
- self.all_records = dbsession.query(RegRecord).all()
-
- # initialize record.stale to True by default,
- # then mark stale=False on the ones that are in use
- for record in self.all_records:
- record.stale = True
- #create hash by (type,hrn)
- #used to know if a given record is already known to SFA
- self.records_by_type_hrn = \
- dict([( (record.type,record.hrn), record) \
- for record in self.all_records])
-
- self.users_rec_by_email = \
- dict([ (record.email, record) \
- for record in self.all_records if record.type == 'user'])
-
- # create hash by (type,pointer)
- self.records_by_type_pointer = \
- dict([ ( (str(record.type), record.pointer) , record) \
- for record in self.all_records if record.pointer != -1])
-
-
-
- @staticmethod
- def hostname_to_hrn_escaped(root_auth, hostname):
- """
-
- Returns a node's hrn based on its hostname and the root
- authority and by removing special caracters from the hostname.
-
- :param root_auth: root authority name
- :param hostname: nodes's hostname
- :type root_auth: string
- :type hostname: string
- :rtype: string
- """
- return '.'.join( [root_auth, Xrn.escape(hostname)] )
-
-
- @staticmethod
- def slicename_to_hrn(person_hrn):
- """
-
- Returns the slicename associated to a given person's hrn.
-
- :param person_hrn: user's hrn
- :type person_hrn: string
- :rtype: string
- """
- return (person_hrn +'_slice')
-
- def add_options (self, parser):
- # we don't have any options for now
- pass
-
- def find_record_by_type_hrn(self, record_type, hrn):
- """
-
- Returns the record associated with a given hrn and hrn type.
- Returns None if the key tuple is not in dictionary.
-
- :param record_type: the record's type (slice, node, authority...)
- :type record_type: string
- :param hrn: Human readable name of the object's record
- :type hrn: string
- :rtype: RegUser if user, RegSlice if slice, RegNode if node...
- or None if record does not exist.
-
- """
- return self.records_by_type_hrn.get ( (record_type, hrn), None)
-
- def locate_by_type_pointer (self, record_type, pointer):
- """
-
- Returns the record corresponding to the key pointer and record
- type. Returns None if the record does not exist and is not in the
- records_by_type_pointer dictionnary.
-
- :param record_type: the record's type (slice, node, authority...)
- :type record_type: string
- :param pointer:Pointer to where the record is in the origin db,
- used in case the record comes from a trusted authority.
- :type pointer: integer
- :rtype: RegUser if user, RegSlice if slice, RegNode if node...
- or None if record does not exist.
- """
- return self.records_by_type_pointer.get ( (record_type, pointer), None)
-
-
- def update_just_added_records_dict (self, record):
- """
-
- Updates the records_by_type_hrn dictionnary if the record has
- just been created.
-
- :param record: Record to add in the records_by_type_hrn dict.
- :type record: dictionary
- """
- rec_tuple = (record.type, record.hrn)
- if rec_tuple in self.records_by_type_hrn:
- self.logger.warning ("SlabImporter.update_just_added_records_dict:\
- duplicate (%s,%s)"%rec_tuple)
- return
- self.records_by_type_hrn [ rec_tuple ] = record
-
- def import_sites_and_nodes(self, slabdriver):
- """
-
- Gets all the sites and nodes from OAR, process the information,
- creates hrns and RegAuthority for sites, and feed them to the database.
- For each site, import the site's nodes to the DB by calling
- import_nodes.
-
- :param slabdriver: SlabDriver object, used to have access to slabdriver
- methods and fetching info on sites and nodes.
- :type slabdriver: SlabDriver
- """
-
- sites_listdict = slabdriver.slab_api.GetSites()
- nodes_listdict = slabdriver.slab_api.GetNodes()
- nodes_by_id = dict([(node['node_id'], node) for node in nodes_listdict])
- for site in sites_listdict:
- site_hrn = site['name']
- site_record = self.find_record_by_type_hrn ('authority', site_hrn)
- if not site_record:
- try:
- urn = hrn_to_urn(site_hrn, 'authority')
- if not self.auth_hierarchy.auth_exists(urn):
- self.auth_hierarchy.create_auth(urn)
-
- auth_info = self.auth_hierarchy.get_auth_info(urn)
- site_record = RegAuthority(hrn=site_hrn, \
- gid=auth_info.get_gid_object(),
- pointer='-1',
- authority=get_authority(site_hrn))
- site_record.just_created()
- dbsession.add(site_record)
- dbsession.commit()
- self.logger.info("SlabImporter: imported authority (site) \
- %s" % site_record)
- self.update_just_added_records_dict(site_record)
- except SQLAlchemyError:
- # if the site import fails then there is no point in
- # trying to import the
- # site's child records(node, slices, persons), so skip them.
- self.logger.log_exc("SlabImporter: failed to import site. \
- Skipping child records")
- continue
- else:
- # xxx update the record ...
- pass
-
-
- site_record.stale = False
- self.import_nodes(site['node_ids'], nodes_by_id, slabdriver)
-
- return
-
- def import_nodes(self, site_node_ids, nodes_by_id, slabdriver) :
- """
-
- Creates appropriate hostnames and RegNode records for
- each node in site_node_ids, based on the information given by the
- dict nodes_by_id that was made from data from OAR.
- Saves the records to the DB.
-
- :param site_node_ids: site's node ids
- :type site_node_ids: list of integers
- :param nodes_by_id: dictionary , key is the node id, value is the a dict
- with node information.
- :type nodes_by_id: dictionary
- :param slabdriver:SlabDriver object, used to have access to slabdriver
- attributes.
- :type slabdriver:SlabDriver
-
- """
-
- for node_id in site_node_ids:
- try:
- node = nodes_by_id[node_id]
- except KeyError:
- self.logger.warning ("SlabImporter: cannot find node_id %s \
- - ignored" %(node_id))
- continue
- escaped_hrn = \
- self.hostname_to_hrn_escaped(slabdriver.slab_api.root_auth, \
- node['hostname'])
- self.logger.info("SLABIMPORTER node %s " %(node))
- hrn = node['hrn']
-
-
- # xxx this sounds suspicious
- if len(hrn) > 64:
- hrn = hrn[:64]
- node_record = self.find_record_by_type_hrn( 'node', hrn )
- if not node_record:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(escaped_hrn, 'node')
- node_gid = \
- self.auth_hierarchy.create_gid(urn, \
- create_uuid(), pkey)
-
- def slab_get_authority(hrn):
- return hrn.split(".")[0]
-
- node_record = RegNode(hrn=hrn, gid=node_gid,
- pointer = '-1',
- authority=slab_get_authority(hrn))
- try:
-
- node_record.just_created()
- dbsession.add(node_record)
- dbsession.commit()
- self.logger.info("SlabImporter: imported node: %s" \
- % node_record)
- self.update_just_added_records_dict(node_record)
- except SQLAlchemyError:
- self.logger.log_exc("SlabImporter: \
- failed to import node")
- else:
- #TODO: xxx update the record ...
- pass
- node_record.stale = False
-
-
- def init_person_key (self, person, slab_key):
- """
-
- Returns a tuple pubkey and pkey.
-
- :param person Person's data.
- :type person: dict
- :param slab_key: SSH public key, from LDAP user's data.
- RSA type supported.
- :type slab_key: string
- :rtype (string, Keypair)
- """
- pubkey = None
- if person['pkey']:
- # randomly pick first key in set
- pubkey = slab_key
-
- try:
- pkey = convert_public_key(pubkey)
- except TypeError:
- #key not good. create another pkey
- self.logger.warn('SlabImporter: \
- unable to convert public \
- key for %s' %person['hrn'])
- pkey = Keypair(create=True)
-
- else:
- # the user has no keys.
- #Creating a random keypair for the user's gid
- self.logger.warn("SlabImporter: person %s does not have a \
- public key" %(person['hrn']))
- pkey = Keypair(create=True)
- return (pubkey, pkey)
-
-
- def import_persons_and_slices(self, slabdriver):
- """
-
- Gets user data from LDAP, process the information.
- Creates hrn for the user's slice, the user's gid, creates
- the RegUser record associated with user. Creates the RegKey record
- associated nwith the user's key.
- Saves those records into the SFA DB.
- import the user's slice onto the database as well by calling
- import_slice.
-
- :param slabdriver:SlabDriver object, used to have access to slabdriver
- attributes.
- :type slabdriver:SlabDriver
- """
- ldap_person_listdict = slabdriver.slab_api.GetPersons()
- self.logger.info("SLABIMPORT \t ldap_person_listdict %s \r\n" \
- %(ldap_person_listdict))
-
- # import persons
- for person in ldap_person_listdict :
-
- self.logger.info("SlabImporter: person :" %(person))
- if 'ssh-rsa' not in person['pkey']:
- #people with invalid ssh key (ssh-dss, empty, bullshit keys...)
- #won't be imported
- continue
- person_hrn = person['hrn']
- slice_hrn = self.slicename_to_hrn(person['hrn'])
-
- # xxx suspicious again
- if len(person_hrn) > 64:
- person_hrn = person_hrn[:64]
- person_urn = hrn_to_urn(person_hrn, 'user')
-
-
- self.logger.info("SlabImporter: users_rec_by_email %s " \
- %(self.users_rec_by_email))
-
- #Check if user using person['email'] from LDAP is already registered
- #in SFA. One email = one person. In this case, do not create another
- #record for this person
- #person_hrn returned by GetPerson based on senslab root auth +
- #uid ldap
- user_record = self.find_record_by_type_hrn('user', person_hrn)
-
- if not user_record and person['email'] in self.users_rec_by_email:
- user_record = self.users_rec_by_email[person['email']]
- person_hrn = user_record.hrn
- person_urn = hrn_to_urn(person_hrn, 'user')
-
-
- slice_record = self.find_record_by_type_hrn ('slice', slice_hrn)
-
- slab_key = person['pkey']
- # new person
- if not user_record:
- (pubkey, pkey) = self.init_person_key(person, slab_key)
- if pubkey is not None and pkey is not None :
- person_gid = \
- self.auth_hierarchy.create_gid(person_urn, \
- create_uuid(), pkey)
- if person['email']:
- self.logger.debug( "SLAB IMPORTER \
- PERSON EMAIL OK email %s " %(person['email']))
- person_gid.set_email(person['email'])
- user_record = RegUser(hrn=person_hrn, \
- gid=person_gid,
- pointer='-1',
- authority=get_authority(person_hrn),
- email=person['email'])
- else:
- user_record = RegUser(hrn=person_hrn, \
- gid=person_gid,
- pointer='-1',
- authority=get_authority(person_hrn))
-
- if pubkey:
- user_record.reg_keys = [RegKey(pubkey)]
- else:
- self.logger.warning("No key found for user %s" \
- %(user_record))
-
- try:
- user_record.just_created()
- dbsession.add (user_record)
- dbsession.commit()
- self.logger.info("SlabImporter: imported person %s"\
- %(user_record))
- self.update_just_added_records_dict( user_record )
-
- except SQLAlchemyError:
- self.logger.log_exc("SlabImporter: \
- failed to import person %s"%(person))
- else:
- # update the record ?
- # if user's primary key has changed then we need to update
- # the users gid by forcing an update here
- sfa_keys = user_record.reg_keys
-
- new_key = False
- if slab_key is not sfa_keys :
- new_key = True
- if new_key:
- self.logger.info("SlabImporter: \t \t USER UPDATE \
- person: %s" %(person['hrn']))
- (pubkey, pkey) = self.init_person_key (person, slab_key)
- person_gid = \
- self.auth_hierarchy.create_gid(person_urn, \
- create_uuid(), pkey)
- if not pubkey:
- user_record.reg_keys = []
- else:
- user_record.reg_keys = [RegKey(pubkey)]
- self.logger.info("SlabImporter: updated person: %s" \
- % (user_record))
-
- if person['email']:
- user_record.email = person['email']
-
- try:
- dbsession.commit()
- user_record.stale = False
- except SQLAlchemyError:
- self.logger.log_exc("SlabImporter: \
- failed to update person %s"%(person))
-
- self.import_slice(slice_hrn, slice_record, user_record)
-
-
- def import_slice(self, slice_hrn, slice_record, user_record):
- """
-
- Create RegSlice record according to the slice hrn if the slice
- does not exist yet.Creates a relationship with the user record
- associated with the slice.
- Commit the record to the database.
-
-
- :param slice_hrn: Human readable name of the slice.
- :type slice_hrn: string
- :param slice_record: record of the slice found in the DB, if any.
- :type slice_record: RegSlice or None
- :param user_record: user record found in the DB if any.
- :type user_record: RegUser
-
- .. todo::Update the record if a slice record already exists.
- """
- if not slice_record :
- pkey = Keypair(create=True)
- urn = hrn_to_urn(slice_hrn, 'slice')
- slice_gid = \
- self.auth_hierarchy.create_gid(urn, \
- create_uuid(), pkey)
- slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid,
- pointer='-1',
- authority=get_authority(slice_hrn))
- try:
- slice_record.just_created()
- dbsession.add(slice_record)
- dbsession.commit()
-
-
- self.update_just_added_records_dict ( slice_record )
-
- except SQLAlchemyError:
- self.logger.log_exc("SlabImporter: failed to import slice")
-
- #No slice update upon import in senslab
- else:
- # xxx update the record ...
- self.logger.warning ("Slice update not yet implemented")
- pass
- # record current users affiliated with the slice
-
-
- slice_record.reg_researchers = [user_record]
- try:
- dbsession.commit()
- slice_record.stale = False
- except SQLAlchemyError:
- self.logger.log_exc("SlabImporter: failed to update slice")
-
-
- def run (self, options):
- """
- Create the special senslab table, slab_xp, in the senslab database.
- Import everything (users, slices, nodes and sites from OAR
- and LDAP) into the SFA database.
- Delete stale records that are no longer in OAR or LDAP.
- :param options:
- :type options:
- """
- config = Config()
-
- slabdriver = SlabDriver(config)
-
- #Create special slice table for senslab
-
- if not slabdriver.db.exists('slab_xp'):
- slabdriver.db.createtable()
- self.logger.info ("SlabImporter.run: slab_xp table created ")
-
-
- # import site and node records in site into the SFA db.
- self.import_sites_and_nodes(slabdriver)
- #import users and slice into the SFA DB.
- self.import_persons_and_slices(slabdriver)
-
- ### remove stale records
- # special records must be preserved
- system_hrns = [slabdriver.hrn, slabdriver.slab_api.root_auth, \
- slabdriver.hrn+ '.slicemanager']
- for record in self.all_records:
- if record.hrn in system_hrns:
- record.stale = False
- if record.peer_authority:
- record.stale = False
-
-
- for record in self.all_records:
- if record.type == 'user':
- self.logger.info("SlabImporter: stale records: hrn %s %s" \
- %(record.hrn,record.stale) )
- try:
- stale = record.stale
- except :
- stale = True
- self.logger.warning("stale not found with %s"%record)
- if stale:
- self.logger.info("SlabImporter: deleting stale record: %s" \
- %(record))
-
- try:
- dbsession.delete(record)
- dbsession.commit()
- except SQLAlchemyError:
- self.logger.log_exc("SlabImporter: failed to delete stale \
- record %s" %(record) )
-
-
--- /dev/null
+"""
+This API is adapted for OpenLDAP. The file contains all LDAP classes and methods
+needed to:
+- Load the LDAP connection configuration file (login, address..) with LdapConfig
+- Connect to LDAP with ldap_co
+- Create a unique LDAP login and password for a user based on his email or last
+name and first name with LoginPassword.
+- Manage entries in LDAP using SFA records with LDAPapi (Search, Add, Delete,
+Modify)
+
+"""
+import random
+from passlib.hash import ldap_salted_sha1 as lssha
+
+from sfa.util.xrn import get_authority
+from sfa.util.sfalogging import logger
+from sfa.util.config import Config
+
+import ldap
+import ldap.modlist as modlist
+
+import os.path
+
+
+ class LdapConfig():
+ """
+ Ldap configuration class loads the configuration file and sets the
+ ldap IP address, password, people dn, web dn, group dn. All these settings
+ were defined in a separate file ldap_config.py to avoid sharing them in
+ the SFA git as it contains sensitive information.
+
+ """
+ def __init__(self, config_file='/etc/sfa/ldap_config.py'):
+ """Loads configuration from file /etc/sfa/ldap_config.py and set the
+ parameters for connection to LDAP.
+
+ :param config_file: path of the python configuration file to load.
+ :raises IOError: when the configuration file cannot be found or read.
+ """
+
+ try:
+ # the config file is plain python: executing it populates this
+ # instance's __dict__ with the LDAP_* settings it defines.
+ # NOTE: execfile is Python 2 only, and runs arbitrary code
+ # from the config file -- keep the file root-owned.
+ execfile(config_file, self.__dict__)
+
+ self.config_file = config_file
+ # path to configuration data
+ self.config_path = os.path.dirname(config_file)
+ except IOError:
+ raise IOError, "Could not find or load the configuration file: %s" \
+ % config_file
+
+
+ class ldap_co:
+ """ Set admin login and server configuration variables."""
+
+ def __init__(self):
+ """Fetch LdapConfig attributes (Ldap server connection parameters)
+ and define port, version and subtree scope.
+
+ """
+ #Iotlab PROD LDAP parameters
+ self.ldapserv = None
+ ldap_config = LdapConfig()
+ self.config = ldap_config
+ self.ldapHost = ldap_config.LDAP_IP_ADDRESS
+ self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN
+ self.ldapGroupDN = ldap_config.LDAP_GROUP_DN
+ self.ldapAdminDN = ldap_config.LDAP_WEB_DN
+ self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD
+ self.ldapPort = ldap.PORT
+ self.ldapVersion = ldap.VERSION3
+ self.ldapSearchScope = ldap.SCOPE_SUBTREE
+
+ def connect(self, bind=True):
+ """Enables connection to the LDAP server.
+
+ :param bind: Set the bind parameter to True if a bind is needed
+ (for add/modify/delete operations). Set to False otherwise.
+ :type bind: boolean
+ :returns: dictionary with status of the connection. True if Successful,
+ False if not and in this case the error
+ message( {'bool', 'message'} ).
+ :rtype: dict
+
+ """
+ try:
+ # NOTE(review): ldap.open is deprecated in python-ldap in
+ # favor of ldap.initialize -- confirm against the python-ldap
+ # version in use
+ self.ldapserv = ldap.open(self.ldapHost)
+ except ldap.LDAPError, error:
+ return {'bool': False, 'message': error}
+
+ # Bind with authentification
+ if(bind):
+ return self.bind()
+
+ else:
+ return {'bool': True}
+
+ def bind(self):
+ """ Binding method.
+
+ :returns: dictionary with the bind status. True if Successful,
+ False if not and in this case the error message({'bool','message'})
+ :rtype: dict
+
+ """
+ try:
+ # Opens a connection after a call to ldap.open in connect:
+ # the handle created by connect() is replaced by this new one
+ self.ldapserv = ldap.initialize("ldap://" + self.ldapHost)
+
+ # Bind/authenticate with a user with appropriate
+ # rights to add objects
+ self.ldapserv.simple_bind_s(self.ldapAdminDN,
+ self.ldapAdminPassword)
+
+ except ldap.LDAPError, error:
+ return {'bool': False, 'message': error}
+
+ return {'bool': True}
+
+ def close(self):
+ """Close the LDAP connection.
+
+ Can throw an exception if the unbinding fails.
+
+ :returns: dictionary with the bind status if the unbinding failed and
+ in this case the dict contains an error message. The dictionary keys
+ are : ({'bool','message'}). Returns None when unbinding succeeds.
+ :rtype: dict or None
+
+ """
+ try:
+ self.ldapserv.unbind_s()
+ except ldap.LDAPError, error:
+ return {'bool': False, 'message': error}
+
+
+class LoginPassword():
+ """
+
+ Class to handle login and password generation, using custom login generation
+ algorithm.
+
+ """
+ def __init__(self):
+ """
+
+ Sets password and login maximum length, and defines the characters that
+ can be found in a random generated password.
+
+ """
+ # maximum number of characters kept in a generated login
+ self.login_max_length = 8
+ # number of characters of a generated password
+ self.length_password = 8
+ # alphabet used when generating a random password: ASCII letters,
+ # digits and a few punctuation characters (including the quote)
+ self.chars_password = ['!', '$', '(',')', '*', '+', ',', '-', '.',
+ '0', '1', '2', '3', '4', '5', '6', '7', '8',
+ '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
+ 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
+ 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
+ '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
+ 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
+ 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+ '\'']
+
@staticmethod
def clean_user_names(record):
    """
    Strip the special characters '-', '_', '[', ']' and ' ' from the
    user's first name and last name and lower-case them.

    :param record: user's record
    :type record: dict
    :returns: (lower_first_name, lower_last_name) when both names are
        present in the record, (None, None) otherwise.
    :rtype: string, string or None, None.

    """
    if 'first_name' not in record or 'last_name' not in record:
        return None, None

    def _normalize(name):
        # drop every special character, then lower-case the result
        for char in ('-', '_', '[', ']', ' '):
            name = name.replace(char, '')
        return name.lower()

    return _normalize(record['first_name']), _normalize(record['last_name'])
+
@staticmethod
def extract_name_from_email(record):
    """
    Derive a (first name, last name) pair from the user's email when the
    record holds no usable first/last names. The local part of the email
    is assumed to look like firstname<sep>lastname: it is lower-cased and
    split on the first separator found among '.', '_' and '-'. When no
    separator is present, the whole local part is used for both names.

    :param record: user's data
    :type record: dict
    :returns: the first name and last name taken from the user's email.
    :rtype: string, string

    """
    local_part = record['email'].split('@')[0].lower()
    # assume "firstname<sep>lastname" if any known separator is present
    for separator in ('.', '_', '-'):
        if separator in local_part:
            pieces = local_part.split(separator)
            return pieces[0], pieces[1]
    # no separator: reuse the whole local part for both names
    return local_part, local_part
+
+ def get_user_firstname_lastname(self, record):
+ """
+
+ Get the user first name and last name from the information we have in
+ the record.
+
+ :param record: user's information
+ :type record: dict
+ :returns: the user's first name and last name.
+
+ .. seealso:: clean_user_names
+ .. seealso:: extract_name_from_email
+
+ """
+ lower_first_name, lower_last_name = self.clean_user_names(record)
+
+ #No first name and last name check email
+ if lower_first_name is None and lower_last_name is None:
+
+ lower_first_name, lower_last_name = \
+ self.extract_name_from_email(record)
+
+ return lower_first_name, lower_last_name
+
+ def choose_sets_chars_for_login(self, lower_first_name, lower_last_name):
+ """
+
+ Algorithm to select sets of characters from the first name and last
+ name, depending on the lenght of the last name and the maximum login
+ length which in our case is set to 8 characters.
+
+ :param lower_first_name: user's first name in lower case.
+ :param lower_last_name: usr's last name in lower case.
+ :returns: user's login
+ :rtype: string
+
+ """
+ length_last_name = len(lower_last_name)
+ self.login_max_length = 8
+
+ #Try generating a unique login based on first name and last name
+
+ if length_last_name >= self.login_max_length:
+ login = lower_last_name[0:self.login_max_length]
+ index = 0
+ logger.debug("login : %s index : %s" % (login, index))
+ elif length_last_name >= 4:
+ login = lower_last_name
+ index = 0
+ logger.debug("login : %s index : %s" % (login, index))
+ elif length_last_name == 3:
+ login = lower_first_name[0:1] + lower_last_name
+ index = 1
+ logger.debug("login : %s index : %s" % (login, index))
+ elif length_last_name == 2:
+ if len(lower_first_name) >= 2:
+ login = lower_first_name[0:2] + lower_last_name
+ index = 2
+ logger.debug("login : %s index : %s" % (login, index))
+ else:
+ logger.error("LoginException : \
+ Generation login error with \
+ minimum four characters")
+
+ else:
+ logger.error("LDAP LdapGenerateUniqueLogin failed : \
+ impossible to generate unique login for %s %s"
+ % (lower_first_name, lower_last_name))
+ return index, login
+
+ def generate_password(self):
+ """
+
+ Generate a password upon adding a new user in LDAP Directory
+ (8 characters length). The generated password is composed of characters
+ from the chars_password list.
+
+ :returns: the randomly generated password
+ :rtype: string
+
+ """
+ password = str()
+
+ length = len(self.chars_password)
+ for index in range(self.length_password):
+ char_index = random.randint(0, length - 1)
+ password += self.chars_password[char_index]
+
+ return password
+
+    @staticmethod
+    def encrypt_password(password):
+        """
+
+        Use the passlib library to make an RFC2307 LDAP encrypted password.
+        Salt size is 8, uses the sha-1 algorithm.
+
+        :param password: password not encrypted.
+        :type password: string
+        :returns: Returns encrypted password (RFC2307 {SSHA} format).
+        :rtype: string
+
+        """
+        #Keep consistency with Java Iotlab's LDAP API
+        #RFC2307SSHAPasswordEncryptor so set the salt size to 8 bytes
+        return lssha.encrypt(password, salt_size=8)
+
+
+class LDAPapi:
+    """Defines functions to insert and search entries in the LDAP.
+
+    .. note:: class supposes the unix schema is used
+
+    """
+    def __init__(self):
+        logger.setLevelDebug()
+
+        #SFA related config
+
+        config = Config()
+        # Helper used to generate logins and passwords for new accounts.
+        self.login_pwd = LoginPassword()
+        self.authname = config.SFA_REGISTRY_ROOT_AUTH
+        # LDAP connection wrapper; also carries the LDAP-specific config.
+        self.conn = ldap_co()
+        self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS
+        self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN
+        self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER
+        self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH
+        # Base dn under which user entries are created and searched.
+        self.baseDN = self.conn.ldapPeopleDN
+        self.ldapShell = '/bin/bash'
+
+ def LdapGenerateUniqueLogin(self, record):
+ """
+
+ Generate login for adding a new user in LDAP Directory
+ (four characters minimum length). Get proper last name and
+ first name so that the user's login can be generated.
+
+ :param record: Record must contain first_name and last_name.
+ :type record: dict
+ :returns: the generated login for the user described with record if the
+ login generation is successful, None if it fails.
+ :rtype: string or None
+
+ """
+ #For compatibility with other ldap func
+ if 'mail' in record and 'email' not in record:
+ record['email'] = record['mail']
+
+ lower_first_name, lower_last_name = \
+ self.login_pwd.get_user_firstname_lastname(record)
+
+ index, login = self.login_pwd.choose_sets_chars_for_login(
+ lower_first_name, lower_last_name)
+
+ login_filter = '(uid=' + login + ')'
+ get_attrs = ['uid']
+ try:
+ #Check if login already in use
+
+ while (len(self.LdapSearch(login_filter, get_attrs)) is not 0):
+
+ index += 1
+ if index >= 9:
+ logger.error("LoginException : Generation login error \
+ with minimum four characters")
+ else:
+ try:
+ login = \
+ lower_first_name[0:index] + \
+ lower_last_name[0:
+ self.login_pwd.login_max_length
+ - index]
+ login_filter = '(uid=' + login + ')'
+ except KeyError:
+ print "lower_first_name - lower_last_name too short"
+
+ logger.debug("LDAP.API \t LdapGenerateUniqueLogin login %s"
+ % (login))
+ return login
+
+ except ldap.LDAPError, error:
+ logger.log_exc("LDAP LdapGenerateUniqueLogin Error %s" % (error))
+ return None
+
+ def find_max_uidNumber(self):
+ """Find the LDAP max uidNumber (POSIX uid attribute).
+
+ Used when adding a new user in LDAP Directory
+
+ :returns: max uidNumber + 1
+ :rtype: string
+
+ """
+ #First, get all the users in the LDAP
+ get_attrs = "(uidNumber=*)"
+ login_filter = ['uidNumber']
+
+ result_data = self.LdapSearch(get_attrs, login_filter)
+ #It there is no user in LDAP yet, First LDAP user
+ if result_data == []:
+ max_uidnumber = self.ldapUserUidNumberMin
+ #Otherwise, get the highest uidNumber
+ else:
+ uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data]
+ logger.debug("LDAPapi.py \tfind_max_uidNumber \
+ uidNumberList %s " % (uidNumberList))
+ max_uidnumber = max(uidNumberList) + 1
+
+ return str(max_uidnumber)
+
+
+    def get_ssh_pkey(self, record):
+        """TODO ; Get ssh public key from sfa record
+        To be filled by N. Turro ? or using GID pl way?
+
+        """
+        # Placeholder implementation: always returns the fixed marker
+        # string 'A REMPLIR ' ("TO FILL IN" in French); record is unused.
+        return 'A REMPLIR '
+
+ @staticmethod
+ #TODO Handle OR filtering in the ldap query when
+ #dealing with a list of records instead of doing a for loop in GetPersons
+ def make_ldap_filters_from_record(record=None):
+ """Helper function to make LDAP filter requests out of SFA records.
+
+ :param record: user's sfa record. Should contain first_name,last_name,
+ email or mail, and if the record is enabled or not. If the dict
+ record does not have all of these, must at least contain the user's
+ email.
+ :type record: dict
+ :returns: LDAP request
+ :rtype: string
+
+ """
+ req_ldap = ''
+ req_ldapdict = {}
+ if record :
+ if 'first_name' in record and 'last_name' in record:
+ if record['first_name'] != record['last_name']:
+ req_ldapdict['cn'] = str(record['first_name'])+" "\
+ + str(record['last_name'])
+ if 'email' in record:
+ req_ldapdict['mail'] = record['email']
+ if 'mail' in record:
+ req_ldapdict['mail'] = record['mail']
+ if 'enabled' in record:
+ if record['enabled'] is True:
+ req_ldapdict['shadowExpire'] = '-1'
+ else:
+ req_ldapdict['shadowExpire'] = '0'
+
+ #Hrn should not be part of the filter because the hrn
+ #presented by a certificate of a SFA user not imported in
+ #Iotlab does not include the iotlab login in it
+ #Plus, the SFA user may already have an account with iotlab
+ #using another login.
+
+ logger.debug("\r\n \t LDAP.PY make_ldap_filters_from_record \
+ record %s req_ldapdict %s"
+ % (record, req_ldapdict))
+
+ for k in req_ldapdict:
+ req_ldap += '(' + str(k) + '=' + str(req_ldapdict[k]) + ')'
+ if len(req_ldapdict.keys()) >1 :
+ req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
+ size = len(req_ldap)
+ req_ldap = req_ldap[:(size-1)] + ')' + req_ldap[(size-1):]
+ else:
+ req_ldap = "(cn=*)"
+
+ return req_ldap
+
+    def make_ldap_attributes_from_record(self, record):
+        """
+
+        When adding a new user to Iotlab's LDAP, creates an attributes
+        dictionary from the SFA record understandable by LDAP. Generates the
+        user's LDAP login. User is automatically validated (account enabled)
+        and described as a SFA USER FROM OUTSIDE IOTLAB.
+
+        :param record: must contain the following keys and values:
+            first_name, last_name, mail, pkey (ssh key). A missing 'mail'
+            key raises KeyError; missing names or pkey fall back to the
+            uid / get_ssh_pkey respectively.
+        :type record: dict
+        :returns: dictionary of attributes using LDAP data structure model.
+        :rtype: dict
+
+        """
+
+        attrs = {}
+        attrs['objectClass'] = ["top", "person", "inetOrgPerson",
+                                "organizationalPerson", "posixAccount",
+                                "shadowAccount", "systemQuotas",
+                                "ldapPublicKey"]
+
+        attrs['uid'] = self.LdapGenerateUniqueLogin(record)
+        try:
+            attrs['givenName'] = str(record['first_name']).lower().capitalize()
+            attrs['sn'] = str(record['last_name']).lower().capitalize()
+            attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']
+            attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']
+
+        except KeyError:
+            # No first/last name available: fall back to the login.
+            attrs['givenName'] = attrs['uid']
+            attrs['sn'] = attrs['uid']
+            attrs['cn'] = attrs['uid']
+            attrs['gecos'] = attrs['uid']
+
+        attrs['quota'] = self.ldapUserQuotaNFS
+        attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']
+        attrs['loginShell'] = self.ldapShell
+        attrs['gidNumber'] = self.ldapUserGidNumber
+        attrs['uidNumber'] = self.find_max_uidNumber()
+        attrs['mail'] = record['mail'].lower()
+        try:
+            attrs['sshPublicKey'] = record['pkey']
+        except KeyError:
+            attrs['sshPublicKey'] = self.get_ssh_pkey(record)
+
+
+        #Password is automatically generated because SFA user don't go
+        #through the Iotlab website used to register new users,
+        #There is no place in SFA where users can enter such information
+        #yet.
+        #If the user wants to set his own password , he must go to the Iotlab
+        #website.
+        password = self.login_pwd.generate_password()
+        attrs['userPassword'] = self.login_pwd.encrypt_password(password)
+
+        #Account automatically validated (no mail request to admins)
+        #Set to 0 to disable the account, -1 to enable it,
+        attrs['shadowExpire'] = '-1'
+
+        #Motivation field in Iotlab
+        attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'
+
+        attrs['ou'] = 'SFA'         #Optional: organizational unit
+        #No info about those here:
+        attrs['l'] = 'To be defined'#Optional: Locality.
+        attrs['st'] = 'To be defined' #Optional: state or province (country).
+
+        return attrs
+
+
+
+    def LdapAddUser(self, record) :
+        """Add SFA user to LDAP if it is not in LDAP yet.
+
+        :param record: dictionary with the user's data.
+        :returns: a dictionary with the status (Fail= False, Success= True)
+            and the uid of the newly added user if successful, or the error
+            message it is not. Dict has keys bool and message in case of
+            failure, and bool uid in case of success.
+        :rtype: dict
+
+        .. seealso:: make_ldap_filters_from_record
+
+        """
+        logger.debug(" \r\n \t LDAP LdapAddUser \r\n\r\n ================\r\n ")
+        user_ldap_attrs = self.make_ldap_attributes_from_record(record)
+
+        #Check if user already in LDAP with email, first name and last name
+        filter_by = self.make_ldap_filters_from_record(user_ldap_attrs)
+        user_exist = self.LdapSearch(filter_by)
+        if user_exist:
+            logger.warning(" \r\n \t LDAP LdapAddUser user %s %s \
+                        already exists" % (user_ldap_attrs['sn'],
+                           user_ldap_attrs['mail']))
+            return {'bool': False}
+
+        #Bind to the server
+        result = self.conn.connect()
+
+        if(result['bool']):
+
+            # A dict to help build the "body" of the object
+            logger.debug(" \r\n \t LDAP LdapAddUser attrs %s "
+                         % user_ldap_attrs)
+
+            # The dn of our new entry/object
+            dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN
+
+            try:
+                # Convert the attributes dict into the add-modlist format
+                # expected by add_s.
+                ldif = modlist.addModlist(user_ldap_attrs)
+                logger.debug("LDAPapi.py add attrs %s \r\n  ldif %s"
+                             % (user_ldap_attrs, ldif))
+                self.conn.ldapserv.add_s(dn, ldif)
+
+                logger.info("Adding user %s login %s in LDAP"
+                            % (user_ldap_attrs['cn'], user_ldap_attrs['uid']))
+            except ldap.LDAPError, error:
+                logger.log_exc("LDAP Add Error %s" % error)
+                return {'bool': False, 'message': error}
+
+            self.conn.close()
+            return {'bool': True, 'uid': user_ldap_attrs['uid']}
+        else:
+            # Connection failed: return the failure dict from connect().
+            return result
+
+ def LdapDelete(self, person_dn):
+ """Deletes a person in LDAP. Uses the dn of the user.
+
+ :param person_dn: user's ldap dn.
+ :type person_dn: string
+ :returns: dictionary with bool True if successful, bool False
+ and the error if not.
+ :rtype: dict
+
+ """
+ #Connect and bind
+ result = self.conn.connect()
+ if(result['bool']):
+ try:
+ self.conn.ldapserv.delete_s(person_dn)
+ self.conn.close()
+ return {'bool': True}
+
+ except ldap.LDAPError, error:
+ logger.log_exc("LDAP Delete Error %s" % error)
+ return {'bool': False, 'message': error}
+
+ def LdapDeleteUser(self, record_filter):
+ """Deletes a SFA person in LDAP, based on the user's hrn.
+
+ :param record_filter: Filter to find the user to be deleted. Must
+ contain at least the user's email.
+ :type record_filter: dict
+ :returns: dict with bool True if successful, bool False and error
+ message otherwise.
+ :rtype: dict
+
+ .. seealso:: LdapFindUser docstring for more info on record filter.
+ .. seealso:: LdapDelete for user deletion
+
+ """
+ #Find uid of the person
+ person = self.LdapFindUser(record_filter, [])
+ logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s"
+ % (record_filter, person))
+
+ if person:
+ dn = 'uid=' + person['uid'] + "," + self.baseDN
+ else:
+ return {'bool': False}
+
+ result = self.LdapDelete(dn)
+ return result
+
+ def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
+ """ Modifies a LDAP entry, replaces user's old attributes with
+ the new ones given.
+
+ :param dn: user's absolute name in the LDAP hierarchy.
+ :param old_attributes_dict: old user's attributes. Keys must match
+ the ones used in the LDAP model.
+ :param new_attributes_dict: new user's attributes. Keys must match
+ the ones used in the LDAP model.
+ :type dn: string
+ :type old_attributes_dict: dict
+ :type new_attributes_dict: dict
+ :returns: dict bool True if Successful, bool False if not.
+ :rtype: dict
+
+ """
+
+ ldif = modlist.modifyModlist(old_attributes_dict, new_attributes_dict)
+ # Connect and bind/authenticate
+ result = self.conn.connect()
+ if (result['bool']):
+ try:
+ self.conn.ldapserv.modify_s(dn, ldif)
+ self.conn.close()
+ return {'bool': True}
+ except ldap.LDAPError, error:
+ logger.log_exc("LDAP LdapModify Error %s" % error)
+ return {'bool': False}
+
+
+ def LdapModifyUser(self, user_record, new_attributes_dict):
+ """
+
+ Gets the record from one user based on the user sfa recordand changes
+ the attributes according to the specified new_attributes. Do not use
+ this if we need to modify the uid. Use a ModRDN operation instead
+ ( modify relative DN ).
+
+ :param user_record: sfa user record.
+ :param new_attributes_dict: new user attributes, keys must be the
+ same as the LDAP model.
+ :type user_record: dict
+ :type new_attributes_dict: dict
+ :returns: bool True if successful, bool False if not.
+ :rtype: dict
+
+ .. seealso:: make_ldap_filters_from_record for info on what is mandatory
+ in the user_record.
+ .. seealso:: make_ldap_attributes_from_record for the LDAP objectclass.
+
+ """
+ if user_record is None:
+ logger.error("LDAP \t LdapModifyUser Need user record ")
+ return {'bool': False}
+
+ #Get all the attributes of the user_uid_login
+ #person = self.LdapFindUser(record_filter,[])
+ req_ldap = self.make_ldap_filters_from_record(user_record)
+ person_list = self.LdapSearch(req_ldap, [])
+ logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s"
+ % (person_list))
+
+ if person_list and len(person_list) > 1:
+ logger.error("LDAP \t LdapModifyUser Too many users returned")
+ return {'bool': False}
+ if person_list is None:
+ logger.error("LDAP \t LdapModifyUser User %s doesn't exist "
+ % (user_record))
+ return {'bool': False}
+
+ # The dn of our existing entry/object
+ #One result only from ldapSearch
+ person = person_list[0][1]
+ dn = 'uid=' + person['uid'][0] + "," + self.baseDN
+
+ if new_attributes_dict:
+ old = {}
+ for k in new_attributes_dict:
+ if k not in person:
+ old[k] = ''
+ else:
+ old[k] = person[k]
+ logger.debug(" LDAPapi.py \t LdapModifyUser new_attributes %s"
+ % (new_attributes_dict))
+ result = self.LdapModify(dn, old, new_attributes_dict)
+ return result
+ else:
+ logger.error("LDAP \t LdapModifyUser No new attributes given. ")
+ return {'bool': False}
+
+
+ def LdapMarkUserAsDeleted(self, record):
+ """
+
+ Sets shadowExpire to 0, disabling the user in LDAP. Calls LdapModifyUser
+ to change the shadowExpire of the user.
+
+ :param record: the record of the user who has to be disabled.
+ Should contain first_name,last_name, email or mail, and if the
+ record is enabled or not. If the dict record does not have all of
+ these, must at least contain the user's email.
+ :type record: dict
+ :returns: {bool: True} if successful or {bool: False} if not
+ :rtype: dict
+
+ .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
+ """
+
+ new_attrs = {}
+ #Disable account
+ new_attrs['shadowExpire'] = '0'
+ logger.debug(" LDAPapi.py \t LdapMarkUserAsDeleted ")
+ ret = self.LdapModifyUser(record, new_attrs)
+ return ret
+
+ def LdapResetPassword(self, record):
+ """Resets password for the user whose record is the parameter and
+ changes the corresponding entry in the LDAP.
+
+ :param record: user's sfa record whose Ldap password must be reset.
+ Should contain first_name,last_name,
+ email or mail, and if the record is enabled or not. If the dict
+ record does not have all of these, must at least contain the user's
+ email.
+ :type record: dict
+ :returns: return value of LdapModifyUser. True if successful, False
+ otherwise.
+
+ .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
+
+ """
+ password = self.login_pwd.generate_password()
+ attrs = {}
+ attrs['userPassword'] = self.login_pwd.encrypt_password(password)
+ logger.debug("LDAP LdapResetPassword encrypt_password %s"
+ % (attrs['userPassword']))
+ result = self.LdapModifyUser(record, attrs)
+ return result
+
+
+    def LdapSearch(self, req_ldap=None, expected_fields=None):
+        """
+        Used to search directly in LDAP, by using ldap filters and return
+        fields. When req_ldap is None, returns all the entries in the LDAP.
+
+        :param req_ldap: ldap style request, with appropriate filters,
+            example: (cn=*).
+        :param expected_fields: Fields in the user ldap entry that has to be
+            returned. If None is provided, will return 'mail', 'givenName',
+            'sn', 'uid', 'sshPublicKey', 'shadowExpire'.
+        :type req_ldap: string
+        :type expected_fields: list
+        :returns: a list of (dn, attributes) tuples; [] on LDAP error;
+            None when the connection could not be established.
+
+        .. seealso:: make_ldap_filters_from_record for req_ldap format.
+
+        """
+        # Anonymous (non-bound) connection is enough for searching.
+        result = self.conn.connect(bind=False)
+        if (result['bool']):
+
+            return_fields_list = []
+            if expected_fields is None:
+                return_fields_list = ['mail', 'givenName', 'sn', 'uid',
+                                      'sshPublicKey', 'shadowExpire']
+            else:
+                return_fields_list = expected_fields
+            #No specific request specified, get the whole LDAP
+            if req_ldap is None:
+                req_ldap = '(cn=*)'
+
+            logger.debug("LDAP.PY \t LdapSearch  req_ldap %s \
+                                    return_fields_list %s" \
+                                    %(req_ldap, return_fields_list))
+
+            try:
+                # Asynchronous search; the result is fetched below.
+                msg_id = self.conn.ldapserv.search(
+                    self.baseDN, ldap.SCOPE_SUBTREE,
+                    req_ldap, return_fields_list)
+                #Get all the results matching the search from ldap in one
+                #shot (1 value)
+                result_type, result_data = \
+                    self.conn.ldapserv.result(msg_id, 1)
+
+                self.conn.close()
+
+                logger.debug("LDAP.PY \t LdapSearch  result_data %s"
+                             % (result_data))
+
+                return result_data
+
+            except ldap.LDAPError, error:
+                logger.log_exc("LDAP LdapSearch Error %s" % error)
+                return []
+
+        else:
+            # NOTE: returns None here, unlike the [] returned on LDAPError;
+            # callers must be prepared for both.
+            logger.error("LDAP.PY \t Connection Failed")
+            return
+
+    def _process_ldap_info_for_all_users(self, result_data):
+        """Process the data of all enabled users in LDAP.
+
+        :param result_data: Contains information of all enabled users in LDAP
+            and is coming from LdapSearch.
+        :param result_data: list
+        :returns: list of user dicts (type, pkey, uid, email, first_name,
+            last_name, serial, authority, peer_authority, pointer, hrn),
+            or None when an entry lacks an expected attribute.
+
+        .. seealso:: LdapSearch
+
+        """
+        results = []
+        logger.debug(" LDAP.py _process_ldap_info_for_all_users result_data %s "
+                     % (result_data))
+        for ldapentry in result_data:
+            logger.debug(" LDAP.py _process_ldap_info_for_all_users \
+                        ldapentry name : %s " % (ldapentry[1]['uid'][0]))
+            tmpname = ldapentry[1]['uid'][0]
+            hrn = self.authname + "." + tmpname
+
+            # "unknown" is the placeholder the LDAP uses for a missing mail.
+            tmpemail = ldapentry[1]['mail'][0]
+            if ldapentry[1]['mail'][0] == "unknown":
+                tmpemail = None
+
+            try:
+                results.append({
+                    'type': 'user',
+                    'pkey': ldapentry[1]['sshPublicKey'][0],
+                    #'uid': ldapentry[1]['uid'][0],
+                    'uid': tmpname ,
+                    'email':tmpemail,
+                    #'email': ldapentry[1]['mail'][0],
+                    'first_name': ldapentry[1]['givenName'][0],
+                    'last_name': ldapentry[1]['sn'][0],
+                    #'phone': 'none',
+                    'serial': 'none',
+                    'authority': self.authname,
+                    'peer_authority': '',
+                    'pointer': -1,
+                    'hrn': hrn,
+                    })
+            except KeyError, error:
+                # NOTE(review): a single malformed entry aborts the whole
+                # listing and returns None, discarding users already
+                # collected — confirm this is intended.
+                logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s"
+                               % (error))
+                return
+
+        return results
+
+    def _process_ldap_info_for_one_user(self, record, result_data):
+        """
+
+        Put the user's ldap data into shape. Only deals with one user
+        record and one user data from ldap.
+
+        :param record: user record
+        :param result_data: Raw ldap data coming from LdapSearch
+        :returns: user's data dict with 'type','pkey','uid', 'email',
+            'first_name' 'last_name''serial''authority''peer_authority'
+            'pointer''hrn'
+        :type record: dict
+        :type result_data: list
+        :rtype :dict
+
+        """
+        #One entry only in the ldap data because we used a filter
+        #to find one user only
+        ldapentry = result_data[0][1]
+        logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" % (ldapentry))
+        tmpname = ldapentry['uid'][0]
+
+        # "unknown" is the placeholder the LDAP uses for a missing mail.
+        tmpemail = ldapentry['mail'][0]
+        if ldapentry['mail'][0] == "unknown":
+            tmpemail = None
+
+        parent_hrn = None
+        peer_authority = None
+        if 'hrn' in record:
+            hrn = record['hrn']
+            parent_hrn = get_authority(hrn)
+            if parent_hrn != self.authname:
+                peer_authority = parent_hrn
+            #In case the user was not imported from Iotlab LDAP
+            #but from another federated site, has an account in
+            #iotlab but currently using his hrn from federated site
+            #then the login is different from the one found in its hrn
+            if tmpname != hrn.split('.')[1]:
+                hrn = None
+        else:
+            hrn = None
+
+        results = {
+            'type': 'user',
+            #NOTE(review): here pkey is the full attribute *list*, while
+            #_process_ldap_info_for_all_users takes element [0] — confirm
+            #which shape callers expect.
+            'pkey': ldapentry['sshPublicKey'],
+            #'uid': ldapentry[1]['uid'][0],
+            'uid': tmpname,
+            'email': tmpemail,
+            #'email': ldapentry[1]['mail'][0],
+            'first_name': ldapentry['givenName'][0],
+            'last_name': ldapentry['sn'][0],
+            #'phone': 'none',
+            'serial': 'none',
+            'authority': parent_hrn,
+            'peer_authority': peer_authority,
+            'pointer': -1,
+            'hrn': hrn,
+                    }
+        return results
+
+    def LdapFindUser(self, record=None, is_user_enabled=None,
+                     expected_fields=None):
+        """
+
+        Search a SFA user with a hrn. User should be already registered
+        in Iotlab LDAP.
+
+        :param record: sfa user's record. Should contain first_name,last_name,
+            email or mail. If no record is provided, returns all the users found
+            in LDAP.
+        :type record: dict
+        :param is_user_enabled: is the user's iotlab account already valid.
+        :type is_user_enabled: Boolean.
+        :param expected_fields: LDAP attributes to fetch; defaults to mail,
+            givenName, sn, uid, sshPublicKey.
+        :type expected_fields: list
+        :returns: LDAP entries from ldap matching the filter provided. Returns
+            a single entry if one filter has been given and a list of
+            entries otherwise. None when nothing matched.
+        :rtype: dict or list or None
+
+        """
+        # Merge the enabled flag into a copy of the caller's record so
+        # the caller's dict is not mutated.
+        custom_record = {}
+        if is_user_enabled:
+            custom_record['enabled'] = is_user_enabled
+        if record:
+            custom_record.update(record)
+
+        req_ldap = self.make_ldap_filters_from_record(custom_record)
+        return_fields_list = []
+        if expected_fields is None:
+            return_fields_list = ['mail', 'givenName', 'sn', 'uid',
+                                  'sshPublicKey']
+        else:
+            return_fields_list = expected_fields
+
+        result_data = self.LdapSearch(req_ldap, return_fields_list)
+        logger.debug("LDAP.PY \t LdapFindUser  result_data %s" % (result_data))
+
+        if len(result_data) == 0:
+            return None
+        #Asked for a specific user
+        if record is not None:
+            results = self._process_ldap_info_for_one_user(record, result_data)
+
+        else:
+        #Asked for all users in ldap
+            results = self._process_ldap_info_for_all_users(result_data)
+        return results
\ No newline at end of file
--- /dev/null
+"""
+File used to handle issuing requests to OAR and parsing OAR's JSON responses.
+Contains the following classes:
+- JsonPage : handles multiple pages OAR answers.
+- OARRestapi : handles issuing POST or GET requests to OAR.
+- ParsingResourcesFull : dedicated to parsing OAR's answer to a get resources
+full request.
+- OARGETParser : handles parsing the Json answers to different GET requests.
+
+"""
+from httplib import HTTPConnection, HTTPException, NotConnected
+import json
+from sfa.util.config import Config
+from sfa.util.sfalogging import logger
+import os.path
+
+
+class JsonPage:
+
+    """Class used to manipulate json pages given by OAR.
+
+    In case the json answer from a GET request is too big to fit in one json
+    page, this class provides helper methods to retrieve all the pages and
+    store them in a list before putting them into one single json dictionary,
+    facilitating the parsing.
+
+    """
+
+    def __init__(self):
+        """Defines attributes to manipulate and parse the json pages.
+
+        """
+        #All are boolean variables
+        self.concatenate = False
+        #Indicates end of data, no more pages to be loaded.
+        self.end = False
+        self.next_page = False
+        #Next query address (query-string suffix, e.g. "?offset=...")
+        self.next_offset = None
+        #Json page
+        self.raw_json = None
+
+    def FindNextPage(self):
+        """
+        Gets next data page from OAR when the query's results are too big to
+        be transmitted in a single page. Uses the "links" item in the json
+        returned to check if an additional page has to be loaded. Updates
+        object attributes next_page, next_offset, and end.
+
+        """
+        if "links" in self.raw_json:
+            for page in self.raw_json['links']:
+                # A 'next' link means at least one more page follows.
+                if page['rel'] == 'next':
+                    self.concatenate = True
+                    self.next_page = True
+                    self.next_offset = "?" + page['href'].split("?")[1]
+                    return
+
+        if self.concatenate:
+            # We were paginating and no 'next' link remains: last page.
+            self.end = True
+            self.next_page = False
+            self.next_offset = None
+
+            return
+
+        #Otherwise, no next page and no concatenate, must be a single page
+        #Concatenate the single page and get out of here.
+        else:
+            self.next_page = False
+            self.concatenate = True
+            self.next_offset = None
+            return
+
+    @staticmethod
+    def ConcatenateJsonPages(saved_json_list):
+        """
+        If the json answer is too big to be contained in a single page,
+        all the pages have to be loaded and saved before being appended to the
+        first page.
+
+        :param saved_json_list: list of all the stored pages, including the
+            first page.
+        :type saved_json_list: list
+        :returns: Returns a dictionary with all the pages saved in the
+            saved_json_list. The key of the dictionary is 'items'.
+        :rtype: dict
+
+
+        .. seealso:: SendRequest
+        .. warning:: Assumes the apilib is 0.2.10 (with the 'items' key in the
+            raw json dictionary)
+
+        """
+        #reset items list
+
+        tmp = {}
+        tmp['items'] = []
+
+        for page in saved_json_list:
+            tmp['items'].extend(page['items'])
+        return tmp
+
+    def ResetNextPage(self):
+        """
+        Resets all the Json page attributes (next_page, next_offset,
+        concatenate, end). Has to be done before getting another json answer
+        so that the previous page status does not affect the new json load.
+
+        """
+        self.next_page = True
+        self.next_offset = None
+        self.concatenate = False
+        self.end = False
+
+
+class OARrestapi:
+    """Class used to connect to the OAR server and to send GET and POST
+    requests.
+
+    """
+
+    # classes attributes
+
+    # uris of the supported POST requests; "id" in the uri is replaced by
+    # the job id at request time.
+    OAR_REQUEST_POST_URI_DICT = {'POST_job': {'uri': '/oarapi/jobs.json'},
+                                 'DELETE_jobs_id':
+                                 {'uri': '/oarapi/jobs/id.json'},
+                                 }
+
+    POST_FORMAT = {'json': {'content': "application/json", 'object': json}}
+
+    #OARpostdatareqfields = {'resource' :"/nodes=", 'command':"sleep", \
+                            #'workdir':"/home/", 'walltime':""}
+
+    def __init__(self, config_file='/etc/sfa/oar_config.py'):
+        self.oarserver = {}
+        self.oarserver['uri'] = None
+        self.oarserver['postformat'] = 'json'
+
+        try:
+            # Executes the config file in this instance's namespace; it is
+            # expected to define at least OAR_IP and OAR_PORT (read below).
+            execfile(config_file, self.__dict__)
+
+            self.config_file = config_file
+            # path to configuration data
+            self.config_path = os.path.dirname(config_file)
+
+        except IOError:
+            raise IOError, "Could not find or load the configuration file: %s" \
+                % config_file
+        #logger.setLevelDebug()
+        self.oarserver['ip'] = self.OAR_IP
+        self.oarserver['port'] = self.OAR_PORT
+        # All job states OAR can report.
+        self.jobstates = ['Terminated', 'Hold', 'Waiting', 'toLaunch',
+                          'toError', 'toAckReservation', 'Launching',
+                          'Finishing', 'Running', 'Suspended', 'Resuming',
+                          'Error']
+
+        self.parser = OARGETParser(self)
+
+
+ def GETRequestToOARRestAPI(self, request, strval=None,
+ next_page=None, username=None):
+
+ """Makes a GET request to OAR.
+
+ Fetch the uri associated with the resquest stored in
+ OARrequests_uri_dict, adds the username if needed and if available, adds
+ strval to the request uri if needed, connects to OAR and issues the GET
+ request. Gets the json reply.
+
+ :param request: One of the known get requests that are keys in the
+ OARrequests_uri_dict.
+ :param strval: used when a job id has to be specified.
+ :param next_page: used to tell OAR to send the next page for this
+ Get request. Is appended to the GET uri.
+ :param username: used when a username has to be specified, when looking
+ for jobs scheduled by a particular user for instance.
+
+ :type request: string
+ :type strval: integer
+ :type next_page: boolean
+ :type username: string
+ :returns: a json dictionary if OAR successfully processed the GET
+ request.
+
+ .. seealso:: OARrequests_uri_dict
+ """
+ self.oarserver['uri'] = \
+ OARGETParser.OARrequests_uri_dict[request]['uri']
+ #Get job details with username
+ if 'owner' in OARGETParser.OARrequests_uri_dict[request] and username:
+ self.oarserver['uri'] += \
+ OARGETParser.OARrequests_uri_dict[request]['owner'] + username
+ headers = {}
+ data = json.dumps({})
+ logger.debug("OARrestapi \tGETRequestToOARRestAPI %s" % (request))
+ if strval:
+ self.oarserver['uri'] = self.oarserver['uri'].\
+ replace("id", str(strval))
+
+ if next_page:
+ self.oarserver['uri'] += next_page
+
+ if username:
+ headers['X-REMOTE_IDENT'] = username
+
+ logger.debug("OARrestapi: \t GETRequestToOARRestAPI \
+ self.oarserver['uri'] %s strval %s"
+ % (self.oarserver['uri'], strval))
+ try:
+ #seems that it does not work if we don't add this
+ headers['content-length'] = '0'
+
+ conn = HTTPConnection(self.oarserver['ip'],
+ self.oarserver['port'])
+ conn.request("GET", self.oarserver['uri'], data, headers)
+ resp = conn.getresponse()
+ body = resp.read()
+ except Exception as error:
+ logger.log_exc("GET_OAR_SRVR : Connection error: %s "
+ % (error))
+ raise Exception ("GET_OAR_SRVR : Connection error %s " %(error))
+
+ finally:
+ conn.close()
+
+ # except HTTPException, error:
+ # logger.log_exc("GET_OAR_SRVR : Problem with OAR server : %s "
+ # % (error))
+ #raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
+ if resp.status >= 400:
+ raise ValueError ("Response Error %s, %s" %(resp.status,
+ resp.reason))
+ try:
+ js_dict = json.loads(body)
+ #print "\r\n \t\t\t js_dict keys" , js_dict.keys(), " \r\n", js_dict
+ return js_dict
+
+ except ValueError, error:
+ logger.log_exc("Failed to parse Server Response: %s ERROR %s"
+ % (body, error))
+ #raise ServerError("Failed to parse Server Response:" + js)
+
+
+ def POSTRequestToOARRestAPI(self, request, datadict, username=None):
+ """ Used to post a job on OAR , along with data associated
+ with the job.
+
+ """
+
+ #first check that all params for are OK
+ try:
+ self.oarserver['uri'] = \
+ self.OAR_REQUEST_POST_URI_DICT[request]['uri']
+
+ except KeyError:
+ logger.log_exc("OARrestapi \tPOSTRequestToOARRestAPI request not \
+ valid")
+ return
+ if datadict and 'strval' in datadict:
+ self.oarserver['uri'] = self.oarserver['uri'].replace("id", \
+ str(datadict['strval']))
+ del datadict['strval']
+
+ data = json.dumps(datadict)
+ headers = {'X-REMOTE_IDENT':username, \
+ 'content-type': self.POST_FORMAT['json']['content'], \
+ 'content-length':str(len(data))}
+ try :
+
+ conn = HTTPConnection(self.oarserver['ip'], \
+ self.oarserver['port'])
+ conn.request("POST", self.oarserver['uri'], data, headers)
+ resp = conn.getresponse()
+ body = resp.read()
+
+ except NotConnected:
+ logger.log_exc("POSTRequestToOARRestAPI NotConnected ERROR: \
+ data %s \r\n \t\n \t\t headers %s uri %s" \
+ %(data,headers,self.oarserver['uri']))
+ except Exception as error:
+ logger.log_exc("POST_OAR_SERVER : Connection error: %s "
+ % (error))
+ raise Exception ("POST_OAR_SERVER : Connection error %s " %(error))
+
+ finally:
+ conn.close()
+
+ if resp.status >= 400:
+ raise ValueError ("Response Error %s, %s" %(resp.status,
+ resp.reason))
+
+
+ try:
+ answer = json.loads(body)
+ logger.debug("POSTRequestToOARRestAPI : answer %s" % (answer))
+ return answer
+
+ except ValueError, error:
+ logger.log_exc("Failed to parse Server Response: error %s \
+ %s" %(error))
+ #raise ServerError("Failed to parse Server Response:" + answer)
+
+
class ParsingResourcesFull():
    """
    Class dedicated to parse the json response from a GET_resources_full from
    OAR.

    """
    def __init__(self):
        """
        Set the parsing dictionary. Works like a switch case, if the key is
        found in the dictionary, then the associated function is called.
        This is used in ParseNodes to create an usable dictionary from
        the Json returned by OAR when issuing a GET resources full request.

        .. seealso:: ParseNodes

        """
        self.resources_fulljson_dict = {
            'network_address': self.AddNodeNetworkAddr,
            'site': self.AddNodeSite,
            # 'radio': self.AddNodeRadio,
            'mobile': self.AddMobility,
            'x': self.AddPosX,
            'y': self.AddPosY,
            'z': self.AddPosZ,
            'archi': self.AddHardwareType,
            'state': self.AddBootState,
            'id': self.AddOarNodeId,
            'mobility_type': self.AddMobilityType,
        }

    def AddOarNodeId(self, tuplelist, value):
        """Add OAR's internal node id to the node's attributes.

        Appends tuple ('oar_id', node_id) to the tuplelist. Used by
        ParseNodes.

        .. seealso:: ParseNodes

        """
        tuplelist.append(('oar_id', int(value)))

    def AddNodeNetworkAddr(self, dictnode, value):
        """First parsing function to be called to parse the json returned by
        OAR answering a GET_resources (/oarapi/resources.json) request.

        When a new node is found in the json, this function is responsible
        for creating a new entry in the dictionary for storing information on
        this specific node. The key is the node network address, which is
        also the node's hostname. The value associated with the key is a
        tuple list containing all the node's attributes; the tuple list will
        later be turned into a dict.

        :param dictnode: should be set to the OARGETParser attribute
            node_dictlist. It will store the information on the nodes.
        :param value: the node_id, i.e. the network_address in the raw json.
        :type dictnode: dictionary
        :type value: string
        :returns: the node_id, used as key by the other parsing methods.

        .. seealso: ParseResources, ParseNodes
        """
        node_id = value
        dictnode[node_id] = [('node_id', node_id), ('hostname', node_id)]
        return node_id

    def AddNodeSite(self, tuplelist, value):
        """Add the node's site to the tuple list.

        :param tuplelist: tuple list on which to add the node's site.
            Contains the other node attributes as well.
        :param value: the node's site.
        :type tuplelist: list
        :type value: string

        .. seealso:: AddNodeNetworkAddr

        """
        tuplelist.append(('site', str(value)))

    # def AddNodeRadio(tuplelist, value):
    #     """Add the node's radio chipset type to the tuple list.
    #     (disabled -- kept for reference)
    #     """
    #     tuplelist.append(('radio', str(value)))

    def AddMobilityType(self, tuplelist, value):
        """Add which kind of mobility it is, train or roomba robot.

        :param tuplelist: tuple list on which to add the node's mobility
            type. The tuplelist is the value associated with the node's id
            in the OARGETParser's dictionary node_dictlist.
        :param value: mobility type, found in the json.
        :type tuplelist: list
        :type value: string

        """
        tuplelist.append(('mobility_type', str(value)))

    def AddMobility(self, tuplelist, value):
        """Add whether the node is a mobile node or not to the tuple list.

        :param tuplelist: tuple list on which to add the node's mobility
            status. The tuplelist is the value associated with the node's id
            in the OARGETParser's dictionary node_dictlist.
        :param value: tells if a node is a mobile node or not; found in the
            json.
        :type tuplelist: list
        :type value: integer

        .. seealso:: AddNodeNetworkAddr

        """
        # BUG FIX: was 'value is 0' -- an identity test that only works
        # because CPython interns small integers; use equality instead.
        if value == 0:
            tuplelist.append(('mobile', 'False'))
        else:
            tuplelist.append(('mobile', 'True'))

    def AddPosX(self, tuplelist, value):
        """Add the node's position on the x axis.

        :param tuplelist: tuple list on which to add the node's position.
            The tuplelist is the value associated with the node's id in the
            OARGETParser's dictionary node_dictlist.
        :param value: the position x.
        :type tuplelist: list
        :type value: integer

        .. seealso:: AddNodeNetworkAddr

        """
        tuplelist.append(('posx', value))

    def AddPosY(self, tuplelist, value):
        """Add the node's position on the y axis.

        :param tuplelist: tuple list on which to add the node's position.
            The tuplelist is the value associated with the node's id in the
            OARGETParser's dictionary node_dictlist.
        :param value: the position y.
        :type tuplelist: list
        :type value: integer

        .. seealso:: AddNodeNetworkAddr

        """
        tuplelist.append(('posy', value))

    def AddPosZ(self, tuplelist, value):
        """Add the node's position on the z axis.

        :param tuplelist: tuple list on which to add the node's position.
            The tuplelist is the value associated with the node's id in the
            OARGETParser's dictionary node_dictlist.
        :param value: the position z.
        :type tuplelist: list
        :type value: integer

        .. seealso:: AddNodeNetworkAddr

        """
        tuplelist.append(('posz', value))

    def AddBootState(self, tuplelist, value):
        """Add the node's state, Alive or Suspected.

        :param tuplelist: tuple list on which to add the node's state. The
            tuplelist is the value associated with the node's id in the
            OARGETParser's dictionary node_dictlist.
        :param value: node's state.
        :type tuplelist: list
        :type value: string

        .. seealso:: AddNodeNetworkAddr

        """
        # BUG FIX: the first parameter was misspelled 'tself'; it still
        # received the instance positionally, but the name was wrong.
        tuplelist.append(('boot_state', str(value)))

    def AddHardwareType(self, tuplelist, value):
        """Add the node's hardware model and radio chipset type to the tuple
        list.

        :param tuplelist: tuple list on which to add the node's architecture
            and radio chipset type.
        :param value: hardware type "archi:radio"; the value contains both
            the architecture and the radio chipset, separated by a colon.
        :type tuplelist: list
        :type value: string

        .. seealso:: AddNodeNetworkAddr

        """
        value_list = value.split(':')
        tuplelist.append(('archi', value_list[0]))
        tuplelist.append(('radio', value_list[1]))
+
+class OARGETParser:
+ """Class providing parsing methods associated to specific GET requests.
+
+ """
+
    def __init__(self, srv):
        # Template of the answer expected from GET_version; filled in by
        # ParseVersion.
        self.version_json_dict = {
            'api_version': None, 'apilib_version': None,
            'api_timezone': None, 'api_timestamp': None, 'oar_version': None}
        self.config = Config()
        self.interface_hrn = self.config.SFA_INTERFACE_HRN
        self.timezone_json_dict = {
            'timezone': None, 'api_timestamp': None, }
        #self.jobs_json_dict = {
        #'total' : None, 'links' : [],\
        #'offset':None , 'items' : [], }
        #self.jobs_table_json_dict = self.jobs_json_dict
        #self.jobs_details_json_dict = self.jobs_json_dict
        # srv: the OAR server proxy providing GETRequestToOARRestAPI.
        self.server = srv
        # node_dictlist: hostname -> node properties, filled by ParseNodes.
        self.node_dictlist = {}

        self.json_page = JsonPage()
        self.parsing_resourcesfull = ParsingResourcesFull()
        self.site_dict = {}
        self.jobs_list = []
        # NOTE(review): the constructor performs network I/O (queries the
        # OAR version) -- instantiation requires a reachable OAR server.
        self.SendRequest("GET_version")
+
+
+ def ParseVersion(self):
+ """Parses the OAR answer to the GET_version ( /oarapi/version.json.)
+
+ Finds the OAR apilib version currently used. Has an impact on the json
+ structure returned by OAR, so the version has to be known before trying
+ to parse the jsons returned after a get request has been issued.
+ Updates the attribute version_json_dict.
+
+ """
+
+ if 'oar_version' in self.json_page.raw_json:
+ self.version_json_dict.update(
+ api_version=self.json_page.raw_json['api_version'],
+ apilib_version=self.json_page.raw_json['apilib_version'],
+ api_timezone=self.json_page.raw_json['api_timezone'],
+ api_timestamp=self.json_page.raw_json['api_timestamp'],
+ oar_version=self.json_page.raw_json['oar_version'])
+ else:
+ self.version_json_dict.update(
+ api_version=self.json_page.raw_json['api'],
+ apilib_version=self.json_page.raw_json['apilib'],
+ api_timezone=self.json_page.raw_json['api_timezone'],
+ api_timestamp=self.json_page.raw_json['api_timestamp'],
+ oar_version=self.json_page.raw_json['oar'])
+
+ print self.version_json_dict['apilib_version']
+
+
+ def ParseTimezone(self):
+ """Get the timezone used by OAR.
+
+ Get the timezone from the answer to the GET_timezone request.
+ :return: api_timestamp and api timezone.
+ :rype: integer, integer
+
+ .. warning:: unused.
+ """
+ api_timestamp = self.json_page.raw_json['api_timestamp']
+ api_tz = self.json_page.raw_json['timezone']
+ return api_timestamp, api_tz
+
+ def ParseJobs(self):
+ """Called when a GET_jobs request has been issued to OAR.
+
+ Corresponds to /oarapi/jobs.json uri. Currently returns the raw json
+ information dict.
+ :returns: json_page.raw_json
+ :rtype: dictionary
+
+ .. warning:: Does not actually parse the information in the json. SA
+ 15/07/13.
+
+ """
+ self.jobs_list = []
+ print " ParseJobs "
+ return self.json_page.raw_json
+
+ def ParseJobsTable(self):
+ """In case we need to use the job table in the future.
+
+ Associated with the GET_jobs_table : '/oarapi/jobs/table.json uri.
+ .. warning:: NOT USED. DOES NOTHING.
+ """
+ print "ParseJobsTable"
+
    def ParseJobsDetails(self):
        """Currently only returns the same json in self.json_page.raw_json.

        .. todo:: actually parse the json
        .. warning:: currently, this function is not used a lot, so I have no
            idea what could be useful to parse, returning the full json. NT
        """
        # Deliberately a pass-through: the raw json is handed back unchanged.
        #logger.debug("ParseJobsDetails %s " %(self.json_page.raw_json))
        return self.json_page.raw_json
+
+
+ def ParseJobsIds(self):
+ """Associated with the GET_jobs_id OAR request.
+
+ Parses the json dict (OAR answer) to the GET_jobs_id request
+ /oarapi/jobs/id.json.
+
+
+ :returns: dictionary whose keys are listed in the local variable
+ job_resources and values that are in the json dictionary returned
+ by OAR with the job information.
+ :rtype: dict
+
+ """
+ job_resources = ['wanted_resources', 'name', 'id', 'start_time',
+ 'state', 'owner', 'walltime', 'message']
+
+ # Unused variable providing the contents of the json dict returned from
+ # get job resources full request
+ job_resources_full = [
+ 'launching_directory', 'links',
+ 'resubmit_job_id', 'owner', 'events', 'message',
+ 'scheduled_start', 'id', 'array_id', 'exit_code',
+ 'properties', 'state', 'array_index', 'walltime',
+ 'type', 'initial_request', 'stop_time', 'project',
+ 'start_time', 'dependencies', 'api_timestamp', 'submission_time',
+ 'reservation', 'stdout_file', 'types', 'cpuset_name',
+ 'name', 'wanted_resources', 'queue', 'stderr_file', 'command']
+
+
+ job_info = self.json_page.raw_json
+ #logger.debug("OARESTAPI ParseJobsIds %s" %(self.json_page.raw_json))
+ values = []
+ try:
+ for k in job_resources:
+ values.append(job_info[k])
+ return dict(zip(job_resources, values))
+
+ except KeyError:
+ logger.log_exc("ParseJobsIds KeyError ")
+
+
+ def ParseJobsIdResources(self):
+ """ Parses the json produced by the request
+ /oarapi/jobs/id/resources.json.
+ Returns a list of oar node ids that are scheduled for the
+ given job id.
+
+ """
+ job_resources = []
+ for resource in self.json_page.raw_json['items']:
+ job_resources.append(resource['id'])
+
+ return job_resources
+
    def ParseResources(self):
        """ Parses the json produced by a get_resources request on oar."""

        #logger.debug("OARESTAPI \tParseResources " )
        #resources are listed inside the 'items' list from the json
        self.json_page.raw_json = self.json_page.raw_json['items']
        # Fills self.node_dictlist as a side effect; no return value.
        self.ParseNodes()
+
+ def ParseReservedNodes(self):
+ """ Returns an array containing the list of the jobs scheduled
+ with the reserved nodes if available.
+
+ :returns: list of job dicts, each dict containing the following keys:
+ t_from, t_until, resources_ids (of the reserved nodes for this job).
+ If the information is not available, default values will be set for
+ these keys. The other keys are : state, lease_id and user.
+ :rtype: list
+
+ """
+
+ #resources are listed inside the 'items' list from the json
+ reservation_list = []
+ job = {}
+ #Parse resources info
+ for json_element in self.json_page.raw_json['items']:
+ #In case it is a real reservation (not asap case)
+ if json_element['scheduled_start']:
+ job['t_from'] = json_element['scheduled_start']
+ job['t_until'] = int(json_element['scheduled_start']) + \
+ int(json_element['walltime'])
+ #Get resources id list for the job
+ job['resource_ids'] = [node_dict['id'] for node_dict
+ in json_element['resources']]
+ else:
+ job['t_from'] = "As soon as possible"
+ job['t_until'] = "As soon as possible"
+ job['resource_ids'] = ["Undefined"]
+
+ job['state'] = json_element['state']
+ job['lease_id'] = json_element['id']
+
+ job['user'] = json_element['owner']
+ #logger.debug("OARRestapi \tParseReservedNodes job %s" %(job))
+ reservation_list.append(job)
+ #reset dict
+ job = {}
+ return reservation_list
+
+ def ParseRunningJobs(self):
+ """ Gets the list of nodes currently in use from the attributes of the
+ running jobs.
+
+ :returns: list of hostnames, the nodes that are currently involved in
+ running jobs.
+ :rtype: list
+
+
+ """
+ logger.debug("OARESTAPI \tParseRunningJobs_________________ ")
+ #resources are listed inside the 'items' list from the json
+ nodes = []
+ for job in self.json_page.raw_json['items']:
+ for node in job['nodes']:
+ nodes.append(node['network_address'])
+ return nodes
+
    def ChangeRawJsonDependingOnApilibVersion(self):
        """
        Check if the OAR apilib version is different from 0.2.10, in which case
        the Json answer is also dict instead as a plain list.

        .. warning:: the whole code is assuming the json contains a 'items' key
        .. seealso:: ConcatenateJsonPages, ParseJobs, ParseReservedNodes,
            ParseJobsIdResources, ParseResources, ParseRunningJobs
        .. todo:: Clean the whole code. Either suppose the apilib will always
            provide the 'items' key, or handle different options.
        """
        # Unwrap the 'items' list in place so downstream parsers see the
        # same shape regardless of the apilib version.
        if self.version_json_dict['apilib_version'] != "0.2.10":
            self.json_page.raw_json = self.json_page.raw_json['items']
+
    def ParseDeleteJobs(self):
        """ No need to parse anything in this function.A POST
        is done to delete the job.

        """
        # Deliberately empty: the DELETE answer carries nothing to parse.
        return
+
    def ParseResourcesFull(self):
        """ This method is responsible for parsing all the attributes
        of all the nodes returned by OAR when issuing a get resources full.
        The information from the nodes and the sites are separated.
        Updates the node_dictlist so that the dictionnary of the platform's
        nodes is available afterwards.

        :returns: node_dictlist, a list of dictionaries about the nodes and
            their properties.
        :rtype: list

        """
        logger.debug("OARRESTAPI ParseResourcesFull___________ ")
        #print self.json_page.raw_json[1]
        #resources are listed inside the 'items' list from the json
        self.ChangeRawJsonDependingOnApilibVersion()
        self.ParseNodes()
        self.ParseSites()
        # NOTE(review): despite the docstring, node_dictlist is a dict keyed
        # by hostname (see ParseNodes), not a list.
        return self.node_dictlist
+
    def ParseResourcesFullSites(self):
        """ Called by GetSites which is unused.
        Originally used to get information from the sites, with for each site
        the list of nodes it has, along with their properties.

        :return: site_dict, dictionary of sites
        :rtype: dict

        .. warning:: unused
        .. seealso:: GetSites (IotlabTestbedAPI)

        """
        # Same pipeline as ParseResourcesFull, but returns the site view
        # built by ParseSites instead of the node view.
        self.ChangeRawJsonDependingOnApilibVersion()
        self.ParseNodes()
        self.ParseSites()
        return self.site_dict
+
+
+ def ParseNodes(self):
+ """ Parse nodes properties from OAR
+ Put them into a dictionary with key = node id and value is a dictionary
+ of the node properties and properties'values.
+
+ """
+ node_id = None
+ _resources_fulljson_dict = \
+ self.parsing_resourcesfull.resources_fulljson_dict
+ keys = _resources_fulljson_dict.keys()
+ keys.sort()
+
+ for dictline in self.json_page.raw_json:
+ node_id = None
+ # dictionary is empty and/or a new node has to be inserted
+ node_id = _resources_fulljson_dict['network_address'](
+ self.node_dictlist, dictline['network_address'])
+ for k in keys:
+ if k in dictline:
+ if k == 'network_address':
+ continue
+
+ _resources_fulljson_dict[k](
+ self.node_dictlist[node_id], dictline[k])
+
+ #The last property has been inserted in the property tuple list,
+ #reset node_id
+ #Turn the property tuple list (=dict value) into a dictionary
+ self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
+ node_id = None
+
+ @staticmethod
+ def iotlab_hostname_to_hrn(root_auth, hostname):
+ """
+ Transforms a node hostname into a SFA hrn.
+
+ :param root_auth: Name of the root authority of the SFA server. In
+ our case, it is set to iotlab.
+ :param hostname: node's hotname, given by OAR.
+ :type root_auth: string
+ :type hostname: string
+ :returns: inserts the root_auth and '.' before the hostname.
+ :rtype: string
+
+ """
+ return root_auth + '.' + hostname
+
+ def ParseSites(self):
+ """ Returns a list of dictionnaries containing the sites' attributes."""
+
+ nodes_per_site = {}
+ config = Config()
+ #logger.debug(" OARrestapi.py \tParseSites self.node_dictlist %s"\
+ #%(self.node_dictlist))
+ # Create a list of nodes per site_id
+ for node_id in self.node_dictlist:
+ node = self.node_dictlist[node_id]
+
+ if node['site'] not in nodes_per_site:
+ nodes_per_site[node['site']] = []
+ nodes_per_site[node['site']].append(node['node_id'])
+ else:
+ if node['node_id'] not in nodes_per_site[node['site']]:
+ nodes_per_site[node['site']].append(node['node_id'])
+
+ #Create a site dictionary whose key is site_login_base
+ # (name of the site) and value is a dictionary of properties,
+ # including the list of the node_ids
+ for node_id in self.node_dictlist:
+ node = self.node_dictlist[node_id]
+ node.update({'hrn': self.iotlab_hostname_to_hrn(self.interface_hrn,
+ node['hostname'])})
+ self.node_dictlist.update({node_id: node})
+
+ if node['site'] not in self.site_dict:
+ self.site_dict[node['site']] = {
+ 'site': node['site'],
+ 'node_ids': nodes_per_site[node['site']],
+ 'latitude': "48.83726",
+ 'longitude': "- 2.10336",
+ 'name': config.SFA_REGISTRY_ROOT_AUTH,
+ 'pcu_ids': [], 'max_slices': None,
+ 'ext_consortium_id': None,
+ 'max_slivers': None, 'is_public': True,
+ 'peer_site_id': None,
+ 'abbreviated_name': "iotlab", 'address_ids': [],
+ 'url': "https://portal.senslab.info", 'person_ids': [],
+ 'site_tag_ids': [], 'enabled': True, 'slice_ids': [],
+ 'date_created': None, 'peer_id': None
+ }
+
    # Dispatch table: maps each supported GET request name to its OAR REST
    # uri and to the (unbound) parsing method applied to the answer by
    # SendRequest. 'owner' entries hold the query fragment used to filter
    # by user name.
    OARrequests_uri_dict = {
        'GET_version':
        {'uri': '/oarapi/version.json', 'parse_func': ParseVersion},

        'GET_timezone':
        {'uri': '/oarapi/timezone.json', 'parse_func': ParseTimezone},

        'GET_jobs':
        {'uri': '/oarapi/jobs.json', 'parse_func': ParseJobs},

        'GET_jobs_id':
        {'uri': '/oarapi/jobs/id.json', 'parse_func': ParseJobsIds},

        'GET_jobs_id_resources':
        {'uri': '/oarapi/jobs/id/resources.json',
        'parse_func': ParseJobsIdResources},

        'GET_jobs_table':
        {'uri': '/oarapi/jobs/table.json', 'parse_func': ParseJobsTable},

        'GET_jobs_details':
        {'uri': '/oarapi/jobs/details.json', 'parse_func': ParseJobsDetails},

        'GET_reserved_nodes':
        {'uri':
        '/oarapi/jobs/details.json?state=Running,Waiting,Launching',
        'owner': '&user=', 'parse_func': ParseReservedNodes},

        'GET_running_jobs':
        {'uri': '/oarapi/jobs/details.json?state=Running',
        'parse_func': ParseRunningJobs},

        'GET_resources_full':
        {'uri': '/oarapi/resources/full.json',
        'parse_func': ParseResourcesFull},

        'GET_sites':
        {'uri': '/oarapi/resources/full.json',
        'parse_func': ParseResourcesFullSites},

        'GET_resources':
        {'uri': '/oarapi/resources.json', 'parse_func': ParseResources},

        'DELETE_jobs_id':
        {'uri': '/oarapi/jobs/id.json', 'parse_func': ParseDeleteJobs}}
+
+
+ def SendRequest(self, request, strval=None, username=None):
+ """ Connects to OAR , sends the valid GET requests and uses
+ the appropriate json parsing functions.
+
+ :returns: calls to the appropriate parsing function, associated with the
+ GET request
+ :rtype: depends on the parsing function called.
+
+ .. seealso:: OARrequests_uri_dict
+ """
+ save_json = None
+
+ self.json_page.ResetNextPage()
+ save_json = []
+
+ if request in self.OARrequests_uri_dict:
+ while self.json_page.next_page:
+ self.json_page.raw_json = self.server.GETRequestToOARRestAPI(
+ request,
+ strval,
+ self.json_page.next_offset,
+ username)
+ self.json_page.FindNextPage()
+ if self.json_page.concatenate:
+ save_json.append(self.json_page.raw_json)
+
+ if self.json_page.concatenate and self.json_page.end:
+ self.json_page.raw_json = \
+ self.json_page.ConcatenateJsonPages(save_json)
+
+ return self.OARrequests_uri_dict[request]['parse_func'](self)
+ else:
+ logger.error("OARRESTAPI OARGetParse __init__ : ERROR_REQUEST "
+ % (request))
--- /dev/null
+"""
+File providing methods to generate valid RSpecs for the Iotlab testbed.
+Contains methods to get information on slice, slivers, nodes and leases,
+formatting them and turn it into a RSpec.
+"""
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn, get_authority
+
+from sfa.rspecs.rspec import RSpec
+#from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.granularity import Granularity
+from sfa.rspecs.version_manager import VersionManager
+
+from sfa.rspecs.elements.versions.iotlabv1Node import IotlabPosition, \
+ IotlabNode, IotlabLocation
+
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import Xrn
+
+
def iotlab_xrn_to_hostname(xrn):
    """Extract a node's hostname from its xrn.

    :param xrn: The node's xrn identifier.
    :type xrn: Xrn (from sfa.util.xrn)

    :returns: node's hostname.
    :rtype: string

    """
    node_xrn = Xrn(xrn=xrn, type='node')
    return Xrn.unescape(node_xrn.get_leaf())
+
+
def iotlab_xrn_object(root_auth, hostname):
    """Build a valid xrn object from the node's hostname and the authority
    of the SFA server.

    :param hostname: the node's hostname.
    :param root_auth: the SFA root authority.
    :type hostname: string
    :type root_auth: string

    :returns: the iotlab node's xrn
    :rtype: Xrn

    """
    escaped_hostname = Xrn.escape(hostname)
    return Xrn(root_auth + '.' + escaped_hostname, type='node')
+
+
class IotlabAggregate:
    """Aggregate manager class for Iotlab. """

    # NOTE(review): these are *class-level* mutable attributes, shared by
    # every IotlabAggregate instance -- presumably caches; confirm whether
    # per-instance state was intended.
    sites = {}
    nodes = {}
    api = None
    interfaces = {}
    links = {}
    node_tags = {}

    prepared = False

    user_options = {}
+
    def __init__(self, driver):
        # driver: testbed driver object exposing iotlab_api and hrn.
        self.driver = driver
+
    def get_slice_and_slivers(self, slice_xrn, login=None):
        """
        Get the slices and the associated leases if any from the iotlab
        testbed. One slice can have mutliple leases.
        For each slice, get the nodes in the associated lease
        and create a sliver with the necessary info and insert it into the
        sliver dictionary, keyed on the node hostnames.
        Returns a dict of slivers based on the sliver's node_id.
        Called by get_rspec.


        :param slice_xrn: xrn of the slice
        :param login: user's login on iotlab ldap

        :type slice_xrn: string
        :type login: string
        :returns: a list of slices dict and a list of Sliver object
        :rtype: (list, list)

        .. note: There is no real slivers in iotlab, only leases. The goal
            is to be consistent with the SFA standard.

        """
        # NOTE(review): the early-return paths below return (None, {}) while
        # the success path returns (slices, slivers) -- a list; callers must
        # cope with both shapes.
        slivers = {}
        sfa_slice = None
        if slice_xrn is None:
            return (sfa_slice, slivers)
        slice_urn = hrn_to_urn(slice_xrn, 'slice')
        slice_hrn, _ = urn_to_hrn(slice_xrn)
        slice_name = slice_hrn

        # GetSlices always returns a list, even if there is only one element
        slices = self.driver.iotlab_api.GetSlices(slice_filter=str(slice_name),
                                                  slice_filter_type='slice_hrn',
                                                  login=login)

        logger.debug("IotlabAggregate api \tget_slice_and_slivers \
                      slice_hrn %s \r\n slices %s self.driver.hrn %s"
                     % (slice_hrn, slices, self.driver.hrn))
        if slices == []:
            return (sfa_slice, slivers)

        # sort slivers by node id , if there is a job
        #and therefore, node allocated to this slice
        # for sfa_slice in slices:
        sfa_slice = slices[0]
        try:
            node_ids_list = sfa_slice['node_ids']
        except KeyError:
            logger.log_exc("IOTLABAGGREGATE \t \
                            get_slice_and_slivers No nodes in the slice \
                            - KeyError ")
            node_ids_list = []
            # continue

        # One Sliver object per reserved node, keyed on the hostname.
        for node in node_ids_list:
            sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
            sliver_xrn.set_authority(self.driver.hrn)
            sliver = Sliver({'sliver_id': sliver_xrn.urn,
                             'name': sfa_slice['hrn'],
                             'type': 'iotlab-node',
                             'tags': []})

            slivers[node] = sliver

        #Add default sliver attribute :
        #connection information for iotlab
        # if get_authority(sfa_slice['hrn']) == self.driver.iotlab_api.root_auth:
        #     tmp = sfa_slice['hrn'].split('.')
        #     ldap_username = tmp[1].split('_')[0]
        #     ssh_access = None
        #     slivers['default_sliver'] = {'ssh': ssh_access,
        #                                  'login': ldap_username}
        # look in ldap:
        ldap_username = self.find_ldap_username_from_slice(sfa_slice)

        if ldap_username is not None:
            ssh_access = None
            slivers['default_sliver'] = {'ssh': ssh_access,
                                         'login': ldap_username}


        logger.debug("IOTLABAGGREGATE api get_slice_and_slivers slivers %s "
                     % (slivers))
        return (slices, slivers)
+
+ def find_ldap_username_from_slice(self, sfa_slice):
+ researchers = [sfa_slice['reg_researchers'][0].__dict__]
+ # look in ldap:
+ ldap_username = None
+ ret = self.driver.iotlab_api.GetPersons(researchers)
+ if len(ret) != 0:
+ ldap_username = ret[0]['uid']
+
+ return ldap_username
+
+
    def get_nodes(self, slices=None, slivers=[], options=None):
        """Returns the nodes in the slice using the rspec format, with all the
        nodes' properties.

        Fetch the nodes ids in the slices dictionary and get all the nodes
        properties from OAR. Makes a rspec dicitonary out of this and returns
        it. If the slice does not have any job running or scheduled, that is
        it has no reserved nodes, then returns an empty list.

        :param slices: list of slices (record dictionaries)
        :param slivers: the list of slivers in all the slices
        :type slices: list of dicts
        :type slivers: list of Sliver object (dictionaries)
        :returns: An empty list if the slice has no reserved nodes, a rspec
            list with all the nodes and their properties (a dict per node)
            otherwise.
        :rtype: list

        .. seealso:: get_slice_and_slivers

        """
        # NOTE(review): 'slivers=[]' is a mutable default argument; safe
        # only because slivers is never mutated in this method.
        # NT: the semantic of this function is not clear to me :
        # if slice is not defined, then all the nodes should be returned
        # if slice is defined, we should return only the nodes that
        # are part of this slice
        # but what is the role of the slivers parameter ?
        # So i assume that slice['node_ids'] will be the same as slivers for us
        slice_nodes_list = []
        if slices is not None:
            for one_slice in slices:
                try:
                    slice_nodes_list = one_slice['node_ids']
                    # if we are dealing with a slice that has no node just
                    # return an empty list. In iotlab a slice can have multiple
                    # jobs scheduled, so it either has at least one lease or
                    # not at all.
                except KeyError:
                    return []

        # get the granularity in second for the reservation system
        grain = self.driver.iotlab_api.GetLeaseGranularity()

        nodes = self.driver.iotlab_api.GetNodes()

        # NOTE(review): nodes_dict is filled below but never read afterwards.
        nodes_dict = {}

        #if slices, this means we got to list all the nodes given to this slice
        # Make a list of all the nodes in the slice before getting their
        #attributes
        rspec_nodes = []

        logger.debug("IOTLABAGGREGATE api get_nodes slices %s "
                     % (slices))


        reserved_nodes = self.driver.iotlab_api.GetNodesCurrentlyInUse()
        logger.debug("IOTLABAGGREGATE api get_nodes slice_nodes_list %s "
                     % (slice_nodes_list))
        for node in nodes:
            nodes_dict[node['node_id']] = node
            # Empty slice_nodes_list means "all nodes" (advertisement case).
            if slice_nodes_list == [] or node['hostname'] in slice_nodes_list:

                rspec_node = IotlabNode()
                # xxx how to retrieve site['login_base']
                #site_id=node['site_id']
                #site=sites_dict[site_id]

                rspec_node['mobile'] = node['mobile']
                rspec_node['archi'] = node['archi']
                rspec_node['radio'] = node['radio']

                iotlab_xrn = iotlab_xrn_object(self.driver.iotlab_api.root_auth,
                                               node['hostname'])
                rspec_node['component_id'] = iotlab_xrn.urn
                rspec_node['component_name'] = node['hostname']
                rspec_node['component_manager_id'] = \
                    hrn_to_urn(self.driver.iotlab_api.root_auth,
                               'authority+sa')

                # Iotlab's nodes are federated : there is only one authority
                # for all Iotlab sites, registered in SFA.
                # Removing the part including the site
                # in authority_id SA 27/07/12
                rspec_node['authority_id'] = rspec_node['component_manager_id']

                # do not include boot state (<available> element)
                #in the manifest rspec


                rspec_node['boot_state'] = node['boot_state']
                if node['hostname'] in reserved_nodes:
                    rspec_node['boot_state'] = "Reserved"
                rspec_node['exclusive'] = 'true'
                rspec_node['hardware_types'] = [HardwareType({'name': \
                                                'iotlab-node'})]


                location = IotlabLocation({'country':'France', 'site': \
                                            node['site']})
                rspec_node['location'] = location


                # Copy each positional field OAR knows about; missing fields
                # are only logged, not fatal.
                position = IotlabPosition()
                for field in position :
                    try:
                        position[field] = node[field]
                    except KeyError, error :
                        logger.log_exc("IOTLABAGGREGATE\t get_nodes \
                                                        position %s "% (error))

                rspec_node['position'] = position
                #rspec_node['interfaces'] = []

                # Granularity
                granularity = Granularity({'grain': grain})
                rspec_node['granularity'] = granularity
                rspec_node['tags'] = []
                if node['hostname'] in slivers:
                    # add sliver info
                    sliver = slivers[node['hostname']]
                    rspec_node['sliver_id'] = sliver['sliver_id']
                    rspec_node['client_id'] = node['hostname']
                    rspec_node['slivers'] = [sliver]

                    # slivers always provide the ssh service
                    login = Login({'authentication': 'ssh-keys', \
                                   'hostname': node['hostname'], 'port':'22', \
                                   'username': sliver['name']})
                    service = Services({'login': login})
                    rspec_node['services'] = [service]
                rspec_nodes.append(rspec_node)

        return (rspec_nodes)
+
    def get_all_leases(self, ldap_username):
        """

        Get list of lease dictionaries which all have the mandatory keys
        ('lease_id', 'hostname', 'site_id', 'name', 'start_time', 'duration').
        All the leases running or scheduled are returned.

        :param ldap_username: if ldap uid is not None, looks for the leases
            belonging to this user.
        :type ldap_username: string
        :returns: rspec lease dictionary with keys lease_id, component_id,
            slice_id, start_time, duration.
        :rtype: dict

        .. note::There is no filtering of leases within a given time frame.
            All the running or scheduled leases are returned. options
            removed SA 15/05/2013


        """

        #now = int(time.time())
        #lease_filter = {'clip': now }

        #if slice_record:
            #lease_filter.update({'name': slice_record['name']})

        #leases = self.driver.iotlab_api.GetLeases(lease_filter)

        logger.debug("IOTLABAGGREGATE  get_all_leases ldap_username %s "
                     % (ldap_username))
        leases = self.driver.iotlab_api.GetLeases(login=ldap_username)
        grain = self.driver.iotlab_api.GetLeaseGranularity()
        # site_ids = []
        rspec_leases = []
        for lease in leases:
            #as many leases as there are nodes in the job
            for node in lease['reserved_nodes']:
                rspec_lease = Lease()
                rspec_lease['lease_id'] = lease['lease_id']
                #site = node['site_id']
                iotlab_xrn = iotlab_xrn_object(self.driver.iotlab_api.root_auth,
                                               node)
                rspec_lease['component_id'] = iotlab_xrn.urn
                #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn,\
                                        #site, node['hostname'])
                try:
                    rspec_lease['slice_id'] = lease['slice_id']
                except KeyError:
                    #No info on the slice used in testbed_xp table
                    pass
                rspec_lease['start_time'] = lease['t_from']
                # NOTE(review): on Python 2 '/' floors when both operands are
                # ints, so the duration is expressed in whole grains.
                rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
                    / grain
                rspec_leases.append(rspec_lease)
        return rspec_leases
+
+ def get_rspec(self, slice_xrn=None, login=None, version=None,
+ options=None):
+ """
+ Returns xml rspec:
+ - a full advertisement rspec with the testbed resources if slice_xrn is
+ not specified.If a lease option is given, also returns the leases
+ scheduled on the testbed.
+ - a manifest Rspec with the leases and nodes in slice's leases if
+ slice_xrn is not None.
+
+ :param slice_xrn: srn of the slice
+ :type slice_xrn: string
+ :param login: user's uid (ldap login) on iotlab
+ :type login: string
+ :param version: can be set to sfa or iotlab
+ :type version: RSpecVersion
+ :param options: used to specify if the leases should also be included in
+ the returned rspec.
+ :type options: dict
+
+ :returns: Xml Rspec.
+ :rtype: XML
+
+
+ """
+
+ ldap_username= None
+ rspec = None
+ version_manager = VersionManager()
+ version = version_manager.get_version(version)
+ logger.debug("IotlabAggregate \t get_rspec ***version %s \
+ version.type %s version.version %s options %s \r\n"
+ % (version, version.type, version.version, options))
+
+ if slice_xrn is None:
+ rspec_version = version_manager._get_version(version.type,
+ version.version, 'ad')
+
+ else:
+ rspec_version = version_manager._get_version(
+ version.type, version.version, 'manifest')
+
+ slices, slivers = self.get_slice_and_slivers(slice_xrn, login)
+ if slice_xrn and slices is not None:
+ #Get user associated with this slice
+ #for one_slice in slices :
+ ldap_username = self.find_ldap_username_from_slice(slices[0])
+ # ldap_username = slices[0]['reg_researchers'][0].__dict__['hrn']
+ # # ldap_username = slices[0]['user']
+ # tmp = ldap_username.split('.')
+ # ldap_username = tmp[1]
+ logger.debug("IotlabAggregate \tget_rspec **** \
+ LDAP USERNAME %s \r\n" \
+ % (ldap_username))
+ #at this point sliver may be empty if no iotlab job
+ #is running for this user/slice.
+ rspec = RSpec(version=rspec_version, user_options=options)
+
+ logger.debug("\r\n \r\n IotlabAggregate \tget_rspec *** \
+ slice_xrn %s slices %s\r\n \r\n"
+ % (slice_xrn, slices))
+
+ if options is not None:
+ lease_option = options['list_leases']
+ else:
+ #If no options are specified, at least print the resources
+ lease_option = 'all'
+ #if slice_xrn :
+ #lease_option = 'all'
+
+ if lease_option in ['all', 'resources']:
+ #if not options.get('list_leases') or options.get('list_leases')
+ #and options['list_leases'] != 'leases':
+ nodes = self.get_nodes(slices, slivers)
+ logger.debug("\r\n")
+ logger.debug("IotlabAggregate \t lease_option %s \
+ get rspec ******* nodes %s"
+ % (lease_option, nodes))
+
+ sites_set = set([node['location']['site'] for node in nodes])
+
+ #In case creating a job, slice_xrn is not set to None
+ rspec.version.add_nodes(nodes)
+ if slice_xrn and slices is not None:
+ # #Get user associated with this slice
+ # #for one_slice in slices :
+ # ldap_username = slices[0]['reg_researchers']
+ # # ldap_username = slices[0]['user']
+ # tmp = ldap_username.split('.')
+ # ldap_username = tmp[1]
+ # # ldap_username = tmp[1].split('_')[0]
+
+ logger.debug("IotlabAggregate \tget_rspec **** \
+ version type %s ldap_ user %s \r\n" \
+ % (version.type, ldap_username))
+ if version.type == "Iotlab":
+ rspec.version.add_connection_information(
+ ldap_username, sites_set)
+
+ default_sliver = slivers.get('default_sliver', [])
+ if default_sliver and len(nodes) is not 0:
+ #default_sliver_attribs = default_sliver.get('tags', [])
+ logger.debug("IotlabAggregate \tget_rspec **** \
+ default_sliver%s \r\n" % (default_sliver))
+ for attrib in default_sliver:
+ rspec.version.add_default_sliver_attribute(
+ attrib, default_sliver[attrib])
+
+ if lease_option in ['all','leases']:
+ leases = self.get_all_leases(ldap_username)
+ rspec.version.add_leases(leases)
+ logger.debug("IotlabAggregate \tget_rspec **** \
+ FINAL RSPEC %s \r\n" % (rspec.toxml()))
+ return rspec.toxml()
+"""
+File containing the IotlabTestbedAPI, used to interact with nodes, users,
+slices, leases and keys, as well as the dedicated iotlab database and table,
+holding information about which slice is running which job.
+
+"""
from datetime import datetime
from sfa.util.sfalogging import logger
from sfa.storage.alchemy import dbsession
from sqlalchemy.orm import joinedload
from sfa.storage.model import RegRecord, RegUser, RegSlice, RegKey
-from sfa.senslab.slabpostgres import slab_dbsession, SenslabXP
-
-from sfa.senslab.OARrestapi import OARrestapi
-from sfa.senslab.LDAPapi import LDAPapi
+from sfa.iotlab.iotlabpostgres import TestbedAdditionalSfaDB, LeaseTableXP
+from sfa.iotlab.OARrestapi import OARrestapi
+from sfa.iotlab.LDAPapi import LDAPapi
from sfa.util.xrn import Xrn, hrn_to_urn, get_authority
from sfa.trust.certificate import Keypair, convert_public_key
from sfa.trust.gid import create_uuid
from sfa.trust.hierarchy import Hierarchy
-
-from sfa.senslab.slabaggregate import slab_xrn_object
-class SlabTestbedAPI():
+from sfa.iotlab.iotlabaggregate import iotlab_xrn_object
+
+class IotlabTestbedAPI():
""" Class enabled to use LDAP and OAR api calls. """
-
+
+ _MINIMUM_DURATION = 10 # 10 units of granularity 60 s, 10 mins
+
def __init__(self, config):
"""Creates an instance of OARrestapi and LDAPapi which will be used to
issue calls to OAR or LDAP methods.
- Set the time format and the testbed granularity used for OAR
- reservation and leases.
-
+ Set the time format and the testbed granularity used for OAR
+ reservation and leases.
+
:param config: configuration object from sfa.util.config
- :type config: Config object
+ :type config: Config object
"""
+ self.iotlab_db = TestbedAdditionalSfaDB(config)
self.oar = OARrestapi()
self.ldap = LDAPapi()
self.time_format = "%Y-%m-%d %H:%M:%S"
self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
- self.grain = 1 # 10 mins lease minimum
+ self.grain = 60 # 10 mins lease minimum, 60 sec granularity
#import logging, logging.handlers
#from sfa.util.sfalogging import _SfaLogger
#sql_logger = _SfaLogger(loggername = 'sqlalchemy.engine', \
#level=logging.DEBUG)
return
-
-
+
+ @staticmethod
+ def GetMinExperimentDurationInGranularity():
+ """ Returns the minimum allowed duration for an experiment on the
+ testbed. In seconds.
+
+ """
+ return IotlabTestbedAPI._MINIMUM_DURATION
+
@staticmethod
- def GetMinExperimentDurationInSec():
- return 600
-
- @staticmethod
- def GetPeers (peer_filter=None ):
+ def GetPeers(peer_filter=None ):
""" Gathers registered authorities in SFA DB and looks for specific peer
- if peer_filter is specified.
+ if peer_filter is specified.
:param peer_filter: name of the site authority looked for.
:type peer_filter: string
- :return: list of records.
-
+ :returns: list of records.
+
"""
existing_records = {}
existing_hrns_by_types = {}
- logger.debug("SLABDRIVER \tGetPeers peer_filter %s, \
- " %(peer_filter))
+ logger.debug("IOTLAB_API \tGetPeers peer_filter %s " % (peer_filter))
all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
-
+
for record in all_records:
existing_records[(record.hrn, record.type)] = record
if record.type not in existing_hrns_by_types:
else:
existing_hrns_by_types[record.type].append(record.hrn)
-
- logger.debug("SLABDRIVER \tGetPeer\texisting_hrns_by_types %s "\
- %( existing_hrns_by_types))
- records_list = []
-
- try:
+ logger.debug("IOTLAB_API \tGetPeer\texisting_hrns_by_types %s "
+ % (existing_hrns_by_types))
+ records_list = []
+
+ try:
if peer_filter:
- records_list.append(existing_records[(peer_filter,'authority')])
- else :
+ records_list.append(existing_records[(peer_filter,
+ 'authority')])
+ else:
for hrn in existing_hrns_by_types['authority']:
- records_list.append(existing_records[(hrn,'authority')])
-
- logger.debug("SLABDRIVER \tGetPeer \trecords_list %s " \
- %(records_list))
+ records_list.append(existing_records[(hrn, 'authority')])
+
+ logger.debug("IOTLAB_API \tGetPeer \trecords_list %s "
+ % (records_list))
except KeyError:
pass
-
+
return_records = records_list
- logger.debug("SLABDRIVER \tGetPeer return_records %s " \
- %(return_records))
+ logger.debug("IOTLAB_API \tGetPeer return_records %s "
+ % (return_records))
return return_records
-
-
- #TODO : Handling OR request in make_ldap_filters_from_records
- #instead of the for loop
+ #TODO : Handling OR request in make_ldap_filters_from_records
+ #instead of the for loop
#over the records' list
def GetPersons(self, person_filter=None):
"""
- Get the enabled users and their properties from Senslab LDAP.
+ Get the enabled users and their properties from Iotlab LDAP.
If a filter is specified, looks for the user whose properties match
the filter, otherwise returns the whole enabled users'list.
- :param person_filter: Must be a list of dictionnaries
- with users properties when not set to None.
- :param person_filter: list of dict
- :return:Returns a list of users whose accounts are enabled
- found in ldap.
+
+ :param person_filter: Must be a list of dictionaries with users
+ properties when not set to None.
+ :type person_filter: list of dict
+
+ :returns: Returns a list of users whose accounts are enabled
+ found in ldap.
:rtype: list of dicts
-
+
"""
- logger.debug("SLABDRIVER \tGetPersons person_filter %s" \
- %(person_filter))
+ logger.debug("IOTLAB_API \tGetPersons person_filter %s"
+ % (person_filter))
person_list = []
if person_filter and isinstance(person_filter, list):
#If we are looking for a list of users (list of dict records)
#Usually the list contains only one user record
for searched_attributes in person_filter:
-
- #Get only enabled user accounts in senslab LDAP :
+
+ #Get only enabled user accounts in iotlab LDAP :
#add a filter for make_ldap_filters_from_record
- person = self.ldap.LdapFindUser(searched_attributes, \
- is_user_enabled=True)
+ person = self.ldap.LdapFindUser(searched_attributes,
+ is_user_enabled=True)
#If a person was found, append it to the list
if person:
person_list.append(person)
-
+
#If the list is empty, return None
if len(person_list) is 0:
person_list = None
-
+
else:
- #Get only enabled user accounts in senslab LDAP :
+ #Get only enabled user accounts in iotlab LDAP :
#add a filter for make_ldap_filters_from_record
- person_list = self.ldap.LdapFindUser(is_user_enabled=True)
+ person_list = self.ldap.LdapFindUser(is_user_enabled=True)
return person_list
#SendRequest("GET_timezone")
#return server_timestamp, server_tz
- def DeleteJobs(self, job_id, username):
-
- """ Deletes the job with the specified job_id and username on OAR by
- posting a delete request to OAR.
-
+ def DeleteJobs(self, job_id, username):
+ """
+
+ Deletes the job with the specified job_id and username on OAR by
+ posting a delete request to OAR.
+
:param job_id: job id in OAR.
- :param username: user's senslab login in LDAP.
- :type job_id:integer
+ :param username: user's iotlab login in LDAP.
+ :type job_id: integer
:type username: string
-
- :return: dictionary with the job id and if delete has been successful
- (True) or no (False)
+
+ :returns: dictionary with the job id and if delete has been successful
+ (True) or not (False)
:rtype: dict
- """
- logger.debug("SLABDRIVER \tDeleteJobs jobid %s username %s "\
- %(job_id, username))
+
+ """
+ logger.debug("IOTLAB_API \tDeleteJobs jobid %s username %s "
+ % (job_id, username))
if not job_id or job_id is -1:
return
reqdict = {}
reqdict['method'] = "delete"
reqdict['strval'] = str(job_id)
-
- answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
- reqdict,username)
+ answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id',
+ reqdict, username)
if answer['status'] == 'Delete request registered':
- ret = {job_id : True }
+ ret = {job_id: True}
else:
- ret = {job_id :False }
- logger.debug("SLABDRIVER \tDeleteJobs jobid %s \r\n answer %s \
- username %s" %(job_id, answer, username))
+ ret = {job_id: False}
+ logger.debug("IOTLAB_API \tDeleteJobs jobid %s \r\n answer %s \
+ username %s" % (job_id, answer, username))
return ret
-
-
+
+
##TODO : Unused GetJobsId ? SA 05/07/12
#def GetJobsId(self, job_id, username = None ):
#"""
- #Details about a specific job.
- #Includes details about submission time, jot type, state, events,
+ #Details about a specific job.
+ #Includes details about submission time, job type, state, events,
#owner, assigned ressources, walltime etc...
-
+
#"""
#req = "GET_jobs_id"
#node_list_k = 'assigned_network_address'
- ##Get job info from OAR
+ ##Get job info from OAR
#job_info = self.oar.parser.SendRequest(req, job_id, username)
- #logger.debug("SLABDRIVER \t GetJobsId %s " %(job_info))
+ #logger.debug("IOTLAB_API \t GetJobsId %s " %(job_info))
#try:
#if job_info['state'] == 'Terminated':
- #logger.debug("SLABDRIVER \t GetJobsId job %s TERMINATED"\
+ #logger.debug("IOTLAB_API \t GetJobsId job %s TERMINATED"\
#%(job_id))
#return None
#if job_info['state'] == 'Error':
- #logger.debug("SLABDRIVER \t GetJobsId ERROR message %s "\
+ #logger.debug("IOTLAB_API \t GetJobsId ERROR message %s "\
#%(job_info))
#return None
-
+
#except KeyError:
- #logger.error("SLABDRIVER \tGetJobsId KeyError")
- #return None
-
+ #logger.error("IOTLAB_API \tGetJobsId KeyError")
+ #return None
+
#parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
#node_list_k)
- ##Replaces the previous entry
+ ##Replaces the previous entry
##"assigned_network_address" / "reserved_resources"
##with "node_ids"
#job_info.update({'node_ids':parsed_job_info[node_list_k]})
#del job_info[node_list_k]
- #logger.debug(" \r\nSLABDRIVER \t GetJobsId job_info %s " %(job_info))
+ #logger.debug(" \r\nIOTLAB_API \t GetJobsId job_info %s " %(job_info))
#return job_info
-
+
def GetJobsResources(self, job_id, username = None):
- """ Gets the list of nodes associated with the job_id and username
- if provided.
- Transforms the senslab hostnames to the corresponding
+ """ Gets the list of nodes associated with the job_id and username
+ if provided.
+ Transforms the iotlab hostnames to the corresponding
SFA nodes hrns.
- Rertuns dict key :'node_ids' , value : hostnames list
+ Returns dict key :'node_ids' , value : hostnames list
:param username: user's LDAP login
:paran job_id: job's OAR identifier.
:type username: string
:type job_id: integer
-
- :return: dicionary with nodes' hostnames belonging to the job.
+
+ :returns: dictionary with nodes' hostnames belonging to the job.
:rtype: dict
+ .. warning: Unused. SA 16/10/13
"""
req = "GET_jobs_id_resources"
-
-
- #Get job resources list from OAR
+
+
+ #Get job resources list from OAR
node_id_list = self.oar.parser.SendRequest(req, job_id, username)
- logger.debug("SLABDRIVER \t GetJobsResources %s " %(node_id_list))
-
+ logger.debug("IOTLAB_API \t GetJobsResources %s " %(node_id_list))
+
hostname_list = \
self.__get_hostnames_from_oar_node_ids(node_id_list)
-
- #Replaces the previous entry "assigned_network_address" /
+
+ #Replaces the previous entry "assigned_network_address" /
#"reserved_resources" with "node_ids"
job_info = {'node_ids': hostname_list}
return job_info
-
+
#def get_info_on_reserved_nodes(self, job_info, node_list_name):
#"""
#..warning:unused SA 23/05/13
#"""
- ##Get the list of the testbed nodes records and make a
+ ##Get the list of the testbed nodes records and make a
##dictionnary keyed on the hostname out of it
- #node_list_dict = self.GetNodes()
+ #node_list_dict = self.GetNodes()
##node_hostname_list = []
- #node_hostname_list = [node['hostname'] for node in node_list_dict]
+ #node_hostname_list = [node['hostname'] for node in node_list_dict]
##for node in node_list_dict:
##node_hostname_list.append(node['hostname'])
#node_dict = dict(zip(node_hostname_list, node_list_dict))
#try :
#reserved_node_hostname_list = []
#for index in range(len(job_info[node_list_name])):
- ##job_info[node_list_name][k] =
+ ##job_info[node_list_name][k] =
#reserved_node_hostname_list[index] = \
#node_dict[job_info[node_list_name][index]]['hostname']
-
- #logger.debug("SLABDRIVER \t get_info_on_reserved_nodes \
+
+ #logger.debug("IOTLAB_API \t get_info_on_reserved_nodes \
#reserved_node_hostname_list %s" \
#%(reserved_node_hostname_list))
#except KeyError:
- #logger.error("SLABDRIVER \t get_info_on_reserved_nodes KEYERROR " )
-
- #return reserved_node_hostname_list
-
+ #logger.error("IOTLAB_API \t get_info_on_reserved_nodes KEYERROR " )
+
+ #return reserved_node_hostname_list
+
def GetNodesCurrentlyInUse(self):
- """Returns a list of all the nodes already involved in an oar running
+ """Returns a list of all the nodes already involved in an oar running
job.
- :rtype: list of nodes hostnames.
+ :rtype: list of nodes hostnames.
"""
- return self.oar.parser.SendRequest("GET_running_jobs")
-
- def __get_hostnames_from_oar_node_ids(self, resource_id_list ):
+ return self.oar.parser.SendRequest("GET_running_jobs")
+
+ def __get_hostnames_from_oar_node_ids(self, oar_id_node_dict,
+ resource_id_list ):
"""Get the hostnames of the nodes from their OAR identifiers.
Get the list of nodes dict using GetNodes and find the hostname
associated with the identifier.
+ :param oar_id_node_dict: full node dictionary list keyed by oar node id
:param resource_id_list: list of nodes identifiers
:returns: list of node hostnames.
"""
- full_nodes_dict_list = self.GetNodes()
- #Put the full node list into a dictionary keyed by oar node id
- oar_id_node_dict = {}
- for node in full_nodes_dict_list:
- oar_id_node_dict[node['oar_id']] = node
-
- hostname_list = []
+
+ hostname_list = []
for resource_id in resource_id_list:
#Because jobs requested "asap" do not have defined resources
if resource_id is not "Undefined":
hostname_list.append(\
oar_id_node_dict[resource_id]['hostname'])
-
+
#hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
- return hostname_list
-
- def GetReservedNodes(self, username = None):
+ return hostname_list
+
+ def GetReservedNodes(self, username=None):
""" Get list of leases. Get the leases for the username if specified,
otherwise get all the leases. Finds the nodes hostnames for each
OAR node identifier.
:param username: user's LDAP login
:type username: string
- :return: list of reservations dict
+ :returns: list of reservations dict
:rtype: dict list
"""
-
+
#Get the nodes in use and the reserved nodes
reservation_dict_list = \
self.oar.parser.SendRequest("GET_reserved_nodes", \
username = username)
-
-
+
+ # Get the full node dict list once for all
+ # so that we can get the hostnames given their oar node id afterwards
+ # when the reservations are checked.
+ full_nodes_dict_list = self.GetNodes()
+ #Put the full node list into a dictionary keyed by oar node id
+ oar_id_node_dict = {}
+ for node in full_nodes_dict_list:
+ oar_id_node_dict[node['oar_id']] = node
+
for resa in reservation_dict_list:
logger.debug ("GetReservedNodes resa %s"%(resa))
#dict list of hostnames and their site
resa['reserved_nodes'] = \
- self.__get_hostnames_from_oar_node_ids(resa['resource_ids'])
-
+ self.__get_hostnames_from_oar_node_ids(oar_id_node_dict,
+ resa['resource_ids'])
+
#del resa['resource_ids']
return reservation_dict_list
-
- def GetNodes(self, node_filter_dict = None, return_fields_list = None):
+
+ def GetNodes(self, node_filter_dict=None, return_fields_list=None):
"""
-
- Make a list of senslab nodes and their properties from information
- given by OAR. Search for specific nodes if some filters are specified.
- Nodes properties returned if no return_fields_list given:
- 'hrn','archi','mobile','hostname','site','boot_state','node_id',
- 'radio','posx','posy','oar_id','posz'.
-
- :param node_filter_dict: dictionnary of lists with node properties
+
+ Make a list of iotlab nodes and their properties from information
+ given by OAR. Search for specific nodes if some filters are
+ specified. Nodes properties returned if no return_fields_list given:
+ 'hrn','archi','mobile','hostname','site','boot_state','node_id',
+ 'radio','posx','posy','oar_id','posz'.
+
+ :param node_filter_dict: dictionary of lists with node properties. For
+ instance, if you want to look for a specific node with its hrn,
+ the node_filter_dict should be {'hrn': [hrn_of_the_node]}
:type node_filter_dict: dict
- :param return_fields_list: list of specific fields the user wants to be
- returned.
+ :param return_fields_list: list of specific fields the user wants to be
+ returned.
:type return_fields_list: list
- :return: list of dictionaries with node properties
+ :returns: list of dictionaries with node properties
:rtype: list
-
+
"""
node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
node_dict_list = node_dict_by_id.values()
- logger.debug (" SLABDRIVER GetNodes node_filter_dict %s \
- return_fields_list %s "%(node_filter_dict, return_fields_list))
+ logger.debug (" IOTLAB_API GetNodes node_filter_dict %s \
+ return_fields_list %s " % (node_filter_dict, return_fields_list))
#No filtering needed return the list directly
if not (node_filter_dict or return_fields_list):
return node_dict_list
-
+
return_node_list = []
if node_filter_dict:
for filter_key in node_filter_dict:
try:
- #Filter the node_dict_list by each value contained in the
+ #Filter the node_dict_list by each value contained in the
#list node_filter_dict[filter_key]
for value in node_filter_dict[filter_key]:
for node in node_dict_list:
if node[filter_key] == value:
- if return_fields_list :
+ if return_fields_list:
tmp = {}
for k in return_fields_list:
- tmp[k] = node[k]
+ tmp[k] = node[k]
return_node_list.append(tmp)
else:
return_node_list.append(node)
return return_node_list
-
-
-
+
+
+
@staticmethod
def AddSlice(slice_record, user_record):
- """Add slice to the local senslab sfa tables if the slice comes
- from a federated site and is not yet in the senslab sfa DB,
- although the user has already a LDAP login.
- Called by verify_slice during lease/sliver creation.
+ """
+
+ Add slice to the local iotlab sfa tables if the slice comes
+ from a federated site and is not yet in the iotlab sfa DB,
+ although the user has already a LDAP login.
+ Called by verify_slice during lease/sliver creation.
+
:param slice_record: record of slice, must contain hrn, gid, slice_id
- and authority of the slice.
+ and authority of the slice.
:type slice_record: dictionary
:param user_record: record of the user
:type user_record: RegUser
+
"""
-
- sfa_record = RegSlice(hrn=slice_record['hrn'],
- gid=slice_record['gid'],
- pointer=slice_record['slice_id'],
- authority=slice_record['authority'])
-
- logger.debug("SLABDRIVER.PY AddSlice sfa_record %s user_record %s" \
- %(sfa_record, user_record))
+
+ sfa_record = RegSlice(hrn=slice_record['hrn'],
+ gid=slice_record['gid'],
+ pointer=slice_record['slice_id'],
+ authority=slice_record['authority'])
+ logger.debug("IOTLAB_API.PY AddSlice sfa_record %s user_record %s"
+ % (sfa_record, user_record))
sfa_record.just_created()
dbsession.add(sfa_record)
- dbsession.commit()
+ dbsession.commit()
#Update the reg-researcher dependance table
- sfa_record.reg_researchers = [user_record]
- dbsession.commit()
-
+ sfa_record.reg_researchers = [user_record]
+ dbsession.commit()
+
return
-
- def GetSites(self, site_filter_name_list = None, return_fields_list = None):
+
+ def GetSites(self, site_filter_name_list=None, return_fields_list=None):
+ """Returns the list of Iotlab's sites with the associated nodes and
+ the sites' properties as dictionaries.
+
+ Site properties:
+ ['address_ids', 'slice_ids', 'name', 'node_ids', 'url', 'person_ids',
+ 'site_tag_ids', 'enabled', 'site', 'longitude', 'pcu_ids',
+ 'max_slivers', 'max_slices', 'ext_consortium_id', 'date_created',
+ 'latitude', 'is_public', 'peer_site_id', 'peer_id', 'abbreviated_name']
+ Uses the OAR request GET_sites to find the Iotlab's sites.
+
+ :param site_filter_name_list: used to specify specific sites
+ :param return_fields_list: field that has to be returned
+ :type site_filter_name_list: list
+ :type return_fields_list: list
+
+
+ """
site_dict = self.oar.parser.SendRequest("GET_sites")
#site_dict : dict where the key is the sit ename
return_site_list = []
- if not ( site_filter_name_list or return_fields_list):
+ if not (site_filter_name_list or return_fields_list):
return_site_list = site_dict.values()
return return_site_list
-
+
for site_filter_name in site_filter_name_list:
if site_filter_name in site_dict:
if return_fields_list:
try:
tmp[field] = site_dict[site_filter_name][field]
except KeyError:
- logger.error("GetSites KeyError %s "%(field))
+ logger.error("GetSites KeyError %s " % (field))
return None
return_site_list.append(tmp)
else:
- return_site_list.append( site_dict[site_filter_name])
-
+ return_site_list.append(site_dict[site_filter_name])
return return_site_list
-
-
-
- #TODO : Check rights to delete person
+ #TODO : Check rights to delete person
def DeletePerson(self, person_record):
- """ Disable an existing account in senslab LDAP.
- Users and techs can only delete themselves. PIs can only
- delete themselves and other non-PIs at their sites.
- ins can delete anyone.
+ """Disable an existing account in iotlab LDAP.
+
+ Users and techs can only delete themselves. PIs can only
+ delete themselves and other non-PIs at their sites.
+ ins can delete anyone.
+
:param person_record: user's record
:type person_record: dict
- :return: True if successful, False otherwise.
+ :returns: True if successful, False otherwise.
:rtype: boolean
-
+
+ .. todo:: CHECK THAT ONLY THE USER OR ADMIN CAN DEL HIMSELF.
"""
- #Disable user account in senslab LDAP
+ #Disable user account in iotlab LDAP
ret = self.ldap.LdapMarkUserAsDeleted(person_record)
- logger.warning("SLABDRIVER DeletePerson %s " %(person_record))
+ logger.warning("IOTLAB_API DeletePerson %s " % (person_record))
return ret['bool']
-
-
+
def DeleteSlice(self, slice_record):
- """ Deletes the specified slice and kills the jobs associated with
- the slice if any, using DeleteSliceFromNodes.
-
- :return: True if all the jobs in the slice have been deleted,
- or the list of jobs that could not be deleted otherwise.
- :rtype: list or boolean
-
+ """Deletes the specified slice and kills the jobs associated with
+ the slice if any, using DeleteSliceFromNodes.
+
+ :param slice_record: record of the slice, must contain oar_job_id, user
+ :type slice_record: dict
+ :returns: True if all the jobs in the slice have been deleted,
+ or the list of jobs that could not be deleted otherwise.
+ :rtype: list or boolean
+
+ .. seealso:: DeleteSliceFromNodes
+
"""
ret = self.DeleteSliceFromNodes(slice_record)
delete_failed = None
if delete_failed is None:
delete_failed = []
delete_failed.append(job_id)
-
- logger.info("SLABDRIVER DeleteSlice %s answer %s"%(slice_record, \
+
+ logger.info("IOTLAB_API DeleteSlice %s answer %s"%(slice_record, \
delete_failed))
return delete_failed or True
-
+
@staticmethod
def __add_person_to_db(user_dict):
"""
- Add a federated user straight to db when the user issues a lease
- request with senslab nodes and that he has not registered with senslab
+ Add a federated user straight to db when the user issues a lease
+ request with iotlab nodes and that he has not registered with iotlab
yet (that is he does not have a LDAP entry yet).
- Uses parts of the routines in SlabImport when importing user from LDAP.
+ Uses parts of the routines in IotlabImport when importing user from LDAP.
Called by AddPerson, right after LdapAddUser.
:param user_dict: Must contain email, hrn and pkey to get a GID
and be added to the SFA db.
:type user_dict: dict
-
+
"""
check_if_exists = \
dbsession.query(RegUser).filter_by(email = user_dict['email']).first()
#user doesn't exists
if not check_if_exists:
logger.debug("__add_person_to_db \t Adding %s \r\n \r\n \
- " %(user_dict))
- hrn = user_dict['hrn']
+ " %(user_dict))
+ hrn = user_dict['hrn']
person_urn = hrn_to_urn(hrn, 'user')
pubkey = user_dict['pkey']
try:
logger.warn('__add_person_to_db: unable to convert public \
key for %s' %(hrn ))
pkey = Keypair(create=True)
-
-
+
+
if pubkey is not None and pkey is not None :
hierarchy = Hierarchy()
person_gid = hierarchy.create_gid(person_urn, create_uuid(), \
pkey)
if user_dict['email']:
logger.debug("__add_person_to_db \r\n \r\n \
- SLAB IMPORTER PERSON EMAIL OK email %s "\
+ IOTLAB IMPORTER PERSON EMAIL OK email %s "\
%(user_dict['email']))
person_gid.set_email(user_dict['email'])
-
+
user_record = RegUser(hrn=hrn , pointer= '-1', \
authority=get_authority(hrn), \
email=user_dict['email'], gid = person_gid)
user_record.just_created()
dbsession.add (user_record)
dbsession.commit()
- return
-
+ return
+
def AddPerson(self, record):
- """Adds a new account. Any fields specified in records are used,
- otherwise defaults are used. Creates an appropriate login by calling
- LdapAddUser.
+ """
+
+ Adds a new account. Any fields specified in records are used,
+ otherwise defaults are used. Creates an appropriate login by calling
+ LdapAddUser.
+
:param record: dictionary with the sfa user's properties.
- :return: The uid of the added person if sucessful, otherwise returns
- the error message from LDAP.
- :rtype: interger or string
+ :returns: a dictionary with the status. If successful, the dictionary
+ boolean is set to True and there is a 'uid' key with the new login
+ added to LDAP, otherwise the bool is set to False and a key
+ 'message' is in the dictionary, with the error message.
+ :rtype: dict
+
"""
ret = self.ldap.LdapAddUser(record)
-
+
if ret['bool'] is True:
record['hrn'] = self.root_auth + '.' + ret['uid']
- logger.debug("SLABDRIVER AddPerson return code %s record %s \r\n "\
- %(ret, record))
+ logger.debug("IOTLAB_API AddPerson return code %s record %s "
+ % (ret, record))
self.__add_person_to_db(record)
- return ret['uid']
- else:
- return ret['message']
-
-
-
+ return ret
+
+
+
+
+
#TODO AddPersonKey 04/07/2012 SA
def AddPersonKey(self, person_uid, old_attributes_dict, new_key_dict):
"""Adds a new key to the specified account. Adds the key to the
- senslab ldap, provided that the person_uid is valid.
+ iotlab ldap, provided that the person_uid is valid.
+
Non-admins can only modify their own keys.
-
- :param person_uid: user's senslab login in LDAP
- :param old_attributes_dict: dict with the user's old sshPublicKey
- :param new_key_dict:dict with the user's new sshPublicKey
+
+ :param person_uid: user's iotlab login in LDAP
+ :param old_attributes_dict: dict with the user's old sshPublicKey
+ :param new_key_dict: dict with the user's new sshPublicKey
:type person_uid: string
-
+
:rtype: Boolean
- :return: True if the key has been modified, False otherwise.
-
+ :returns: True if the key has been modified, False otherwise.
+
"""
ret = self.ldap.LdapModify(person_uid, old_attributes_dict, \
new_key_dict)
- logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
+ logger.warning("IOTLAB_API AddPersonKey EMPTY - DO NOTHING \r\n ")
return ret['bool']
-
- def DeleteLeases(self, leases_id_list, slice_hrn ):
+
+ def DeleteLeases(self, leases_id_list, slice_hrn):
"""
+
Deletes several leases, based on their job ids and the slice
- they are associated with. Uses DeleteJobs to delete the jobs
- on OAR. Note that one slice can contain multiple jobs, and in this case
- all the jobs in the leases_id_list MUST belong to ONE slice,
- since there is only one slice hrn provided here.
+ they are associated with. Uses DeleteJobs to delete the jobs
+ on OAR. Note that one slice can contain multiple jobs, and in this
+ case all the jobs in the leases_id_list MUST belong to ONE slice,
+ since there is only one slice hrn provided here.
+
:param leases_id_list: list of job ids that belong to the slice whose
- slice hrn is provided.
- :param slice_hrn: the slice hrn .
- ..warning: Does not have a return value since there was no easy
- way to handle failure when dealing with multiple job delete. Plus,
- there was no easy way to report it to the user.
- """
- logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
+ slice hrn is provided.
+ :param slice_hrn: the slice hrn.
+ :type slice_hrn: string
+
+ .. warning:: Does not have a return value since there was no easy
+ way to handle failure when dealing with multiple job delete. Plus,
+ there was no easy way to report it to the user.
+
+ """
+ logger.debug("IOTLAB_API DeleteLeases leases_id_list %s slice_hrn %s \
\r\n " %(leases_id_list, slice_hrn))
for job_id in leases_id_list:
self.DeleteJobs(job_id, slice_hrn)
- return
+ return
@staticmethod
def _process_walltime(duration):
""" Calculates the walltime in seconds from the duration in H:M:S
specified in the RSpec.
-
+
"""
if duration:
- # Fixing the walltime by adding a few delays.
+ # Fixing the walltime by adding a few delays.
# First put the walltime in seconds oarAdditionalDelay = 20;
# additional delay for /bin/sleep command to
# take in account prologue and epilogue scripts execution
# int walltimeAdditionalDelay = 240; additional delay
+ #for prologue/epilogue execution = $SERVER_PROLOGUE_EPILOGUE_TIMEOUT
+ #in oar.conf
# Put the duration in seconds first
- desired_walltime = duration * 60
-
+ #desired_walltime = duration * 60
+ desired_walltime = duration
total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
walltime = []
else:
logger.log_exc(" __process_walltime duration null")
-
- return walltime, sleep_walltime
-
- @staticmethod
+
+ return walltime, sleep_walltime
+
+ @staticmethod
def _create_job_structure_request_for_OAR(lease_dict):
""" Creates the structure needed for a correct POST on OAR.
Makes the timestamp transformation into the appropriate format.
- Sends the POST request to create the job with the resources in
+ Sends the POST request to create the job with the resources in
added_nodes.
-
+
"""
nodeid_list = []
reqdict = {}
-
- reqdict['workdir'] = '/tmp'
- reqdict['resource'] = "{network_address in ("
- for node in lease_dict['added_nodes']:
+ reqdict['workdir'] = '/tmp'
+ reqdict['resource'] = "{network_address in ("
+
+ for node in lease_dict['added_nodes']:
logger.debug("\r\n \r\n OARrestapi \t \
__create_job_structure_request_for_OAR node %s" %(node))
- # Get the ID of the node
+ # Get the ID of the node
nodeid = node
reqdict['resource'] += "'" + nodeid + "', "
nodeid_list.append(nodeid)
walltime, sleep_walltime = \
- SlabTestbedAPI._process_walltime(\
+ IotlabTestbedAPI._process_walltime(\
int(lease_dict['lease_duration']))
reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
#In case of a scheduled experiment (not immediate)
- #To run an XP immediately, don't specify date and time in RSpec
+ #To run an XP immediately, don't specify date and time in RSpec
#They will be set to None.
if lease_dict['lease_start_time'] is not '0':
#Readable time accepted by OAR
int(lease_dict['lease_start_time'])).\
strftime(lease_dict['time_format'])
reqdict['reservation'] = start_time
- #If there is not start time, Immediate XP. No need to add special
+ #If there is not start time, Immediate XP. No need to add special
# OAR parameters
- reqdict['type'] = "deploy"
+ reqdict['type'] = "deploy"
reqdict['directory'] = ""
reqdict['name'] = "SFA_" + lease_dict['slice_user']
return reqdict
-
-
+
+
def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
lease_start_time, lease_duration, slice_user=None):
-
+
"""
- Create a job request structure based on the information provided
- and post the job on OAR.
+ Create a job request structure based on the information provided
+ and post the job on OAR.
:param added_nodes: list of nodes that belong to the described lease.
:param slice_name: the slice hrn associated to the lease.
:param lease_start_time: timestamp of the lease startting time.
:param lease_duration: lease durationin minutes
-
+
"""
lease_dict = {}
lease_dict['lease_start_time'] = lease_start_time
lease_dict['grain'] = self.GetLeaseGranularity()
lease_dict['time_format'] = self.time_format
-
- logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR slice_user %s\
- \r\n " %(slice_user))
+
+ logger.debug("IOTLAB_API.PY \tLaunchExperimentOnOAR slice_user %s\
+ \r\n " %(slice_user))
#Create the request for OAR
reqdict = self._create_job_structure_request_for_OAR(lease_dict)
- # first step : start the OAR job and update the job
- logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR reqdict %s\
- \r\n " %(reqdict))
-
+ # first step : start the OAR job and update the job
+ logger.debug("IOTLAB_API.PY \tLaunchExperimentOnOAR reqdict %s\
+ \r\n " %(reqdict))
+
answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
reqdict, slice_user)
- logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s " %(answer))
- try:
+ logger.debug("IOTLAB_API \tLaunchExperimentOnOAR jobid %s " %(answer))
+ try:
jobid = answer['id']
except KeyError:
- logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
+ logger.log_exc("IOTLAB_API \tLaunchExperimentOnOAR \
Impossible to create job %s " %(answer))
return None
-
-
-
+
+
+
if jobid :
- logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
+ logger.debug("IOTLAB_API \tLaunchExperimentOnOAR jobid %s \
added_nodes %s slice_user %s" %(jobid, added_nodes, \
slice_user))
-
-
+
+
return jobid
-
-
- def AddLeases(self, hostname_list, slice_record, \
- lease_start_time, lease_duration):
-
+
+
+ def AddLeases(self, hostname_list, slice_record,
+ lease_start_time, lease_duration):
+
"""Creates a job in OAR corresponding to the information provided
- as parameters. Adds the job id and the slice hrn in the senslab
+ as parameters. Adds the job id and the slice hrn in the iotlab
database so that we are able to know which slice has which nodes.
-
- :param hostname_list: list of nodes' OAR hostnames.
+
+ :param hostname_list: list of nodes' OAR hostnames.
:param slice_record: sfa slice record, must contain login and hrn.
:param lease_start_time: starting time , unix timestamp format
:param lease_duration: duration in minutes
-
+
:type hostname_list: list
:type slice_record: dict
:type lease_start_time: integer
:type lease_duration: integer
-
+
"""
- logger.debug("SLABDRIVER \r\n \r\n \t AddLeases hostname_list %s \
+ logger.debug("IOTLAB_API \r\n \r\n \t AddLeases hostname_list %s \
slice_record %s lease_start_time %s lease_duration %s "\
%( hostname_list, slice_record , lease_start_time, \
lease_duration))
strftime(self.time_format)
end_time = lease_start_time + lease_duration
-
- logger.debug("SLABDRIVER \r\n \r\n \t AddLeases TURN ON LOGGING SQL \
+
+ logger.debug("IOTLAB_API \r\n \r\n \t AddLeases TURN ON LOGGING SQL \
%s %s %s "%(slice_record['hrn'], job_id, end_time))
-
-
- logger.debug("SLABDRIVER \r\n \r\n \t AddLeases %s %s %s " \
+
+
+ logger.debug("IOTLAB_API \r\n \r\n \t AddLeases %s %s %s " \
%(type(slice_record['hrn']), type(job_id), type(end_time)))
-
- slab_ex_row = SenslabXP(slice_hrn = slice_record['hrn'], \
- job_id = job_id, end_time= end_time)
-
- logger.debug("SLABDRIVER \r\n \r\n \t AddLeases slab_ex_row %s" \
- %(slab_ex_row))
- slab_dbsession.add(slab_ex_row)
- slab_dbsession.commit()
-
- logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " \
+
+ iotlab_ex_row = LeaseTableXP(slice_hrn = slice_record['hrn'], experiment_id=job_id,
+ end_time= end_time)
+
+ logger.debug("IOTLAB_API \r\n \r\n \t AddLeases iotlab_ex_row %s" \
+ %(iotlab_ex_row))
+ self.iotlab_db.testbed_session.add(iotlab_ex_row)
+ self.iotlab_db.testbed_session.commit()
+
+ logger.debug("IOTLAB_API \t AddLeases hostname_list start_time %s " \
%(start_time))
-
+
return
-
-
- #Delete the jobs from job_senslab table
+
+
+ #Delete the jobs from job_iotlab table
def DeleteSliceFromNodes(self, slice_record):
- """ Deletes all the running or scheduled jobs of a given slice
- given its record.
- :param slice_record: record of the slice
+ """
+
+ Deletes all the running or scheduled jobs of a given slice
+ given its record.
+
+ :param slice_record: record of the slice, must contain oar_job_id, user
:type slice_record: dict
-
- :return: dict of the jobs'deletion status. Success= True, Failure=
- False, for each job id.
+
+ :returns: dict of the jobs' deletion status. Success= True, Failure=
+ False, for each job id.
:rtype: dict
+
"""
- logger.debug("SLABDRIVER \t DeleteSliceFromNodese %s " %(slice_record))
-
- if isinstance(slice_record['oar_job_id'], list):
+ logger.debug("IOTLAB_API \t DeleteSliceFromNodes %s "
+ % (slice_record))
+
+ if isinstance(slice_record['oar_job_id'], list):
oar_bool_answer = {}
for job_id in slice_record['oar_job_id']:
ret = self.DeleteJobs(job_id, slice_record['user'])
-
+
oar_bool_answer.update(ret)
else:
- oar_bool_answer = [self.DeleteJobs(slice_record['oar_job_id'], \
- slice_record['user'])]
-
+ oar_bool_answer = [self.DeleteJobs(slice_record['oar_job_id'],
+ slice_record['user'])]
+
return oar_bool_answer
-
-
-
+
+
+
def GetLeaseGranularity(self):
- """ Returns the granularity of an experiment in the Senslab testbed.
- OAR uses seconds for experiments duration , the granulaity is also
- defined in seconds.
+ """ Returns the granularity of an experiment in the Iotlab testbed.
+ OAR uses seconds for experiments duration, the granularity is also
+ defined in seconds.
Experiments which last less than 10 min (600 sec) are invalid"""
return self.grain
-
-
+
+
+ # @staticmethod
+ # def update_experiments_in_additional_sfa_db( job_oar_list, jobs_psql):
+ # """ Cleans the iotlab db by deleting expired and cancelled jobs.
+ # Compares the list of job ids given by OAR with the job ids that
+ # are already in the database, deletes the jobs that are no longer in
+ # the OAR job id list.
+ # :param job_oar_list: list of job ids coming from OAR
+ # :type job_oar_list: list
+ # :param job_psql: list of job ids from the database.
+ # type job_psql: list
+ # """
+ # #Turn the list into a set
+ # set_jobs_psql = set(jobs_psql)
+
+ # kept_jobs = set(job_oar_list).intersection(set_jobs_psql)
+ # logger.debug ( "\r\n \t\ update_experiments_in_additional_sfa_db jobs_psql %s \r\n \t \
+ # job_oar_list %s kept_jobs %s "%(set_jobs_psql, job_oar_list, kept_jobs))
+ # deleted_jobs = set_jobs_psql.difference(kept_jobs)
+ # deleted_jobs = list(deleted_jobs)
+ # if len(deleted_jobs) > 0:
+ # self.iotlab_db.testbed_session.query(LeaseTableXP).filter(LeaseTableXP.job_id.in_(deleted_jobs)).delete(synchronize_session='fetch')
+ # self.iotlab_db.testbed_session.commit()
+
+ # return
+
@staticmethod
- def update_jobs_in_slabdb( job_oar_list, jobs_psql):
- """ Cleans the slab db by deleting expired and cancelled jobs.
- Compares the list of job ids given by OAR with the job ids that
- are already in the database, deletes the jobs that are no longer in
- the OAR job id list.
- :param job_oar_list: list of job ids coming from OAR
- :type job_oar_list: list
- :param job_psql: list of job ids cfrom the database.
- type job_psql: list
- """
- #Turn the list into a set
- set_jobs_psql = set(jobs_psql)
-
- kept_jobs = set(job_oar_list).intersection(set_jobs_psql)
- logger.debug ( "\r\n \t\ update_jobs_in_slabdb jobs_psql %s \r\n \t \
- job_oar_list %s kept_jobs %s "%(set_jobs_psql, job_oar_list, kept_jobs))
- deleted_jobs = set_jobs_psql.difference(kept_jobs)
- deleted_jobs = list(deleted_jobs)
- if len(deleted_jobs) > 0:
- slab_dbsession.query(SenslabXP).filter(SenslabXP.job_id.in_(deleted_jobs)).delete(synchronize_session='fetch')
- slab_dbsession.commit()
-
- return
+ def filter_lease_name(reservation_list, filter_value):
+ filtered_reservation_list = list(reservation_list)
+ logger.debug("IOTLAB_API \t filter_lease_name reservation_list %s" \
+ % (reservation_list))
+ for reservation in reservation_list:
+ if 'slice_hrn' in reservation and \
+ reservation['slice_hrn'] != filter_value:
+ filtered_reservation_list.remove(reservation)
+
+ logger.debug("IOTLAB_API \t filter_lease_name filtered_reservation_list %s" \
+ % (filtered_reservation_list))
+ return filtered_reservation_list
+
+ @staticmethod
+ def filter_lease_start_time(reservation_list, filter_value):
+ filtered_reservation_list = list(reservation_list)
+
+ for reservation in reservation_list:
+ if 't_from' in reservation and \
+ reservation['t_from'] > filter_value:
+ filtered_reservation_list.remove(reservation)
+
+ return filtered_reservation_list
+
-
-
def GetLeases(self, lease_filter_dict=None, login=None):
- """ Get the list of leases from OAR with complete information
- about which slice owns which jobs and nodes.
- Two purposes:
- -Fetch all the jobs from OAR (running, waiting..)
- complete the reservation information with slice hrn
- found in slabxp table. If not available in the table,
- assume it is a senslab slice.
- -Updates the slab table, deleting jobs when necessary.
- :return: reservation_list, list of dictionaries with 'lease_id',
- 'reserved_nodes','slice_id', 'state', 'user', 'component_id_list',
- 'slice_hrn', 'resource_ids', 't_from', 't_until'
+ """
+
+ Get the list of leases from OAR with complete information
+ about which slice owns which jobs and nodes.
+ Two purposes:
+ -Fetch all the jobs from OAR (running, waiting..)
+ complete the reservation information with slice hrn
+ found in testbed_xp table. If not available in the table,
+ assume it is an iotlab slice.
+ -Updates the iotlab table, deleting jobs when necessary.
+
+ :returns: reservation_list, list of dictionaries with 'lease_id',
+ 'reserved_nodes','slice_id', 'state', 'user', 'component_id_list',
+ 'slice_hrn', 'resource_ids', 't_from', 't_until'
:rtype: list
+
"""
-
+
unfiltered_reservation_list = self.GetReservedNodes(login)
reservation_list = []
- #Find the slice associated with this user senslab ldap uid
- logger.debug(" SLABDRIVER.PY \tGetLeases login %s\
- unfiltered_reservation_list %s " %(login, unfiltered_reservation_list))
+ #Find the slice associated with this user iotlab ldap uid
+ logger.debug(" IOTLAB_API.PY \tGetLeases login %s\
+ unfiltered_reservation_list %s "
+ % (login, unfiltered_reservation_list))
#Create user dict first to avoid looking several times for
#the same user in LDAP SA 27/07/12
job_oar_list = []
-
- jobs_psql_query = slab_dbsession.query(SenslabXP).all()
- jobs_psql_dict = dict([(row.job_id, row.__dict__ ) for row in jobs_psql_query ])
+
+ jobs_psql_query = self.iotlab_db.testbed_session.query(LeaseTableXP).all()
+ jobs_psql_dict = dict([(row.experiment_id, row.__dict__)
+ for row in jobs_psql_query])
#jobs_psql_dict = jobs_psql_dict)
- logger.debug("SLABDRIVER \tGetLeases jobs_psql_dict %s"\
- %(jobs_psql_dict))
- jobs_psql_id_list = [ row.job_id for row in jobs_psql_query ]
-
-
-
+ logger.debug("IOTLAB_API \tGetLeases jobs_psql_dict %s"
+ % (jobs_psql_dict))
+ jobs_psql_id_list = [row.experiment_id for row in jobs_psql_query]
+
for resa in unfiltered_reservation_list:
- logger.debug("SLABDRIVER \tGetLeases USER %s"\
- %(resa['user']))
- #Construct list of jobs (runing, waiting..) in oar
- job_oar_list.append(resa['lease_id'])
- #If there is information on the job in SLAB DB ]
- #(slice used and job id)
+ logger.debug("IOTLAB_API \tGetLeases USER %s"
+ % (resa['user']))
+ #Construct list of jobs (runing, waiting..) in oar
+ job_oar_list.append(resa['lease_id'])
+ #If there is information on the job in IOTLAB DB ]
+ #(slice used and job id)
if resa['lease_id'] in jobs_psql_dict:
job_info = jobs_psql_dict[resa['lease_id']]
- logger.debug("SLABDRIVER \tGetLeases job_info %s"\
- %(job_info))
+ logger.debug("IOTLAB_API \tGetLeases job_info %s"
+ % (job_info))
resa['slice_hrn'] = job_info['slice_hrn']
resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
-
- #otherwise, assume it is a senslab slice:
+
+ #otherwise, assume it is an iotlab slice:
else:
- resa['slice_id'] = hrn_to_urn(self.root_auth+'.'+ \
- resa['user'] +"_slice" , 'slice')
+ resa['slice_id'] = hrn_to_urn(self.root_auth + '.' +
+ resa['user'] + "_slice", 'slice')
resa['slice_hrn'] = Xrn(resa['slice_id']).get_hrn()
- resa['component_id_list'] = []
+ resa['component_id_list'] = []
#Transform the hostnames into urns (component ids)
for node in resa['reserved_nodes']:
-
- slab_xrn = slab_xrn_object(self.root_auth, node)
- resa['component_id_list'].append(slab_xrn.urn)
-
- if lease_filter_dict:
- logger.debug("SLABDRIVER \tGetLeases resa_ %s \
- \r\n leasefilter %s" %(resa, lease_filter_dict))
-
- if lease_filter_dict['name'] == resa['slice_hrn']:
- reservation_list.append(resa)
-
+
+ iotlab_xrn = iotlab_xrn_object(self.root_auth, node)
+ resa['component_id_list'].append(iotlab_xrn.urn)
+
+ if lease_filter_dict:
+ logger.debug("IOTLAB_API \tGetLeases \
+ \r\n leasefilter %s" % ( lease_filter_dict))
+
+ filter_dict_functions = {
+ 'slice_hrn' : IotlabTestbedAPI.filter_lease_name,
+ 't_from' : IotlabTestbedAPI.filter_lease_start_time
+ }
+ reservation_list = list(unfiltered_reservation_list)
+ for filter_type in lease_filter_dict:
+ logger.debug("IOTLAB_API \tGetLeases reservation_list %s" \
+ % (reservation_list))
+ reservation_list = filter_dict_functions[filter_type](\
+ reservation_list,lease_filter_dict[filter_type] )
+
+ # Filter the reservation list with a maximum timespan so that the
+ # leases and jobs running after this timestamp do not appear
+ # in the result leases.
+ # if 'start_time' in :
+ # if resa['start_time'] < lease_filter_dict['start_time']:
+ # reservation_list.append(resa)
+
+
+ # if 'name' in lease_filter_dict and \
+ # lease_filter_dict['name'] == resa['slice_hrn']:
+ # reservation_list.append(resa)
+
+
if lease_filter_dict is None:
reservation_list = unfiltered_reservation_list
-
-
- self.update_jobs_in_slabdb(job_oar_list, jobs_psql_id_list)
-
- logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
- %(reservation_list))
+
+ self.iotlab_db.update_experiments_in_additional_sfa_db(job_oar_list, jobs_psql_id_list)
+
+ logger.debug(" IOTLAB_API.PY \tGetLeases reservation_list %s"
+ % (reservation_list))
return reservation_list
-
-
-
+
+
+
#TODO FUNCTIONS SECTION 04/07/2012 SA
#@staticmethod
#def UnBindObjectFromPeer( auth, object_type, object_id, shortname):
#""" This method is a hopefully temporary hack to let the sfa correctly
- #detach the objects it creates from a remote peer object. This is
- #needed so that the sfa federation link can work in parallel with
- #RefreshPeer, as RefreshPeer depends on remote objects being correctly
+ #detach the objects it creates from a remote peer object. This is
+ #needed so that the sfa federation link can work in parallel with
+ #RefreshPeer, as RefreshPeer depends on remote objects being correctly
#marked.
#Parameters:
#auth : struct, API authentication structure
- #AuthMethod : string, Authentication method to use
+ #AuthMethod : string, Authentication method to use
#object_type : string, Object type, among 'site','person','slice',
#'node','key'
#object_id : int, object_id
- #shortname : string, peer shortname
+ #shortname : string, peer shortname
#FROM PLC DOC
-
+
#"""
- #logger.warning("SLABDRIVER \tUnBindObjectFromPeer EMPTY-\
+ #logger.warning("IOTLAB_API \tUnBindObjectFromPeer EMPTY-\
#DO NOTHING \r\n ")
- #return
-
- ##TODO Is BindObjectToPeer still necessary ? Currently does nothing
+ #return
+
+ ##TODO Is BindObjectToPeer still necessary ? Currently does nothing
##04/07/2012 SA
#|| Commented out 28/05/13 SA
#def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
#remote_object_id=None):
- #"""This method is a hopefully temporary hack to let the sfa correctly
- #attach the objects it creates to a remote peer object. This is needed
- #so that the sfa federation link can work in parallel with RefreshPeer,
+ #"""This method is a hopefully temporary hack to let the sfa correctly
+ #attach the objects it creates to a remote peer object. This is needed
+ #so that the sfa federation link can work in parallel with RefreshPeer,
#as RefreshPeer depends on remote objects being correctly marked.
#Parameters:
- #shortname : string, peer shortname
- #remote_object_id : int, remote object_id, set to 0 if unknown
+ #shortname : string, peer shortname
+ #remote_object_id : int, remote object_id, set to 0 if unknown
#FROM PLC API DOC
-
+
#"""
- #logger.warning("SLABDRIVER \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
+ #logger.warning("IOTLAB_API \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
#return
-
+
##TODO UpdateSlice 04/07/2012 SA || Commented out 28/05/13 SA
- ##Funciton should delete and create another job since oin senslab slice=job
- #def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
- #"""Updates the parameters of an existing slice with the values in
+ ##Function should delete and create another job since in iotlab slice=job
+ #def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
+ #"""Updates the parameters of an existing slice with the values in
#slice_fields.
- #Users may only update slices of which they are members.
- #PIs may update any of the slices at their sites, or any slices of
+ #Users may only update slices of which they are members.
+ #PIs may update any of the slices at their sites, or any slices of
#which they are members. Admins may update any slice.
#Only PIs and admins may update max_nodes. Slices cannot be renewed
#(by updating the expires parameter) more than 8 weeks into the future.
#Returns 1 if successful, faults otherwise.
#FROM PLC API DOC
-
- #"""
- #logger.warning("SLABDRIVER UpdateSlice EMPTY - DO NOTHING \r\n ")
+
+ #"""
+ #logger.warning("IOTLAB_API UpdateSlice EMPTY - DO NOTHING \r\n ")
#return
-
+
#Unused SA 30/05/13, we only update the user's key or we delete it.
##TODO UpdatePerson 04/07/2012 SA
- #def UpdatePerson(self, slab_hrn, federated_hrn, person_fields=None):
- #"""Updates a person. Only the fields specified in person_fields
+ #def UpdatePerson(self, iotlab_hrn, federated_hrn, person_fields=None):
+ #"""Updates a person. Only the fields specified in person_fields
#are updated, all other fields are left untouched.
#Users and techs can only update themselves. PIs can only update
#themselves and other non-PIs at their sites.
#Returns 1 if successful, faults otherwise.
#FROM PLC API DOC
-
+
#"""
- ##new_row = FederatedToSenslab(slab_hrn, federated_hrn)
- ##slab_dbsession.add(new_row)
- ##slab_dbsession.commit()
-
- #logger.debug("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
+ ##new_row = FederatedToIotlab(iotlab_hrn, federated_hrn)
+ ##self.iotlab_db.testbed_session.add(new_row)
+ ##self.iotlab_db.testbed_session.commit()
+
+ #logger.debug("IOTLAB_API UpdatePerson EMPTY - DO NOTHING \r\n ")
#return
-
+
@staticmethod
def GetKeys(key_filter=None):
"""Returns a dict of dict based on the key string. Each dict entry
- contains the key id, the ssh key, the user's email and the
+ contains the key id, the ssh key, the user's email and the
user's hrn.
- If key_filter is specified and is an array of key identifiers,
- only keys matching the filter will be returned.
+ If key_filter is specified and is an array of key identifiers,
+ only keys matching the filter will be returned.
Admin may query all keys. Non-admins may only query their own keys.
FROM PLC API DOC
-
- :return: dict with ssh key as key and dicts as value.
+
+ :returns: dict with ssh key as key and dicts as value.
:rtype: dict
"""
if key_filter is None:
keys = dbsession.query(RegKey).options(joinedload('reg_user')).all()
- else :
+ else:
keys = dbsession.query(RegKey).options(joinedload('reg_user')).filter(RegKey.key.in_(key_filter)).all()
-
+
key_dict = {}
for key in keys:
- key_dict[key.key] = {'key_id': key.key_id, 'key': key.key, \
- 'email': key.reg_user.email, 'hrn':key.reg_user.hrn}
-
+ key_dict[key.key] = {'key_id': key.key_id, 'key': key.key,
+ 'email': key.reg_user.email,
+ 'hrn': key.reg_user.hrn}
+
#ldap_rslt = self.ldap.LdapSearch({'enabled']=True})
#user_by_email = dict((user[1]['mail'][0], user[1]['sshPublicKey']) \
#for user in ldap_rslt)
-
- logger.debug("SLABDRIVER GetKeys -key_dict %s \r\n " %(key_dict))
+
+ logger.debug("IOTLAB_API GetKeys -key_dict %s \r\n " % (key_dict))
return key_dict
-
+
#TODO : test
def DeleteKey(self, user_record, key_string):
- """ Deletes a key in the LDAP entry of the specified user.
+ """Deletes a key in the LDAP entry of the specified user.
+
Removes the key_string from the user's key list and updates the LDAP
- user's entry with the new key attributes.
+ user's entry with the new key attributes.
+
:param key_string: The ssh key to remove
:param user_record: User's record
:type key_string: string
:type user_record: dict
- :return: True if sucessful, False if not.
+ :returns: True if successful, False if not.
:rtype: Boolean
-
+
"""
all_user_keys = user_record['keys']
all_user_keys.remove(key_string)
- new_attributes = {'sshPublicKey':all_user_keys}
+ new_attributes = {'sshPublicKey':all_user_keys}
ret = self.ldap.LdapModifyUser(user_record, new_attributes)
- logger.debug("SLABDRIVER DeleteKey %s- "%(ret))
+ logger.debug("IOTLAB_API DeleteKey %s- " % (ret))
return ret['bool']
-
-
-
- @staticmethod
- def _sql_get_slice_info( slice_filter ):
+
+
+
+ @staticmethod
+ def _sql_get_slice_info(slice_filter):
"""
- Get the slice record based on the slice hrn. Fetch the record of the
- user associated with the slice by usingjoinedload based on t
- he reg_researcher relationship.
+ Get the slice record based on the slice hrn. Fetch the record of the
+ user associated with the slice by using joinedload based on the
+ reg_researcher relationship.
+
:param slice_filter: the slice hrn we are looking for
:type slice_filter: string
- :return: the slice record enhanced with the user's information if the
- slice was found, None it wasn't.
+ :returns: the slice record enhanced with the user's information if the
+ slice was found, None if it wasn't.
+
:rtype: dict or None.
"""
- #DO NOT USE RegSlice - reg_researchers to get the hrn
- #of the user otherwise will mess up the RegRecord in
+ #DO NOT USE RegSlice - reg_researchers to get the hrn
+ #of the user otherwise will mess up the RegRecord in
#Resolve, don't know why - SA 08/08/2012
-
- #Only one entry for one user = one slice in slab_xp table
+
+ #Only one entry for one user = one slice in testbed_xp table
#slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
- raw_slicerec = dbsession.query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn = slice_filter).first()
+ raw_slicerec = dbsession.query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn=slice_filter).first()
#raw_slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
- if raw_slicerec:
+ if raw_slicerec:
#load_reg_researcher
#raw_slicerec.reg_researchers
raw_slicerec = raw_slicerec.__dict__
- logger.debug(" SLABDRIVER \t get_slice_info slice_filter %s \
- raw_slicerec %s"%(slice_filter, raw_slicerec))
+ logger.debug(" IOTLAB_API \t _sql_get_slice_info slice_filter %s \
+ raw_slicerec %s" % (slice_filter, raw_slicerec))
slicerec = raw_slicerec
#only one researcher per slice so take the first one
#slicerec['reg_researchers'] = raw_slicerec['reg_researchers']
#del slicerec['reg_researchers']['_sa_instance_state']
return slicerec
-
- else :
+
+ else:
return None
-
- @staticmethod
- def _sql_get_slice_info_from_user(slice_filter ):
+
+ @staticmethod
+ def _sql_get_slice_info_from_user(slice_filter):
"""
- Get the slice record based on the user recordid by using a joinedload
+ Get the slice record based on the user recordid by using a joinedload
on the relationship reg_slices_as_researcher. Format the sql record
into a dict with the mandatory fields for user and slice.
- :return: dict with slice record and user record if the record was found
+ :returns: dict with slice record and user record if the record was found
based on the user's id, None if not..
:rtype:dict or None..
"""
#slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
- raw_slicerec = dbsession.query(RegUser).options(joinedload('reg_slices_as_researcher')).filter_by(record_id = slice_filter).first()
+ raw_slicerec = dbsession.query(RegUser).options(joinedload('reg_slices_as_researcher')).filter_by(record_id=slice_filter).first()
#raw_slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
- #Put it in correct order
- user_needed_fields = ['peer_authority', 'hrn', 'last_updated', 'classtype', 'authority', 'gid', 'record_id', 'date_created', 'type', 'email', 'pointer']
- slice_needed_fields = ['peer_authority', 'hrn', 'last_updated', 'classtype', 'authority', 'gid', 'record_id', 'date_created', 'type', 'pointer']
+ #Put it in correct order
+ user_needed_fields = ['peer_authority', 'hrn', 'last_updated',
+ 'classtype', 'authority', 'gid', 'record_id',
+ 'date_created', 'type', 'email', 'pointer']
+ slice_needed_fields = ['peer_authority', 'hrn', 'last_updated',
+ 'classtype', 'authority', 'gid', 'record_id',
+ 'date_created', 'type', 'pointer']
if raw_slicerec:
#raw_slicerec.reg_slices_as_researcher
raw_slicerec = raw_slicerec.__dict__
slicerec = {}
slicerec = \
- dict([(k, raw_slicerec['reg_slices_as_researcher'][0].__dict__[k]) \
- for k in slice_needed_fields])
- slicerec['reg_researchers'] = dict([(k, raw_slicerec[k]) \
- for k in user_needed_fields])
+ dict([(k, raw_slicerec[
+ 'reg_slices_as_researcher'][0].__dict__[k])
+ for k in slice_needed_fields])
+ slicerec['reg_researchers'] = dict([(k, raw_slicerec[k])
+ for k in user_needed_fields])
#TODO Handle multiple slices for one user SA 10/12/12
#for now only take the first slice record associated to the rec user
##slicerec = raw_slicerec['reg_slices_as_researcher'][0].__dict__
#del raw_slicerec['reg_slices_as_researcher']
#slicerec['reg_researchers'] = raw_slicerec
##del slicerec['_sa_instance_state']
-
+
return slicerec
-
+
else:
return None
-
- def _get_slice_records(self, slice_filter = None, \
- slice_filter_type = None):
+
+ def _get_slice_records(self, slice_filter=None,
+ slice_filter_type=None):
"""
Get the slice record depending on the slice filter and its type.
:param slice_filter: Can be either the slice hrn or the user's record
:type slice_filter: string
:param slice_filter_type: describes the slice filter type used, can be
slice_hrn or record_id_user
- :type: string
- :return: the slice record
- :rtype:dict
- ..seealso:_sql_get_slice_info_from_user
- ..seealso: _sql_get_slice_info
+ :type: string
+ :returns: the slice record
+ :rtype:dict
+ .. seealso::_sql_get_slice_info_from_user
+ .. seealso:: _sql_get_slice_info
"""
-
+
#Get list of slices based on the slice hrn
if slice_filter_type == 'slice_hrn':
-
+
#if get_authority(slice_filter) == self.root_auth:
- #login = slice_filter.split(".")[1].split("_")[0]
-
+ #login = slice_filter.split(".")[1].split("_")[0]
+
slicerec = self._sql_get_slice_info(slice_filter)
-
+
if slicerec is None:
- return None
- #return login, None
-
- #Get slice based on user id
- if slice_filter_type == 'record_id_user':
-
+ return None
+ #return login, None
+
+ #Get slice based on user id
+ if slice_filter_type == 'record_id_user':
+
slicerec = self._sql_get_slice_info_from_user(slice_filter)
-
+
if slicerec:
fixed_slicerec_dict = slicerec
- #At this point if there is no login it means
+ #At this point if there is no login it means
#record_id_user filter has been used for filtering
#if login is None :
- ##If theslice record is from senslab
+ ##If the slice record is from iotlab
#if fixed_slicerec_dict['peer_authority'] is None:
- #login = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
+ #login = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
#return login, fixed_slicerec_dict
- return fixed_slicerec_dict
-
-
-
- def GetSlices(self, slice_filter = None, slice_filter_type = None, \
- login=None):
- """ Get the slice records from the slab db and add lease information
- if any.
-
+ return fixed_slicerec_dict
+ else:
+ return None
+
+
+ def GetSlices(self, slice_filter=None, slice_filter_type=None,
+ login=None):
+ """Get the slice records from the iotlab db and add lease information
+ if any.
+
:param slice_filter: can be the slice hrn or slice record id in the db
- depending on the slice_filter_type.
- :param slice_filter_type: defines the type of the filtering used, Can be
- either 'slice_hrn' or "record_id'.
+ depending on the slice_filter_type.
+ :param slice_filter_type: defines the type of the filtering used, Can be
+ either 'slice_hrn' or "record_id'.
:type slice_filter: string
:type slice_filter_type: string
- :return: a slice dict if slice_filter and slice_filter_type
- are specified and a matching entry is found in the db. The result
- is put into a list.Or a list of slice dictionnaries if no filters are
- specified.
-
+ :returns: a slice dict if slice_filter and slice_filter_type
+ are specified and a matching entry is found in the db. The result
+ is put into a list. Or a list of slice dictionaries if no filters
+ are specified.
+
:rtype: list
+
"""
#login = None
authorized_filter_types_list = ['slice_hrn', 'record_id_user']
return_slicerec_dictlist = []
-
- #First try to get information on the slice based on the filter provided
+
+ #First try to get information on the slice based on the filter provided
if slice_filter_type in authorized_filter_types_list:
- fixed_slicerec_dict = \
- self._get_slice_records(slice_filter, slice_filter_type)
+ fixed_slicerec_dict = self._get_slice_records(slice_filter,
+ slice_filter_type)
+ # if the slice was not found in the sfa db
+ if fixed_slicerec_dict is None:
+ return return_slicerec_dictlist
+
slice_hrn = fixed_slicerec_dict['hrn']
-
- logger.debug(" SLABDRIVER \tGetSlices login %s \
+
+ logger.debug(" IOTLAB_API \tGetSlices login %s \
slice record %s slice_filter %s \
- slice_filter_type %s " %(login, \
- fixed_slicerec_dict, slice_filter, \
+ slice_filter_type %s " % (login,
+ fixed_slicerec_dict, slice_filter,
slice_filter_type))
-
-
- #Now we have the slice record fixed_slicerec_dict, get the
+
+
+ #Now we have the slice record fixed_slicerec_dict, get the
#jobs associated to this slice
leases_list = []
-
- leases_list = self.GetLeases(login = login)
- #If no job is running or no job scheduled
- #return only the slice record
+
+ leases_list = self.GetLeases(login=login)
+ #If no job is running or no job scheduled
+ #return only the slice record
if leases_list == [] and fixed_slicerec_dict:
return_slicerec_dictlist.append(fixed_slicerec_dict)
-
- #If several jobs for one slice , put the slice record into
+
+ # if the jobs running don't belong to the user/slice we are looking
+ # for
+ leases_hrn = [lease['slice_hrn'] for lease in leases_list]
+ if slice_hrn not in leases_hrn:
+ return_slicerec_dictlist.append(fixed_slicerec_dict)
+ #If several jobs for one slice , put the slice record into
# each lease information dict
-
-
- for lease in leases_list :
- slicerec_dict = {}
- logger.debug("SLABDRIVER.PY \tGetSlices slice_filter %s \
- \ lease['slice_hrn'] %s" \
- %(slice_filter, lease['slice_hrn']))
- if lease['slice_hrn'] == slice_hrn:
- slicerec_dict['slice_hrn'] = lease['slice_hrn']
- slicerec_dict['hrn'] = lease['slice_hrn']
- slicerec_dict['user'] = lease['user']
+ for lease in leases_list:
+ slicerec_dict = {}
+ logger.debug("IOTLAB_API.PY \tGetSlices slice_filter %s \
+ \t lease['slice_hrn'] %s"
+ % (slice_filter, lease['slice_hrn']))
+ if lease['slice_hrn'] == slice_hrn:
slicerec_dict['oar_job_id'] = lease['lease_id']
- slicerec_dict.update({'list_node_ids':{'hostname':lease['reserved_nodes']}})
- slicerec_dict.update({'node_ids':lease['reserved_nodes']})
-
#Update lease dict with the slice record
if fixed_slicerec_dict:
fixed_slicerec_dict['oar_job_id'] = []
- fixed_slicerec_dict['oar_job_id'].append(slicerec_dict['oar_job_id'])
+ fixed_slicerec_dict['oar_job_id'].append(
+ slicerec_dict['oar_job_id'])
slicerec_dict.update(fixed_slicerec_dict)
#slicerec_dict.update({'hrn':\
#str(fixed_slicerec_dict['slice_hrn'])})
-
- return_slicerec_dictlist.append(slicerec_dict)
- logger.debug("SLABDRIVER.PY \tGetSlices \
- OHOHOHOH %s" %(return_slicerec_dictlist ))
-
- logger.debug("SLABDRIVER.PY \tGetSlices \
+ slicerec_dict['slice_hrn'] = lease['slice_hrn']
+ slicerec_dict['hrn'] = lease['slice_hrn']
+ slicerec_dict['user'] = lease['user']
+ slicerec_dict.update(
+ {'list_node_ids':
+ {'hostname': lease['reserved_nodes']}})
+ slicerec_dict.update({'node_ids': lease['reserved_nodes']})
+
+
+
+ return_slicerec_dictlist.append(slicerec_dict)
+ logger.debug("IOTLAB_API.PY \tGetSlices \
+ OHOHOHOH %s" %(return_slicerec_dictlist))
+
+ logger.debug("IOTLAB_API.PY \tGetSlices \
slicerec_dict %s return_slicerec_dictlist %s \
lease['reserved_nodes'] \
- %s" %(slicerec_dict, return_slicerec_dictlist, \
- lease['reserved_nodes'] ))
-
- logger.debug("SLABDRIVER.PY \tGetSlices RETURN \
- return_slicerec_dictlist %s" \
- %(return_slicerec_dictlist))
-
+ %s" % (slicerec_dict, return_slicerec_dictlist,
+ lease['reserved_nodes']))
+
+ logger.debug("IOTLAB_API.PY \tGetSlices RETURN \
+ return_slicerec_dictlist %s"
+ % (return_slicerec_dictlist))
+
return return_slicerec_dictlist
-
-
+
+
else:
- #Get all slices from the senslab sfa database ,
- #put them in dict format
- #query_slice_list = dbsession.query(RegRecord).all()
- query_slice_list = dbsession.query(RegSlice).options(joinedload('reg_researchers')).all()
+ #Get all slices from the iotlab sfa database ,
+ #put them in dict format
+ #query_slice_list = dbsession.query(RegRecord).all()
+ query_slice_list = \
+ dbsession.query(RegSlice).options(joinedload('reg_researchers')).all()
- for record in query_slice_list:
+ for record in query_slice_list:
tmp = record.__dict__
tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
#del tmp['reg_researchers']['_sa_instance_state']
return_slicerec_dictlist.append(tmp)
#return_slicerec_dictlist.append(record.__dict__)
-
+
#Get all the jobs reserved nodes
leases_list = self.GetReservedNodes()
-
-
+
for fixed_slicerec_dict in return_slicerec_dictlist:
- slicerec_dict = {}
- #Check if the slice belongs to a senslab user
+ slicerec_dict = {}
+ #Check if the slice belongs to a iotlab user
if fixed_slicerec_dict['peer_authority'] is None:
- owner = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
+ owner = fixed_slicerec_dict['hrn'].split(
+ ".")[1].split("_")[0]
else:
owner = None
- for lease in leases_list:
+ for lease in leases_list:
if owner == lease['user']:
slicerec_dict['oar_job_id'] = lease['lease_id']
#for reserved_node in lease['reserved_nodes']:
- logger.debug("SLABDRIVER.PY \tGetSlices lease %s "\
- %(lease ))
-
- slicerec_dict.update({'node_ids':lease['reserved_nodes']})
- slicerec_dict.update({'list_node_ids':{'hostname':lease['reserved_nodes']}})
+ logger.debug("IOTLAB_API.PY \tGetSlices lease %s "
+ % (lease))
slicerec_dict.update(fixed_slicerec_dict)
+ slicerec_dict.update({'node_ids':
+ lease['reserved_nodes']})
+ slicerec_dict.update({'list_node_ids':
+ {'hostname':
+ lease['reserved_nodes']}})
+
#slicerec_dict.update({'hrn':\
#str(fixed_slicerec_dict['slice_hrn'])})
#return_slicerec_dictlist.append(slicerec_dict)
fixed_slicerec_dict.update(slicerec_dict)
-
- logger.debug("SLABDRIVER.PY \tGetSlices RETURN \
+
+ logger.debug("IOTLAB_API.PY \tGetSlices RETURN \
return_slicerec_dictlist %s \slice_filter %s " \
%(return_slicerec_dictlist, slice_filter))
return return_slicerec_dictlist
-
- #Update slice unused, therefore sfa_fields_to_slab_fields unused
+
+ #Update slice unused, therefore sfa_fields_to_iotlab_fields unused
#SA 30/05/13
#@staticmethod
- #def sfa_fields_to_slab_fields(sfa_type, hrn, record):
+ #def sfa_fields_to_iotlab_fields(sfa_type, hrn, record):
#"""
#"""
- #slab_record = {}
+ #iotlab_record = {}
##for field in record:
- ## slab_record[field] = record[field]
-
+ ## iotlab_record[field] = record[field]
+
#if sfa_type == "slice":
- ##instantion used in get_slivers ?
- #if not "instantiation" in slab_record:
- #slab_record["instantiation"] = "senslab-instantiated"
- ##slab_record["hrn"] = hrn_to_pl_slicename(hrn)
- ##Unused hrn_to_pl_slicename because Slab's hrn already
+ ##instantion used in get_slivers ?
+ #if not "instantiation" in iotlab_record:
+ #iotlab_record["instantiation"] = "iotlab-instantiated"
+ ##iotlab_record["hrn"] = hrn_to_pl_slicename(hrn)
+ ##Unused hrn_to_pl_slicename because Iotlab's hrn already
##in the appropriate form SA 23/07/12
- #slab_record["hrn"] = hrn
- #logger.debug("SLABDRIVER.PY sfa_fields_to_slab_fields \
- #slab_record %s " %(slab_record['hrn']))
+ #iotlab_record["hrn"] = hrn
+ #logger.debug("IOTLAB_API.PY sfa_fields_to_iotlab_fields \
+ #iotlab_record %s " %(iotlab_record['hrn']))
#if "url" in record:
- #slab_record["url"] = record["url"]
+ #iotlab_record["url"] = record["url"]
#if "description" in record:
- #slab_record["description"] = record["description"]
+ #iotlab_record["description"] = record["description"]
#if "expires" in record:
- #slab_record["expires"] = int(record["expires"])
-
+ #iotlab_record["expires"] = int(record["expires"])
+
##nodes added by OAR only and then imported to SFA
##elif type == "node":
- ##if not "hostname" in slab_record:
+ ##if not "hostname" in iotlab_record:
##if not "hostname" in record:
##raise MissingSfaInfo("hostname")
- ##slab_record["hostname"] = record["hostname"]
- ##if not "model" in slab_record:
- ##slab_record["model"] = "geni"
-
- ##One authority only
+ ##iotlab_record["hostname"] = record["hostname"]
+ ##if not "model" in iotlab_record:
+ ##iotlab_record["model"] = "geni"
+
+ ##One authority only
##elif type == "authority":
- ##slab_record["login_base"] = hrn_to_slab_login_base(hrn)
+ ##iotlab_record["login_base"] = hrn_to_iotlab_login_base(hrn)
+
+ ##if not "name" in iotlab_record:
+ ##iotlab_record["name"] = hrn
+
+ ##if not "abbreviated_name" in iotlab_record:
+ ##iotlab_record["abbreviated_name"] = hrn
+
+ ##if not "enabled" in iotlab_record:
+ ##iotlab_record["enabled"] = True
+
+ ##if not "is_public" in iotlab_record:
+ ##iotlab_record["is_public"] = True
+
+ #return iotlab_record
+
+
- ##if not "name" in slab_record:
- ##slab_record["name"] = hrn
- ##if not "abbreviated_name" in slab_record:
- ##slab_record["abbreviated_name"] = hrn
- ##if not "enabled" in slab_record:
- ##slab_record["enabled"] = True
- ##if not "is_public" in slab_record:
- ##slab_record["is_public"] = True
- #return slab_record
-
-
-
-
-
-
-
-
\ No newline at end of file
--- /dev/null
+"""
+Implements what a driver should provide for SFA to work.
+"""
+from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
+from sfa.util.sfalogging import logger
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
+
+from sfa.managers.driver import Driver
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+from sfa.util.xrn import Xrn, hrn_to_urn, get_authority
+
+from sfa.iotlab.iotlabaggregate import IotlabAggregate, iotlab_xrn_to_hostname
+from sfa.iotlab.iotlabslices import IotlabSlices
+
+
+from sfa.iotlab.iotlabapi import IotlabTestbedAPI
+
+
+class IotlabDriver(Driver):
+ """ Iotlab Driver class inherited from Driver generic class.
+
+ Contains methods compliant with the SFA standard and the testbed
+ infrastructure (calls to LDAP and OAR).
+
+ .. seealso::: Driver class
+
+ """
+ def __init__(self, config):
+ """
+
+ Sets the iotlab SFA config parameters,
+        instantiates the testbed api and the iotlab database.
+
+ :param config: iotlab SFA configuration object
+ :type config: Config object
+
+ """
+ Driver.__init__(self, config)
+ self.config = config
+ self.iotlab_api = IotlabTestbedAPI(config)
+ self.cache = None
+
+ def augment_records_with_testbed_info(self, record_list):
+ """
+
+ Adds specific testbed info to the records.
+
+ :param record_list: list of sfa dictionaries records
+ :type record_list: list
+ :returns: list of records with extended information in each record
+ :rtype: list
+
+ """
+ return self.fill_record_info(record_list)
+
+ def fill_record_info(self, record_list):
+ """
+
+ For each SFA record, fill in the iotlab specific and SFA specific
+ fields in the record.
+
+ :param record_list: list of sfa dictionaries records
+ :type record_list: list
+ :returns: list of records with extended information in each record
+ :rtype: list
+
+        .. warning:: Should not be modifying record_list directly because the
+            modifications are kept outside the method's scope. However, there is
+            no other way to do it given the way it's called in registry manager.
+
+ """
+
+ logger.debug("IOTLABDRIVER \tfill_record_info records %s "
+ % (record_list))
+ if not isinstance(record_list, list):
+ record_list = [record_list]
+
+ try:
+ for record in record_list:
+
+ if str(record['type']) == 'node':
+ # look for node info using GetNodes
+ # the record is about one node only
+ filter_dict = {'hrn': [record['hrn']]}
+ node_info = self.iotlab_api.GetNodes(filter_dict)
+ # the node_info is about one node only, but it is formatted
+ # as a list
+ record.update(node_info[0])
+ logger.debug("IOTLABDRIVER.PY \t \
+ fill_record_info NODE" % (record))
+
+ #If the record is a SFA slice record, then add information
+ #about the user of this slice. This kind of
+ #information is in the Iotlab's DB.
+ if str(record['type']) == 'slice':
+ if 'reg_researchers' in record and isinstance(record
+ ['reg_researchers'],
+ list):
+ record['reg_researchers'] = \
+ record['reg_researchers'][0].__dict__
+ record.update(
+ {'PI': [record['reg_researchers']['hrn']],
+ 'researcher': [record['reg_researchers']['hrn']],
+ 'name': record['hrn'],
+ 'oar_job_id': [],
+ 'node_ids': [],
+ 'person_ids': [record['reg_researchers']
+ ['record_id']],
+ # For client_helper.py compatibility
+ 'geni_urn': '',
+ # For client_helper.py compatibility
+ 'keys': '',
+ # For client_helper.py compatibility
+ 'key_ids': ''})
+
+ #Get iotlab slice record and oar job id if any.
+ recslice_list = self.iotlab_api.GetSlices(
+ slice_filter=str(record['hrn']),
+ slice_filter_type='slice_hrn')
+
+ logger.debug("IOTLABDRIVER \tfill_record_info \
+ TYPE SLICE RECUSER record['hrn'] %s record['oar_job_id']\
+ %s " % (record['hrn'], record['oar_job_id']))
+ del record['reg_researchers']
+ try:
+ for rec in recslice_list:
+ logger.debug("IOTLABDRIVER\r\n \t \
+ fill_record_info oar_job_id %s "
+ % (rec['oar_job_id']))
+
+ record['node_ids'] = [self.iotlab_api.root_auth +
+ '.' + hostname for hostname
+ in rec['node_ids']]
+ except KeyError:
+ pass
+
+ logger.debug("IOTLABDRIVER.PY \t fill_record_info SLICE \
+ recslice_list %s \r\n \t RECORD %s \r\n \
+ \r\n" % (recslice_list, record))
+
+ if str(record['type']) == 'user':
+ #The record is a SFA user record.
+ #Get the information about his slice from Iotlab's DB
+ #and add it to the user record.
+ recslice_list = self.iotlab_api.GetSlices(
+ slice_filter=record['record_id'],
+ slice_filter_type='record_id_user')
+
+ logger.debug("IOTLABDRIVER.PY \t fill_record_info \
+ TYPE USER recslice_list %s \r\n \t RECORD %s \r\n"
+ % (recslice_list, record))
+ #Append slice record in records list,
+ #therefore fetches user and slice info again(one more loop)
+ #Will update PIs and researcher for the slice
+
+ recuser = recslice_list[0]['reg_researchers']
+ logger.debug("IOTLABDRIVER.PY \t fill_record_info USER \
+ recuser %s \r\n \r\n" % (recuser))
+ recslice = {}
+ recslice = recslice_list[0]
+ recslice.update(
+ {'PI': [recuser['hrn']],
+ 'researcher': [recuser['hrn']],
+ 'name': record['hrn'],
+ 'node_ids': [],
+ 'oar_job_id': [],
+ 'person_ids': [recuser['record_id']]})
+ try:
+ for rec in recslice_list:
+ recslice['oar_job_id'].append(rec['oar_job_id'])
+ except KeyError:
+ pass
+
+ recslice.update({'type': 'slice',
+ 'hrn': recslice_list[0]['hrn']})
+
+ #GetPersons takes [] as filters
+ user_iotlab = self.iotlab_api.GetPersons([record])
+
+ record.update(user_iotlab[0])
+ #For client_helper.py compatibility
+ record.update(
+ {'geni_urn': '',
+ 'keys': '',
+ 'key_ids': ''})
+ record_list.append(recslice)
+
+ logger.debug("IOTLABDRIVER.PY \t \
+ fill_record_info ADDING SLICE\
+ INFO TO USER records %s" % (record_list))
+
+ except TypeError, error:
+ logger.log_exc("IOTLABDRIVER \t fill_record_info EXCEPTION %s"
+ % (error))
+
+ return record_list
+
+ def sliver_status(self, slice_urn, slice_hrn):
+ """
+ Receive a status request for slice named urn/hrn
+ urn:publicid:IDN+iotlab+nturro_slice hrn iotlab.nturro_slice
+ shall return a structure as described in
+ http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
+ NT : not sure if we should implement this or not, but used by sface.
+
+ :param slice_urn: slice urn
+ :type slice_urn: string
+ :param slice_hrn: slice hrn
+ :type slice_hrn: string
+
+ """
+
+ #First get the slice with the slice hrn
+ slice_list = self.iotlab_api.GetSlices(slice_filter=slice_hrn,
+ slice_filter_type='slice_hrn')
+
+ if len(slice_list) == 0:
+ raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
+
+        #Used for fetching the user info which comes along the slice info
+ one_slice = slice_list[0]
+
+ #Make a list of all the nodes hostnames in use for this slice
+ slice_nodes_list = []
+ slice_nodes_list = one_slice['node_ids']
+ #Get all the corresponding nodes details
+ nodes_all = self.iotlab_api.GetNodes(
+ {'hostname': slice_nodes_list},
+ ['node_id', 'hostname', 'site', 'boot_state'])
+ nodeall_byhostname = dict([(one_node['hostname'], one_node)
+ for one_node in nodes_all])
+
+ for single_slice in slice_list:
+ #For compatibility
+ top_level_status = 'empty'
+ result = {}
+ result.fromkeys(
+ ['geni_urn', 'geni_error', 'iotlab_login', 'geni_status',
+ 'geni_resources'], None)
+ # result.fromkeys(\
+ # ['geni_urn','geni_error', 'pl_login','geni_status',
+ # 'geni_resources'], None)
+ # result['pl_login'] = one_slice['reg_researchers'][0].hrn
+ result['iotlab_login'] = one_slice['user']
+ logger.debug("Slabdriver - sliver_status Sliver status \
+ urn %s hrn %s single_slice %s \r\n "
+ % (slice_urn, slice_hrn, single_slice))
+
+ if 'node_ids' not in single_slice:
+ #No job in the slice
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = []
+ return result
+
+ top_level_status = 'ready'
+
+ #A job is running on Iotlab for this slice
+ # report about the local nodes that are in the slice only
+
+ result['geni_urn'] = slice_urn
+
+ resources = []
+ for node_hostname in single_slice['node_ids']:
+ res = {}
+ res['iotlab_hostname'] = node_hostname
+ res['iotlab_boot_state'] = \
+ nodeall_byhostname[node_hostname]['boot_state']
+
+ #res['pl_hostname'] = node['hostname']
+ #res['pl_boot_state'] = \
+ #nodeall_byhostname[node['hostname']]['boot_state']
+ #res['pl_last_contact'] = strftime(self.time_format, \
+ #gmtime(float(timestamp)))
+ sliver_id = Xrn(
+ slice_urn, type='slice',
+ id=nodeall_byhostname[node_hostname]['node_id']).urn
+
+ res['geni_urn'] = sliver_id
+ #node_name = node['hostname']
+ if nodeall_byhostname[node_hostname]['boot_state'] == 'Alive':
+
+ res['geni_status'] = 'ready'
+ else:
+ res['geni_status'] = 'failed'
+ top_level_status = 'failed'
+
+ res['geni_error'] = ''
+
+ resources.append(res)
+
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = resources
+ logger.debug("IOTLABDRIVER \tsliver_statusresources %s res %s "
+ % (resources, res))
+ return result
+
+ @staticmethod
+ def get_user_record(hrn):
+ """
+
+ Returns the user record based on the hrn from the SFA DB .
+
+ :param hrn: user's hrn
+ :type hrn: string
+ :returns: user record from SFA database
+ :rtype: RegUser
+
+ """
+ return dbsession.query(RegRecord).filter_by(hrn=hrn).first()
+
+ def testbed_name(self):
+ """
+
+ Returns testbed's name.
+ :returns: testbed authority name.
+ :rtype: string
+
+ """
+ return self.hrn
+
+ # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+ def aggregate_version(self):
+ """
+
+ Returns the testbed's supported rspec advertisement and request
+ versions.
+        :returns: rspec versions supported as a dictionary.
+ :rtype: dict
+
+ """
+ version_manager = VersionManager()
+ ad_rspec_versions = []
+ request_rspec_versions = []
+ for rspec_version in version_manager.versions:
+ if rspec_version.content_type in ['*', 'ad']:
+ ad_rspec_versions.append(rspec_version.to_dict())
+ if rspec_version.content_type in ['*', 'request']:
+ request_rspec_versions.append(rspec_version.to_dict())
+ return {
+ 'testbed': self.testbed_name(),
+ 'geni_request_rspec_versions': request_rspec_versions,
+ 'geni_ad_rspec_versions': ad_rspec_versions}
+
+ def _get_requested_leases_list(self, rspec):
+ """
+ Process leases in rspec depending on the rspec version (format)
+ type. Find the lease requests in the rspec and creates
+ a lease request list with the mandatory information ( nodes,
+ start time and duration) of the valid leases (duration above or
+ equal to the iotlab experiment minimum duration).
+
+ :param rspec: rspec request received.
+ :type rspec: RSpec
+ :returns: list of lease requests found in the rspec
+ :rtype: list
+ """
+ requested_lease_list = []
+ for lease in rspec.version.get_leases():
+ single_requested_lease = {}
+ logger.debug("IOTLABDRIVER.PY \t \
+ _get_requested_leases_list lease %s " % (lease))
+
+ if not lease.get('lease_id'):
+ if get_authority(lease['component_id']) == \
+ self.iotlab_api.root_auth:
+ single_requested_lease['hostname'] = \
+ iotlab_xrn_to_hostname(\
+ lease.get('component_id').strip())
+ single_requested_lease['start_time'] = \
+ lease.get('start_time')
+ single_requested_lease['duration'] = lease.get('duration')
+ #Check the experiment's duration is valid before adding
+ #the lease to the requested leases list
+ duration_in_seconds = \
+ int(single_requested_lease['duration'])
+ if duration_in_seconds >= self.iotlab_api.GetMinExperimentDurationInGranularity():
+ requested_lease_list.append(single_requested_lease)
+
+ return requested_lease_list
+
+ @staticmethod
+ def _group_leases_by_start_time(requested_lease_list):
+ """
+ Create dict of leases by start_time, regrouping nodes reserved
+ at the same time, for the same amount of time so as to
+ define one job on OAR.
+
+ :param requested_lease_list: list of leases
+ :type requested_lease_list: list
+ :returns: Dictionary with key = start time, value = list of leases
+ with the same start time.
+ :rtype: dictionary
+
+ """
+
+ requested_job_dict = {}
+ for lease in requested_lease_list:
+
+ #In case it is an asap experiment start_time is empty
+ if lease['start_time'] == '':
+ lease['start_time'] = '0'
+
+ if lease['start_time'] not in requested_job_dict:
+ if isinstance(lease['hostname'], str):
+ lease['hostname'] = [lease['hostname']]
+
+ requested_job_dict[lease['start_time']] = lease
+
+ else:
+ job_lease = requested_job_dict[lease['start_time']]
+ if lease['duration'] == job_lease['duration']:
+ job_lease['hostname'].append(lease['hostname'])
+
+ return requested_job_dict
+
+ def _process_requested_jobs(self, rspec):
+ """
+ Turns the requested leases and information into a dictionary
+ of requested jobs, grouped by starting time.
+
+ :param rspec: RSpec received
+ :type rspec : RSpec
+ :rtype: dictionary
+
+ """
+ requested_lease_list = self._get_requested_leases_list(rspec)
+ logger.debug("IOTLABDRIVER _process_requested_jobs \
+ requested_lease_list %s" % (requested_lease_list))
+ job_dict = self._group_leases_by_start_time(requested_lease_list)
+ logger.debug("IOTLABDRIVER _process_requested_jobs job_dict\
+ %s" % (job_dict))
+
+ return job_dict
+
+ def create_sliver(self, slice_urn, slice_hrn, creds, rspec_string,
+ users, options):
+ """Answer to CreateSliver.
+
+ Creates the leases and slivers for the users from the information
+ found in the rspec string.
+ Launch experiment on OAR if the requested leases is valid. Delete
+ no longer requested leases.
+
+
+ :param creds: user's credentials
+ :type creds: string
+ :param users: user record list
+ :type users: list
+ :param options:
+ :type options:
+
+ :returns: a valid Rspec for the slice which has just been
+ modified.
+ :rtype: RSpec
+
+
+ """
+ aggregate = IotlabAggregate(self)
+
+ slices = IotlabSlices(self)
+ peer = slices.get_peer(slice_hrn)
+ sfa_peer = slices.get_sfa_peer(slice_hrn)
+ slice_record = None
+
+ if not isinstance(creds, list):
+ creds = [creds]
+
+ if users:
+ slice_record = users[0].get('slice_record', {})
+ logger.debug("IOTLABDRIVER.PY \t ===============create_sliver \t\
+ creds %s \r\n \r\n users %s"
+ % (creds, users))
+ slice_record['user'] = {'keys': users[0]['keys'],
+ 'email': users[0]['email'],
+ 'hrn': slice_record['reg-researchers'][0]}
+ # parse rspec
+ rspec = RSpec(rspec_string)
+ logger.debug("IOTLABDRIVER.PY \t create_sliver \trspec.version \
+ %s slice_record %s users %s"
+ % (rspec.version, slice_record, users))
+
+ # ensure site record exists?
+ # ensure slice record exists
+ #Removed options in verify_slice SA 14/08/12
+ #Removed peer record in verify_slice SA 18/07/13
+ sfa_slice = slices.verify_slice(slice_hrn, slice_record, sfa_peer)
+
+ # ensure person records exists
+ #verify_persons returns added persons but the return value
+ #is not used
+ #Removed peer record and sfa_peer in verify_persons SA 18/07/13
+ slices.verify_persons(slice_hrn, sfa_slice, users, options=options)
+ #requested_attributes returned by rspec.version.get_slice_attributes()
+ #unused, removed SA 13/08/12
+ #rspec.version.get_slice_attributes()
+
+ logger.debug("IOTLABDRIVER.PY create_sliver slice %s " % (sfa_slice))
+
+ # add/remove slice from nodes
+
+ #requested_slivers = [node.get('component_id') \
+ #for node in rspec.version.get_nodes_with_slivers()\
+ #if node.get('authority_id') is self.iotlab_api.root_auth]
+ #l = [ node for node in rspec.version.get_nodes_with_slivers() ]
+ #logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
+ #requested_slivers %s listnodes %s" \
+ #%(requested_slivers,l))
+ #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
+ #slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)
+
+ requested_job_dict = self._process_requested_jobs(rspec)
+
+ logger.debug("IOTLABDRIVER.PY \tcreate_sliver requested_job_dict %s "
+ % (requested_job_dict))
+ #verify_slice_leases returns the leases , but the return value is unused
+ #here. Removed SA 13/08/12
+ slices.verify_slice_leases(sfa_slice,
+ requested_job_dict, peer)
+
+ return aggregate.get_rspec(slice_xrn=slice_urn,
+ login=sfa_slice['login'],
+ version=rspec.version)
+
+ def delete_sliver(self, slice_urn, slice_hrn, creds, options):
+ """
+ Deletes the lease associated with the slice hrn and the credentials
+ if the slice belongs to iotlab. Answer to DeleteSliver.
+
+ :param slice_urn: urn of the slice
+ :param slice_hrn: name of the slice
+        :param creds: slice credentials
+ :type slice_urn: string
+ :type slice_hrn: string
+ :type creds: ? unused
+
+        :returns: 1 if the slice to delete was not found on iotlab,
+            True if the deletion was successful, False otherwise.
+
+ .. note:: Should really be named delete_leases because iotlab does
+ not have any slivers, but only deals with leases. However,
+ SFA api only have delete_sliver define so far. SA 13/05/2013
+ .. note:: creds are unused, and are not used either in the dummy driver
+ delete_sliver .
+ """
+
+ sfa_slice_list = self.iotlab_api.GetSlices(
+ slice_filter=slice_hrn,
+ slice_filter_type='slice_hrn')
+
+ if not sfa_slice_list:
+ return 1
+
+ #Delete all leases in the slice
+ for sfa_slice in sfa_slice_list:
+ logger.debug("IOTLABDRIVER.PY delete_sliver slice %s" % (sfa_slice))
+ slices = IotlabSlices(self)
+ # determine if this is a peer slice
+
+ peer = slices.get_peer(slice_hrn)
+
+ logger.debug("IOTLABDRIVER.PY delete_sliver peer %s \
+ \r\n \t sfa_slice %s " % (peer, sfa_slice))
+ try:
+ self.iotlab_api.DeleteSliceFromNodes(sfa_slice)
+ return True
+ except:
+ return False
+
+ def list_resources (self, slice_urn, slice_hrn, creds, options):
+ """
+
+ List resources from the iotlab aggregate and returns a Rspec
+ advertisement with resources found when slice_urn and slice_hrn are
+ None (in case of resource discovery).
+ If a slice hrn and urn are provided, list experiment's slice
+ nodes in a rspec format. Answer to ListResources.
+ Caching unused.
+
+ :param slice_urn: urn of the slice
+ :param slice_hrn: name of the slice
+        :param creds: slice credentials
+ :type slice_urn: string
+ :type slice_hrn: string
+ :type creds: ? unused
+ :param options: options used when listing resources (list_leases, info,
+ geni_available)
+ :returns: rspec string in xml
+ :rtype: string
+
+ .. note:: creds are unused
+ """
+
+ #cached_requested = options.get('cached', True)
+
+ version_manager = VersionManager()
+ # get the rspec's return format from options
+ rspec_version = \
+ version_manager.get_version(options.get('geni_rspec_version'))
+ version_string = "rspec_%s" % (rspec_version)
+
+ #panos adding the info option to the caching key (can be improved)
+ if options.get('info'):
+ version_string = version_string + "_" + \
+ options.get('info', 'default')
+
+ # Adding the list_leases option to the caching key
+ if options.get('list_leases'):
+ version_string = version_string + "_" + \
+ options.get('list_leases', 'default')
+
+ # Adding geni_available to caching key
+ if options.get('geni_available'):
+ version_string = version_string + "_" + \
+ str(options.get('geni_available'))
+
+ # look in cache first
+ #if cached_requested and self.cache and not slice_hrn:
+ #rspec = self.cache.get(version_string)
+ #if rspec:
+ #logger.debug("IotlabDriver.ListResources: \
+ #returning cached advertisement")
+ #return rspec
+
+ #panos: passing user-defined options
+ aggregate = IotlabAggregate(self)
+
+ rspec = aggregate.get_rspec(slice_xrn=slice_urn,
+ version=rspec_version, options=options)
+
+ # cache the result
+ #if self.cache and not slice_hrn:
+ #logger.debug("Iotlab.ListResources: stores advertisement in cache")
+ #self.cache.add(version_string, rspec)
+
+ return rspec
+
+
+ def list_slices(self, creds, options):
+ """Answer to ListSlices.
+
+ List slices belonging to iotlab, returns slice urns list.
+ No caching used. Options unused but are defined in the SFA method
+ api prototype.
+
+ :returns: slice urns list
+ :rtype: list
+
+ .. note:: creds are unused
+ """
+ # look in cache first
+ #if self.cache:
+ #slices = self.cache.get('slices')
+ #if slices:
+ #logger.debug("PlDriver.list_slices returns from cache")
+ #return slices
+
+ # get data from db
+
+ slices = self.iotlab_api.GetSlices()
+ logger.debug("IOTLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n"
+ % (slices))
+ slice_hrns = [iotlab_slice['hrn'] for iotlab_slice in slices]
+
+ slice_urns = [hrn_to_urn(slice_hrn, 'slice')
+ for slice_hrn in slice_hrns]
+
+ # cache the result
+ #if self.cache:
+ #logger.debug ("IotlabDriver.list_slices stores value in cache")
+ #self.cache.add('slices', slice_urns)
+
+ return slice_urns
+
+
+ def register(self, sfa_record, hrn, pub_key):
+ """
+ Adding new user, slice, node or site should not be handled
+ by SFA.
+
+        .. warning:: Should not be used. Different components are in charge of
+ doing this task. Adding nodes = OAR
+ Adding users = LDAP Iotlab
+ Adding slice = Import from LDAP users
+ Adding site = OAR
+
+ :param sfa_record: record provided by the client of the
+ Register API call.
+ :type sfa_record: dict
+ :param pub_key: public key of the user
+ :type pub_key: string
+
+ .. note:: DOES NOTHING. Returns -1.
+
+ """
+ return -1
+
+
+ def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
+ """
+ No site or node record update allowed in Iotlab. The only modifications
+ authorized here are key deletion/addition on an existing user and
+ password change. On an existing user, CAN NOT BE MODIFIED: 'first_name',
+        'last_name', 'email'. DOES NOT EXIST IN IOTLAB: 'phone', 'url', 'bio',
+        'title', 'accepted_aup'. A slice is bound to its user, so modifying the
+        user's ssh key should modify the slice's GID after an import procedure.
+
+ :param old_sfa_record: what is in the db for this hrn
+ :param new_sfa_record: what was passed to the update call
+ :param new_key: the new user's public key
+ :param hrn: the user's sfa hrn
+ :type old_sfa_record: dict
+ :type new_sfa_record: dict
+ :type new_key: string
+ :type hrn: string
+
+ TODO: needs review
+ .. seealso:: update in driver.py.
+
+ """
+ pointer = old_sfa_record['pointer']
+ old_sfa_record_type = old_sfa_record['type']
+
+ # new_key implemented for users only
+ if new_key and old_sfa_record_type not in ['user']:
+ raise UnknownSfaType(old_sfa_record_type)
+
+ if old_sfa_record_type == "user":
+ update_fields = {}
+ all_fields = new_sfa_record
+ for key in all_fields.keys():
+ if key in ['key', 'password']:
+ update_fields[key] = all_fields[key]
+
+ if new_key:
+ # must check this key against the previous one if it exists
+ persons = self.iotlab_api.GetPersons([old_sfa_record])
+ person = persons[0]
+ keys = [person['pkey']]
+ #Get all the person's keys
+ keys_dict = self.iotlab_api.GetKeys(keys)
+
+ # Delete all stale keys, meaning the user has only one key
+ #at a time
+ #TODO: do we really want to delete all the other keys?
+ #Is this a problem with the GID generation to have multiple
+ #keys? SA 30/05/13
+ key_exists = False
+ if key in keys_dict:
+ key_exists = True
+ else:
+ #remove all the other keys
+ for key in keys_dict:
+ self.iotlab_api.DeleteKey(person, key)
+ self.iotlab_api.AddPersonKey(
+ person, {'sshPublicKey': person['pkey']},
+ {'sshPublicKey': new_key})
+ return True
+
+    def remove(self, sfa_record):
+        """
+        Removes users only. Mark the user as disabled in LDAP. The user and his
+        slice are then deleted from the db by running an import on the registry.
+
+        :param sfa_record: record is the existing sfa record in the db
+        :type sfa_record: dict
+
+        ..warning:: As far as the slice is concerned, here only the leases are
+            removed from the slice. The slice record itself is not removed
+            from the db.
+
+        TODO: needs review
+
+        TODO : REMOVE SLICE FROM THE DB AS WELL? SA 14/05/2013,
+
+        TODO: return boolean for the slice part
+        """
+        sfa_record_type = sfa_record['type']
+        hrn = sfa_record['hrn']
+        if sfa_record_type == 'user':
+            # get user from iotlab ldap
+            person = self.iotlab_api.GetPersons(sfa_record)
+            # No registering at a given site in Iotlab.
+            # Once registered to the LDAP, all iotlab sites are
+            # accessible.
+            if person:
+                # Mark account as disabled in ldap
+                return self.iotlab_api.DeletePerson(sfa_record)
+
+        elif sfa_record_type == 'slice':
+            if self.iotlab_api.GetSlices(slice_filter=hrn,
+                                         slice_filter_type='slice_hrn'):
+                # NOTE(review): DeleteSlice's return value is discarded;
+                # the slice branch always reports success.
+                ret = self.iotlab_api.DeleteSlice(sfa_record)
+            return True
--- /dev/null
+"""
+File defining classes to handle the table in the iotlab dedicated database.
+"""
+
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+# from sfa.util.config import Config
+from sfa.util.sfalogging import logger
+
+from sqlalchemy import Column, Integer, String
+from sqlalchemy import Table, MetaData
+from sqlalchemy.ext.declarative import declarative_base
+
+# from sqlalchemy.dialects import postgresql
+
+from sqlalchemy.exc import NoSuchTableError
+
+
+#Dict holding the columns names of the table as keys
+#and their type, used for creation of the table
+slice_table = {'record_id_user': 'integer PRIMARY KEY references X ON DELETE \
+ CASCADE ON UPDATE CASCADE', 'oar_job_id': 'integer DEFAULT -1',
+ 'record_id_slice': 'integer', 'slice_hrn': 'text NOT NULL'}
+
+#Dict with all the specific iotlab tables
+# tablenames_dict = {'lease_table': slice_table}
+
+
+TestbedBase = declarative_base()
+
+
+class LeaseTableXP (TestbedBase):
+    """ SQL alchemy class to manipulate the rows of the slice_iotlab table in
+    lease_table database. Handles the records representation and creates the
+    table if it does not exist yet.
+
+    """
+    __tablename__ = 'lease_table'
+
+    # hrn of the slice that owns the experiment
+    slice_hrn = Column(String)
+    # testbed experiment/job identifier - primary key of the table
+    experiment_id = Column(Integer, primary_key=True)
+    # end of the reservation (integer timestamp - presumably epoch seconds,
+    # TODO confirm against the writer of this table)
+    end_time = Column(Integer, nullable=False)
+
+    def __init__(self, slice_hrn=None, experiment_id=None, end_time=None):
+        """
+        Defines a row of the slice_iotlab table.
+
+        Each column is only set when a truthy value is provided; falsy
+        values (None, 0, '') leave the column unset.
+        """
+        if slice_hrn:
+            self.slice_hrn = slice_hrn
+        if experiment_id:
+            self.experiment_id = experiment_id
+        if end_time:
+            self.end_time = end_time
+
+    def __repr__(self):
+        """Prints the SQLAlchemy record to the format defined
+        by the function.
+        """
+        result = "<lease_table : slice_hrn = %s , experiment_id %s end_time = %s" \
+            % (self.slice_hrn, self.experiment_id, self.end_time)
+        result += ">"
+        return result
+
+
+class TestbedAdditionalSfaDB(object):
+ """ SQL Alchemy connection class.
+ From alchemy.py
+ """
+ # Stores the unique Singleton instance-
+ _connection_singleton = None
+ # defines the database name
+ dbname = "testbed_xp"
+
+ class Singleton:
+ """
+ Class used with this Python singleton design pattern to allow the
+ definition of one single instance of iotlab db session in the whole
+ code. Wherever a conenction to the database is needed, this class
+ returns the same instance every time. Removes the need for global
+ variable throughout the code.
+ """
+
+ def __init__(self, config, debug=False):
+ self.testbed_engine = None
+ self.testbed_session = None
+ self.url = None
+ self.create_testbed_engine(config, debug)
+ self.session()
+
+ def create_testbed_engine(self, config, debug=False):
+ """Creates the SQLAlchemy engine, which is the starting point for
+ any SQLAlchemy application.
+ :param config: configuration object created by SFA based on the
+ configuration file in /etc
+ :param debug: if set to true, echo and echo pool will be set to true
+ as well. If echo is True, all statements as well as a repr() of
+ their parameter lists to the engines logger, which defaults to
+ sys.stdout. If echo_pool is True, the connection pool will log all
+ checkouts/checkins to the logging stream. A python logger can be
+ used to configure this logging directly but so far it has not been
+ configured. Refer to sql alchemy engine documentation.
+
+ :type config: Config instance (sfa.util.config)
+ :type debug: bool
+
+ """
+
+ if debug is True:
+ l_echo_pool = True
+ l_echo = True
+ else:
+ l_echo_pool = False
+ l_echo = False
+ # the former PostgreSQL.py used the psycopg2 directly and was doing
+ #self.connection.set_client_encoding("UNICODE")
+ # it's unclear how to achieve this in sqlalchemy, nor if it's needed
+ # at all
+ # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
+ # we indeed have /var/lib/pgsql/data/postgresql.conf where
+ # this setting is unset, it might be an angle to tweak that if need
+ # be try a unix socket first
+ # - omitting the hostname does the trick
+ unix_url = "postgresql+psycopg2://%s:%s@:%s/%s" \
+ % (config.SFA_DB_USER, config.SFA_DB_PASSWORD,
+ config.SFA_DB_PORT, TestbedAdditionalSfaDB.dbname)
+
+ # the TCP fallback method
+ tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s" \
+ % (config.SFA_DB_USER, config.SFA_DB_PASSWORD,
+ config.SFA_DB_HOST, config.SFA_DB_PORT, TestbedAdditionalSfaDB.dbname)
+
+ for url in [unix_url, tcp_url]:
+ try:
+ self.testbed_engine = create_engine(
+ url, echo_pool=l_echo_pool, echo=l_echo)
+ self.check()
+ self.url = url
+ return
+ except:
+ pass
+ self.testbed_engine = None
+
+ raise Exception("Could not connect to database")
+
+ def check(self):
+ """ Check if a table exists by trying a selection
+ on the table.
+
+ """
+ self.testbed_engine.execute("select 1").scalar()
+
+
+ def session(self):
+ """
+ Creates a SQLalchemy session. Once the session object is created
+ it should be used throughout the code for all the operations on
+ tables for this given database.
+
+ """
+ if self.testbed_session is None:
+ Session = sessionmaker()
+ self.testbed_session = Session(bind=self.testbed_engine)
+ return self.testbed_session
+
+ def close_session(self):
+ """
+ Closes connection to database.
+
+ """
+ if self.testbed_session is None:
+ return
+ self.testbed_session.close()
+ self.testbed_session = None
+
+
+    def update_experiments_in_additional_sfa_db(self,
+        experiment_list_from_testbed, experiment_list_in_db):
+        """ Cleans the iotlab db by deleting expired and cancelled jobs.
+
+        Compares the list of experiment ids given by the testbed with the
+        experiment ids that are already in the database, deletes the
+        experiments that are no longer in the testbed experiment id list.
+
+        :param experiment_list_from_testbed: list of experiment ids coming
+            from testbed
+        :type experiment_list_from_testbed: list
+        :param experiment_list_in_db: list of experiment ids from the sfa
+            additional database.
+        :type experiment_list_in_db: list
+
+        :returns: None
+        """
+        # Turn the list into a set for O(1) membership tests below
+        set_experiment_list_in_db = set(experiment_list_in_db)
+
+        kept_experiments = set(experiment_list_from_testbed).intersection(set_experiment_list_in_db)
+        logger.debug("\r\n \t update_experiments_in_additional_sfa_db \
+                        experiment_list_in_db %s \r\n \
+                        experiment_list_from_testbed %s \
+                        kept_experiments %s "
+                     % (set_experiment_list_in_db,
+                        experiment_list_from_testbed, kept_experiments))
+        # Whatever is in the db but no longer on the testbed gets deleted
+        deleted_experiments = set_experiment_list_in_db.difference(
+            kept_experiments)
+        deleted_experiments = list(deleted_experiments)
+        if len(deleted_experiments) > 0:
+            # 'fetch' evaluates the filter before deleting so the in-memory
+            # session stays consistent with the bulk delete
+            self.testbed_session.query(LeaseTableXP).filter(LeaseTableXP.experiment_id.in_(deleted_experiments)).delete(synchronize_session='fetch')
+            self.testbed_session.commit()
+        return
+
+    def __init__(self, config, debug=False):
+        """Create (or reuse) the unique DB connection singleton.
+
+        :param config: SFA configuration object (provides SFA_DB_* settings).
+        :param debug: passed through to the engine creation (SQL echo).
+        :type debug: bool
+        """
+        self.sl_base = TestbedBase
+
+        # Check whether we already have an instance
+        if TestbedAdditionalSfaDB._connection_singleton is None:
+            TestbedAdditionalSfaDB._connection_singleton = \
+                TestbedAdditionalSfaDB.Singleton(config, debug)
+
+        # Store instance reference as the only member in the handle
+        self._EventHandler_singleton = \
+            TestbedAdditionalSfaDB._connection_singleton
+
+    def __getattr__(self, aAttr):
+        """
+        Delegate access to implementation.
+
+        Note: delegation goes through the class-level _connection_singleton
+        rather than the per-instance _EventHandler_singleton; both refer to
+        the same Singleton object, so the behavior is identical.
+
+        :param aAttr: Attribute wanted.
+        :returns: Attribute
+        """
+        return getattr(self._connection_singleton, aAttr)
+
+
+
+ # def __setattr__(self, aAttr, aValue):
+ # """Delegate access to implementation.
+
+ # :param attr: Attribute wanted.
+ # :param value: Vaule to be set.
+ # :return: Result of operation.
+ # """
+ # return setattr(self._connection_singleton, aAttr, aValue)
+
+    def exists(self, tablename):
+        """
+        Checks if the table specified as tablename exists.
+        :param tablename: name of the table in the db that has to be checked.
+        :type tablename: string
+        :returns: True if the table exists, False otherwise.
+        :rtype: bool
+
+        """
+        metadata = MetaData(bind=self.testbed_engine)
+        try:
+            # The Table object itself is not used; autoload=True forces a
+            # reflection round-trip that raises if the table is missing.
+            table = Table(tablename, metadata, autoload=True)
+            return True
+
+        except NoSuchTableError:
+            logger.log_exc("SLABPOSTGRES tablename %s does not exist"
+                           % (tablename))
+            return False
+
+    def createtable(self):
+        """
+        Creates all the tables of the engine.
+        Uses the global dictionary holding the tablenames and the table
+        schema.
+
+        """
+
+        logger.debug("IOTLABPOSTGRES createtable \
+                    TestbedBase.metadata.sorted_tables %s \r\n engine %s"
+                     % (TestbedBase.metadata.sorted_tables, self.testbed_engine))
+        # create_all is idempotent: existing tables are left untouched
+        TestbedBase.metadata.create_all(self.testbed_engine)
+        return
--- /dev/null
+"""
+This file defines the IotlabSlices class by which all the slice checkings
+upon lease creation are done.
+"""
+from sfa.util.xrn import get_authority, urn_to_hrn
+from sfa.util.sfalogging import logger
+
+MAXINT = 2L**31-1
+
+
+class IotlabSlices:
+ """
+ This class is responsible for checking the slice when creating a
+ lease or a sliver. Those checks include verifying that the user is valid,
+ that the slice is known from the testbed or from our peers, that the list
+ of nodes involved has not changed (in this case the lease is modified
+ accordingly).
+ """
+ rspec_to_slice_tag = {'max_rate': 'net_max_rate'}
+
+    def __init__(self, driver):
+        """
+        Get the reference to the driver here.
+
+        :param driver: the iotlab testbed driver; every check in this class
+            goes through driver.iotlab_api.
+        """
+        self.driver = driver
+
+    def get_peer(self, xrn):
+        """
+        Finds the authority of a resource based on its xrn.
+        If the authority is Iotlab (local) return None,
+        Otherwise, look up in the DB if Iotlab is federated with this site
+        authority and returns its DB record if it is the case.
+
+        :param xrn: resource's xrn
+        :type xrn: string
+        :returns: peer record
+        :rtype: dict
+
+        """
+        hrn, hrn_type = urn_to_hrn(xrn)
+        # Does this slice belong to a local site or a peer iotlab site?
+        peer = None
+
+        # get this slice's authority (site)
+        slice_authority = get_authority(hrn)
+        # Iotlab stuff
+        # This slice belongs to the current site
+        if slice_authority == self.driver.iotlab_api.root_auth:
+            # NOTE(review): dead assignment - site_authority is never used
+            # before the return below.
+            site_authority = slice_authority
+            return None
+
+        site_authority = get_authority(slice_authority).lower()
+        # get this site's authority (sfa root authority or sub authority)
+
+        logger.debug("IOTLABSLICES \t get_peer slice_authority  %s \
+                    site_authority %s hrn %s"
+                     % (slice_authority, site_authority, hrn))
+
+        # check if we are already peered with this site_authority
+        # if so find the peer record
+        peers = self.driver.iotlab_api.GetPeers(peer_filter=site_authority)
+        for peer_record in peers:
+            if site_authority == peer_record.hrn:
+                peer = peer_record
+        logger.debug(" IOTLABSLICES \tget_peer peer  %s " % (peer))
+        return peer
+
+    def get_sfa_peer(self, xrn):
+        """Returns the authority name for the xrn or None if the local site
+        is the authority.
+
+        :param xrn: the xrn of the resource we are looking the authority for.
+        :type xrn: string
+        :returns: the resource's authority name, or None when this driver
+            is itself the authority.
+        :rtype: string
+
+        """
+        hrn, hrn_type = urn_to_hrn(xrn)
+
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        # the site authority is the authority one level above the slice's
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.driver.hrn:
+            sfa_peer = site_authority
+
+        return sfa_peer
+
+ def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
+ """
+ Compare requested leases with the leases already scheduled/
+ running in OAR. If necessary, delete and recreate modified leases,
+ and delete no longer requested ones.
+
+ :param sfa_slice: sfa slice record
+ :param requested_jobs_dict: dictionary of requested leases
+ :param peer: sfa peer record
+
+ :type sfa_slice: dict
+ :type requested_jobs_dict: dict
+ :type peer: dict
+ :returns: leases list of dictionary
+ :rtype: list
+
+ """
+
+ logger.debug("IOTLABSLICES verify_slice_leases sfa_slice %s "
+ % (sfa_slice))
+ #First get the list of current leases from OAR
+ leases = self.driver.iotlab_api.GetLeases({'slice_hrn': sfa_slice['hrn']})
+ logger.debug("IOTLABSLICES verify_slice_leases requested_jobs_dict %s \
+ leases %s " % (requested_jobs_dict, leases))
+
+ current_nodes_reserved_by_start_time = {}
+ requested_nodes_by_start_time = {}
+ leases_by_start_time = {}
+ reschedule_jobs_dict = {}
+
+ #Create reduced dictionary with key start_time and value
+ # the list of nodes
+ #-for the leases already registered by OAR first
+ # then for the new leases requested by the user
+
+ #Leases already scheduled/running in OAR
+ for lease in leases:
+ current_nodes_reserved_by_start_time[lease['t_from']] = \
+ lease['reserved_nodes']
+ leases_by_start_time[lease['t_from']] = lease
+
+ #First remove job whose duration is too short
+ for job in requested_jobs_dict.values():
+ job['duration'] = \
+ str(int(job['duration']) \
+ * self.driver.iotlab_api.GetLeaseGranularity())
+ if job['duration'] < self.driver.iotlab_api.GetLeaseGranularity():
+ del requested_jobs_dict[job['start_time']]
+
+ #Requested jobs
+ for start_time in requested_jobs_dict:
+ requested_nodes_by_start_time[int(start_time)] = \
+ requested_jobs_dict[start_time]['hostname']
+ #Check if there is any difference between the leases already
+ #registered in OAR and the requested jobs.
+ #Difference could be:
+ #-Lease deleted in the requested jobs
+ #-Added/removed nodes
+ #-Newly added lease
+
+ logger.debug("IOTLABSLICES verify_slice_leases \
+ requested_nodes_by_start_time %s \
+ "% (requested_nodes_by_start_time))
+ #Find all deleted leases
+ start_time_list = \
+ list(set(leases_by_start_time.keys()).\
+ difference(requested_nodes_by_start_time.keys()))
+ deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
+ for start_time in start_time_list]
+
+
+ #Find added or removed nodes in exisiting leases
+ for start_time in requested_nodes_by_start_time:
+ logger.debug("IOTLABSLICES verify_slice_leases start_time %s \
+ "%( start_time))
+ if start_time in current_nodes_reserved_by_start_time:
+
+ if requested_nodes_by_start_time[start_time] == \
+ current_nodes_reserved_by_start_time[start_time]:
+ continue
+
+ else:
+ update_node_set = \
+ set(requested_nodes_by_start_time[start_time])
+ added_nodes = \
+ update_node_set.difference(\
+ current_nodes_reserved_by_start_time[start_time])
+ shared_nodes = \
+ update_node_set.intersection(\
+ current_nodes_reserved_by_start_time[start_time])
+ old_nodes_set = \
+ set(\
+ current_nodes_reserved_by_start_time[start_time])
+ removed_nodes = \
+ old_nodes_set.difference(\
+ requested_nodes_by_start_time[start_time])
+ logger.debug("IOTLABSLICES verify_slice_leases \
+ shared_nodes %s added_nodes %s removed_nodes %s"\
+ %(shared_nodes, added_nodes,removed_nodes ))
+ #If the lease is modified, delete it before
+ #creating it again.
+ #Add the deleted lease job id in the list
+ #WARNING :rescheduling does not work if there is already
+ # 2 running/scheduled jobs because deleting a job
+ #takes time SA 18/10/2012
+ if added_nodes or removed_nodes:
+ deleted_leases.append(\
+ leases_by_start_time[start_time]['lease_id'])
+ #Reschedule the job
+ if added_nodes or shared_nodes:
+ reschedule_jobs_dict[str(start_time)] = \
+ requested_jobs_dict[str(start_time)]
+
+ else:
+ #New lease
+
+ job = requested_jobs_dict[str(start_time)]
+ logger.debug("IOTLABSLICES \
+ NEWLEASE slice %s job %s"
+ % (sfa_slice, job))
+ self.driver.iotlab_api.AddLeases(
+ job['hostname'],
+ sfa_slice, int(job['start_time']),
+ int(job['duration']))
+
+ #Deleted leases are the ones with lease id not declared in the Rspec
+ if deleted_leases:
+ self.driver.iotlab_api.DeleteLeases(deleted_leases,
+ sfa_slice['user']['uid'])
+ logger.debug("IOTLABSLICES \
+ verify_slice_leases slice %s deleted_leases %s"
+ % (sfa_slice, deleted_leases))
+
+ if reschedule_jobs_dict:
+ for start_time in reschedule_jobs_dict:
+ job = reschedule_jobs_dict[start_time]
+ self.driver.iotlab_api.AddLeases(
+ job['hostname'],
+ sfa_slice, int(job['start_time']),
+ int(job['duration']))
+ return leases
+
+ def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
+ """Check for wanted and unwanted nodes in the slice.
+
+ Removes nodes and associated leases that the user does not want anymore
+ by deleteing the associated job in OAR (DeleteSliceFromNodes).
+ Returns the nodes' hostnames that are going to be in the slice.
+
+ :param sfa_slice: slice record. Must contain node_ids and list_node_ids.
+
+ :param requested_slivers: list of requested nodes' hostnames.
+ :param peer: unused so far.
+
+ :type sfa_slice: dict
+ :type requested_slivers: list
+ :type peer: string
+
+ :returns: list requested nodes hostnames
+ :rtype: list
+
+ .. warning:: UNUSED SQA 24/07/13
+ .. seealso:: DeleteSliceFromNodes
+ .. todo:: check what to do with the peer? Can not remove peer nodes from
+ slice here. Anyway, in this case, the peer should have gotten the
+ remove request too.
+
+ """
+ current_slivers = []
+ deleted_nodes = []
+
+ if 'node_ids' in sfa_slice:
+ nodes = self.driver.iotlab_api.GetNodes(
+ sfa_slice['list_node_ids'],
+ ['hostname'])
+ current_slivers = [node['hostname'] for node in nodes]
+
+ # remove nodes not in rspec
+ deleted_nodes = list(set(current_slivers).
+ difference(requested_slivers))
+
+ logger.debug("IOTLABSLICES \tverify_slice_nodes slice %s\
+ \r\n \r\n deleted_nodes %s"
+ % (sfa_slice, deleted_nodes))
+
+ if deleted_nodes:
+ #Delete the entire experience
+ self.driver.iotlab_api.DeleteSliceFromNodes(sfa_slice)
+ return nodes
+
+    def verify_slice(self, slice_hrn, slice_record, sfa_peer):
+        """Ensures slice record exists.
+
+        The slice record must exist either in Iotlab or in the other
+        federated testbed (sfa_peer). If the slice does not belong to Iotlab,
+        check if the user already exists in LDAP. In this case, adds the slice
+        to the sfa DB and associates its LDAP user.
+
+        :param slice_hrn: slice's name
+        :param slice_record: sfa record of the slice
+        :param sfa_peer: name of the peer authority if any.(not Iotlab).
+
+        :type slice_hrn: string
+        :type slice_record: dictionary
+        :type sfa_peer: string
+
+        :returns: the local slice record, updated with slice_record, or the
+            newly built record for a federated slice.
+        :rtype: dict
+
+        .. seealso:: AddSlice
+
+
+        """
+
+        slicename = slice_hrn
+        # check if slice belongs to Iotlab
+        slices_list = self.driver.iotlab_api.GetSlices(
+            slice_filter=slicename, slice_filter_type='slice_hrn')
+
+        sfa_slice = None
+
+        if slices_list:
+            for sl in slices_list:
+
+                logger.debug("SLABSLICE \t verify_slice slicename %s \
+                                slices_list %s sl %s \r slice_record %s"
+                             % (slicename, slices_list, sl, slice_record))
+                sfa_slice = sl
+                sfa_slice.update(slice_record)
+
+        else:
+            # Search for user in ldap based on email SA 14/11/12
+            ldap_user = self.driver.iotlab_api.ldap.LdapFindUser(\
+                                                    slice_record['user'])
+            logger.debug(" IOTLABSLICES \tverify_slice Oups \
+                        slice_record %s sfa_peer %s ldap_user %s"
+                         % (slice_record, sfa_peer, ldap_user))
+            # User already registered in ldap, meaning user should be in SFA
+            # db and hrn = sfa_auth + uid
+            sfa_slice = {'hrn': slicename,
+                         'node_list': [],
+                         'authority': slice_record['authority'],
+                         'gid': slice_record['gid'],
+                         'slice_id': slice_record['record_id'],
+                         'reg-researchers': slice_record['reg-researchers'],
+                         'peer_authority': str(sfa_peer)
+                         }
+
+            if ldap_user:
+                hrn = self.driver.iotlab_api.root_auth + '.' + ldap_user['uid']
+                user = self.driver.get_user_record(hrn)
+
+                logger.debug(" IOTLABSLICES \tverify_slice hrn %s USER %s"
+                             % (hrn, user))
+
+                # add the external slice to the local SFA iotlab DB
+                if sfa_slice:
+                    self.driver.iotlab_api.AddSlice(sfa_slice, user)
+
+        logger.debug("IOTLABSLICES \tverify_slice ADDSLICE OK")
+        return sfa_slice
+
+
+ def verify_persons(self, slice_hrn, slice_record, users, options={}):
+ """Ensures the users in users list exist and are enabled in LDAP. Adds
+ person if needed.
+
+ Checking that a user exist is based on the user's email. If the user is
+ still not found in the LDAP, it means that the user comes from another
+ federated testbed. In this case an account has to be created in LDAP
+ so as to enable the user to use the testbed, since we trust the testbed
+ he comes from. This is done by calling AddPerson.
+
+ :param slice_hrn: slice name
+ :param slice_record: record of the slice_hrn
+ :param users: users is a record list. Records can either be
+ local records or users records from known and trusted federated
+ sites.If the user is from another site that iotlab doesn't trust yet,
+ then Resolve will raise an error before getting to create_sliver.
+
+ :type slice_hrn: string
+ :type slice_record: string
+ :type users: list
+
+ .. seealso:: AddPerson
+ .. note:: Removed unused peer and sfa_peer parameters. SA 18/07/13.
+
+
+ """
+ #TODO SA 21/08/12 verify_persons Needs review
+
+ logger.debug("IOTLABSLICES \tverify_persons \tslice_hrn %s \
+ \t slice_record %s\r\n users %s \t "
+ % (slice_hrn, slice_record, users))
+ users_by_id = {}
+
+ users_by_email = {}
+ #users_dict : dict whose keys can either be the user's hrn or its id.
+ #Values contains only id and hrn
+ users_dict = {}
+
+ #First create dicts by hrn and id for each user in the user record list:
+ for info in users:
+ if 'slice_record' in info:
+ slice_rec = info['slice_record']
+ if 'user' in slice_rec :
+ user = slice_rec['user']
+
+ if 'email' in user:
+ users_by_email[user['email']] = user
+ users_dict[user['email']] = user
+
+ logger.debug("SLABSLICE.PY \t verify_person \
+ users_dict %s \r\n user_by_email %s \r\n \
+ \tusers_by_id %s "
+ % (users_dict, users_by_email, users_by_id))
+
+ existing_user_ids = []
+ existing_user_emails = []
+ existing_users = []
+ # Check if user is in Iotlab LDAP using its hrn.
+ # Assuming Iotlab is centralised : one LDAP for all sites,
+ # user's record_id unknown from LDAP
+ # LDAP does not provide users id, therefore we rely on email to find the
+ # user in LDAP
+
+ if users_by_email:
+ #Construct the list of filters (list of dicts) for GetPersons
+ filter_user = [users_by_email[email] for email in users_by_email]
+ #Check user i in LDAP with GetPersons
+ #Needed because what if the user has been deleted in LDAP but
+ #is still in SFA?
+ existing_users = self.driver.iotlab_api.GetPersons(filter_user)
+ logger.debug(" \r\n SLABSLICE.PY \tverify_person filter_user \
+ %s existing_users %s "
+ % (filter_user, existing_users))
+ #User is in iotlab LDAP
+ if existing_users:
+ for user in existing_users:
+ users_dict[user['email']].update(user)
+ existing_user_emails.append(
+ users_dict[user['email']]['email'])
+
+
+ # User from another known trusted federated site. Check
+ # if a iotlab account matching the email has already been created.
+ else:
+ req = 'mail='
+ if isinstance(users, list):
+ req += users[0]['email']
+ else:
+ req += users['email']
+ ldap_reslt = self.driver.iotlab_api.ldap.LdapSearch(req)
+
+ if ldap_reslt:
+ logger.debug(" SLABSLICE.PY \tverify_person users \
+ USER already in Iotlab \t ldap_reslt %s \
+ " % (ldap_reslt))
+ existing_users.append(ldap_reslt[1])
+
+ else:
+ #User not existing in LDAP
+ logger.debug("SLABSLICE.PY \tverify_person users \
+ not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t \
+ ldap_reslt %s " % (users, ldap_reslt))
+
+ requested_user_emails = users_by_email.keys()
+ requested_user_hrns = \
+ [users_by_email[user]['hrn'] for user in users_by_email]
+ logger.debug("SLABSLICE.PY \tverify_person \
+ users_by_email %s " % (users_by_email))
+
+ #Check that the user of the slice in the slice record
+ #matches one of the existing users
+ try:
+ if slice_record['PI'][0] in requested_user_hrns:
+ logger.debug(" SLABSLICE \tverify_person ['PI']\
+ slice_record %s" % (slice_record))
+
+ except KeyError:
+ pass
+
+ # users to be added, removed or updated
+ #One user in one iotlab slice : there should be no need
+ #to remove/ add any user from/to a slice.
+ #However a user from SFA which is not registered in Iotlab yet
+ #should be added to the LDAP.
+ added_user_emails = set(requested_user_emails).\
+ difference(set(existing_user_emails))
+
+
+ #self.verify_keys(existing_slice_users, updated_users_list, \
+ #peer, append)
+
+ added_persons = []
+ # add new users
+ #requested_user_email is in existing_user_emails
+ if len(added_user_emails) == 0:
+ slice_record['login'] = users_dict[requested_user_emails[0]]['uid']
+ logger.debug(" SLABSLICE \tverify_person QUICK DIRTY %s"
+ % (slice_record))
+
+ for added_user_email in added_user_emails:
+ added_user = users_dict[added_user_email]
+ logger.debug(" IOTLABSLICES \r\n \r\n \t verify_person \
+ added_user %s" % (added_user))
+ person = {}
+ person['peer_person_id'] = None
+ k_list = ['first_name', 'last_name', 'person_id']
+ for k in k_list:
+ if k in added_user:
+ person[k] = added_user[k]
+
+ person['pkey'] = added_user['keys'][0]
+ person['mail'] = added_user['email']
+ person['email'] = added_user['email']
+ person['key_ids'] = added_user.get('key_ids', [])
+
+ ret = self.driver.iotlab_api.AddPerson(person)
+ if 'uid' in ret:
+ # meaning bool is True and the AddPerson was successful
+ person['uid'] = ret['uid']
+ slice_record['login'] = person['uid']
+ else:
+ # error message in ret
+ logger.debug(" IOTLABSLICES ret message %s" %(ret))
+
+ logger.debug(" SLABSLICE \r\n \r\n \t THE SECOND verify_person\
+ person %s" % (person))
+ #Update slice_Record with the id now known to LDAP
+
+
+ added_persons.append(person)
+ return added_persons
+
+
+ def verify_keys(self, persons, users, peer, options={}):
+ """
+ .. warning:: unused
+ """
+ # existing keys
+ key_ids = []
+ for person in persons:
+ key_ids.extend(person['key_ids'])
+ keylist = self.driver.iotlab_api.GetKeys(key_ids, ['key_id', 'key'])
+
+ keydict = {}
+ for key in keylist:
+ keydict[key['key']] = key['key_id']
+ existing_keys = keydict.keys()
+
+ persondict = {}
+ for person in persons:
+ persondict[person['email']] = person
+
+ # add new keys
+ requested_keys = []
+ updated_persons = []
+ users_by_key_string = {}
+ for user in users:
+ user_keys = user.get('keys', [])
+ updated_persons.append(user)
+ for key_string in user_keys:
+ users_by_key_string[key_string] = user
+ requested_keys.append(key_string)
+ if key_string not in existing_keys:
+ key = {'key': key_string, 'key_type': 'ssh'}
+ #try:
+ ##if peer:
+ #person = persondict[user['email']]
+ #self.driver.iotlab_api.UnBindObjectFromPeer(
+ # 'person',person['person_id'],
+ # peer['shortname'])
+ ret = self.driver.iotlab_api.AddPersonKey(
+ user['email'], key)
+ #if peer:
+ #key_index = user_keys.index(key['key'])
+ #remote_key_id = user['key_ids'][key_index]
+ #self.driver.iotlab_api.BindObjectToPeer('key', \
+ #key['key_id'], peer['shortname'], \
+ #remote_key_id)
+
+ #finally:
+ #if peer:
+ #self.driver.iotlab_api.BindObjectToPeer('person', \
+ #person['person_id'], peer['shortname'], \
+ #user['person_id'])
+
+ # remove old keys (only if we are not appending)
+ append = options.get('append', True)
+ if append is False:
+ removed_keys = set(existing_keys).difference(requested_keys)
+ for key in removed_keys:
+ #if peer:
+ #self.driver.iotlab_api.UnBindObjectFromPeer('key', \
+ #key, peer['shortname'])
+
+ user = users_by_key_string[key]
+ self.driver.iotlab_api.DeleteKey(user, key)
+
+ return
return version_string
def GetVersion(self, api, options):
- xrn=Xrn(api.hrn, type='authority')
+ xrn=Xrn(api.hrn, type='authority+am')
version = version_core()
cred_types = [{'geni_type': 'geni_sfa', 'geni_version': str(i)} for i in range(4)[-2:]]
geni_api_versions = ApiVersions().get_versions()
ad_rspec_versions.append(rspec_version.to_dict())
if rspec_version.content_type in ['*', 'request']:
request_rspec_versions.append(rspec_version.to_dict())
- xrn=Xrn(api.hrn, 'authority+sa')
+ xrn=Xrn(api.hrn, 'authority+sm')
version_more = {
'interface':'slicemgr',
'sfa': 2,
(slice_urn, rspec_string, expiration, options) = args
slice_hrn, type = urn_to_hrn(slice_urn)
creds = []
- users = options.get('geni_users', [])
+ users = options.get('sfa_users', [])
manifest_string = getattr(self.driver, "create_sliver")(slice_urn, slice_hrn, creds, rspec_string, users, options)
# slivers allocation
class CreateGid(Method):
"""
- Create a signed credential for the s object with the registry. In addition to being stored in the
+ Create a signed credential for the object with the registry. In addition to being stored in the
SFA database, the appropriate records will also be created in the
PLC databases
+ @param cred credential string
@param xrn urn or hrn of certificate owner
@param cert caller's certificate
- @param cred credential string
@return gid string representation
"""
#log the call
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
-
- # log
- origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, xrn, self.name))
return self.api.manager.CreateGid(self.api, xrn, cert)
from sfa.rspecs.elements.channel import Channel
from sfa.rspecs.version_manager import VersionManager
-from sfa.nitos.nitosxrn import NitosXrn, hostname_to_urn, hrn_to_nitos_slicename, slicename_to_hrn
+from sfa.nitos.nitosxrn import NitosXrn, hostname_to_urn, hrn_to_nitos_slicename, slicename_to_hrn, channel_to_urn
from sfa.planetlab.vlink import get_tc_rate
from sfa.planetlab.topology import Topology
rspec_channel['channel_num'] = channel_number
rspec_channel['start_time'] = channel['start_time']
rspec_channel['duration'] = (int(channel['end_time']) - int(channel['start_time'])) / int(grain)
+ rspec_channel['component_id'] = channel_to_urn(self.driver.hrn, self.driver.testbedInfo['name'], channel_number)
# retreive slicename
for slc in slices:
rspec_channel['channel_num'] = channel['channel']
rspec_channel['frequency'] = channel['frequency']
rspec_channel['standard'] = channel['modulation']
+ rspec_channel['component_id'] = channel_to_urn(self.driver.hrn, self.driver.testbedInfo['name'], channel['channel'])
rspec_channels.append(rspec_channel)
return rspec_channels
rspec.version.add_channels(channels)
if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
- leases, channels = self.get_leases_and_channels(slice, slice_xrn)
- rspec.version.add_leases(leases, channels)
+ leases_channels = self.get_leases_and_channels(slice, slice_xrn)
+ rspec.version.add_leases(leases_channels)
return rspec.toxml()
# add/remove leases (nodes and channels)
# a lease in Nitos RSpec case is a reservation of nodes and channels grouped by (slice,timeslot)
- rspec_requested_nodes, rspec_requested_channels = rspec.version.get_leases()
-
+ rspec_requested_leases = rspec.version.get_leases()
+ rspec_requested_nodes = []
+ rspec_requested_channels = []
+ for lease in rspec_requested_leases:
+ if lease['type'] == 'node':
+ lease.pop('type', None)
+ rspec_requested_nodes.append(lease)
+ else:
+ lease.pop('type', None)
+ rspec_requested_channels.append(lease)
+
nodes = slices.verify_slice_leases_nodes(slice, rspec_requested_nodes)
channels = slices.verify_slice_leases_channels(slice, rspec_requested_channels)
from sfa.rspecs.rspec import RSpec
-from sfa.nitos.nitosxrn import NitosXrn, hrn_to_nitos_slicename, xrn_to_hostname
+from sfa.nitos.nitosxrn import NitosXrn, hrn_to_nitos_slicename, xrn_to_hostname, xrn_to_channel
MAXINT = 2L**31-1
slice_name = hrn_to_nitos_slicename(channel['slice_id'])
if slice_name != slice['slice_name']:
continue
- channel_num = channel['channel_num']
+ channel_num = xrn_to_channel(channel['component_id'])
nitos_channel = self.driver.filter_nitos_results(nitos_channels, {'channel': channel_num})[0]
# fill the requested channel with nitos ids
requested_channel['slice_id'] = slice['slice_id']
return NitosXrn(xrn=hrn,type='any').nitos_authname()
def xrn_to_hostname(hrn):
return Xrn.unescape(NitosXrn(xrn=hrn, type='node').get_leaf())
+# build the hrn of a NITOS channel under <auth>.<login_base>
+def channel_to_hrn (auth, login_base, channel):
+ return NitosXrn(auth=auth+'.'+login_base, channel=channel).get_hrn()
+# build the urn of a NITOS channel under <auth>.<login_base>
+def channel_to_urn (auth, login_base, channel):
+ return NitosXrn(auth=auth+'.'+login_base, channel=channel).get_urn()
+# extract the (unescaped) channel name from a channel xrn
+def xrn_to_channel(hrn):
+ return Xrn.unescape(NitosXrn(xrn=hrn, type='channel').get_leaf())
class NitosXrn (Xrn):
def site_hrn (auth, login_base):
return '.'.join([auth,login_base])
- def __init__ (self, auth=None, hostname=None, slicename=None, email=None, interface=None, **kwargs):
+ def __init__ (self, auth=None, hostname=None, slicename=None, email=None, interface=None, channel=None, **kwargs):
#def hostname_to_hrn(auth_hrn, login_base, hostname):
if hostname is not None:
self.type='node'
self.type = 'interface'
self.hrn = auth + '.' + interface
self.hrn_to_urn()
+ elif channel is not None:
+ self.type='channel'
+ # coerce to str: callers pass the channel number, which may be an
+ # int, and str.join() raises TypeError on non-string items
+ self.hrn=".".join([auth, str(channel)])
+ self.hrn_to_urn()
else:
Xrn.__init__ (self,**kwargs)
#!/usr/bin/python
from collections import defaultdict
-from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn
+from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn, get_authority, get_leaf
from sfa.util.sfatime import utcparse, datetime_to_string
from sfa.util.sfalogging import logger
from sfa.util.faults import SliverDoesNotExist
from sfa.rspecs.elements.granularity import Granularity
from sfa.rspecs.version_manager import VersionManager
-from sfa.planetlab.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename, slicename_to_hrn
+from sfa.planetlab.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename, slicename_to_hrn, xrn_to_ext_slicename, top_auth
from sfa.planetlab.vlink import get_tc_rate
from sfa.planetlab.topology import Topology
from sfa.storage.alchemy import dbsession
except ValueError:
pass
else:
- names.add(xrn.pl_slicename())
+ slice_hrn = xrn.get_hrn()
+ top_auth_hrn = top_auth(slice_hrn)
+ if top_auth_hrn == self.driver.hrn:
+ slice_name = hrn_to_pl_slicename(slice_hrn)
+ else:
+ slice_name = xrn_to_ext_slicename(slice_hrn)
+ names.add(slice_name)
filter = {}
if names:
if not slices:
return []
slice = slices[0]
- slice['hrn'] = PlXrn(auth=self.driver.hrn, slicename=slice['name']).hrn
+ slice['hrn'] = slice_hrn
# get sliver users
persons = []
# construct user key info
users = []
for person in persons:
- name = person['email'][0:person['email'].index('@')]
+ person_urn = hrn_to_urn(self.driver.shell.GetPersonHrn(int(person['person_id'])), 'user')
user = {
'login': slice['name'],
- 'user_urn': Xrn('%s.%s' % (self.driver.hrn, name), type='user').urn,
+ 'user_urn': person_urn,
'keys': [keys[k_id]['key'] for k_id in person['key_ids'] if k_id in keys]
}
users.append(user)
site_id=lease['site_id']
site=sites_dict[site_id]
- rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, site['login_base'], lease['hostname'])
- slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
+ rspec_lease['component_id'] = hrn_to_urn(self.driver.shell.GetNodeHrn(lease['hostname']), 'node')
+ slice_hrn = self.driver.shell.GetSliceHrn(lease['slice_id'])
slice_urn = hrn_to_urn(slice_hrn, 'slice')
rspec_lease['slice_id'] = slice_urn
rspec_lease['start_time'] = lease['t_from']
import sfa.planetlab.peers as peers
from sfa.planetlab.plaggregate import PlAggregate
from sfa.planetlab.plslices import PlSlices
-from sfa.planetlab.plxrn import PlXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, xrn_to_hostname
+from sfa.planetlab.plxrn import PlXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, xrn_to_hostname, xrn_to_ext_slicename, top_auth
def list_to_dict(recs, key):
# ensure slice record exists
slice = slices.verify_slice(xrn.hrn, slice_record, peer, sfa_peer, expiration=expiration, options=options)
# ensure person records exists
- #persons = slices.verify_persons(xrn.hrn, slice, users, peer, sfa_peer, options=options)
+ persons = slices.verify_persons(xrn.hrn, slice, users, peer, sfa_peer, options=options)
# ensure slice attributes exists
slices.verify_slice_attributes(slice, requested_attributes, options=options)
peer = slices.get_peer(slice['hrn'])
sfa_peer = slices.get_sfa_peer(slice['hrn'])
users = options.get('geni_users', [])
- persons = slices.verify_persons(None, slice, users, peer, sfa_peer, options=options)
+ persons = slices.verify_persons(slice['hrn'], slice, users, peer, sfa_peer, options=options)
slices.handle_peer(None, None, persons, peer)
# update sliver allocation states and set them to geni_provisioned
sliver_ids = [sliver['sliver_id'] for sliver in slivers]
slivers = aggregate.get_slivers(urns)
if slivers:
slice_id = slivers[0]['slice_id']
+ slice_name = slivers[0]['name']
node_ids = []
sliver_ids = []
for sliver in slivers:
node_ids.append(sliver['node_id'])
sliver_ids.append(sliver['sliver_id'])
+ # leases
+ leases = self.shell.GetLeases({'name': slice_name})
+ leases_ids = [lease['lease_id'] for lease in leases ]
+
# determine if this is a peer slice
# xxx I wonder if this would not need to use PlSlices.get_peer instead
# in which case plc.peers could be deprecated as this here
# is the only/last call to this last method in plc.peers
- slice_hrn = PlXrn(auth=self.hrn, slicename=slivers[0]['name']).get_hrn()
+ #slice_hrn = PlXrn(auth=self.hrn, slice_name).get_hrn()
+ slice_hrn = self.shell.GetSliceHrn(int(slice_id))
peer = peers.get_peer(self, slice_hrn)
try:
if peer:
self.shell.UnBindObjectFromPeer('slice', slice_id, peer)
self.shell.DeleteSliceFromNodes(slice_id, node_ids)
+ if len(leases_ids) > 0:
+ self.shell.DeleteLeases(leases_ids)
# delete sliver allocation states
SliverAllocation.delete_allocations(sliver_ids)
# set the 'enabled' tag to 0
def shutdown (self, xrn, options={}):
- xrn = PlXrn(xrn=xrn, type='slice')
- slicename = xrn.pl_slicename()
+ # urn_to_hrn() returns a (hrn, type) tuple -- it must be unpacked
+ # (as done elsewhere, e.g. verify_site), otherwise top_auth() and
+ # hrn_to_pl_slicename() would be handed a tuple
+ hrn, _ = urn_to_hrn(xrn)
+ top_auth_hrn = top_auth(hrn)
+ if top_auth_hrn == self.hrn:
+ slicename = hrn_to_pl_slicename(hrn)
+ else:
+ slicename = xrn_to_ext_slicename(hrn)
slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
if not slices:
- raise RecordNotFound(slice_hrn)
+ # 'slice_hrn' is not defined in this scope -- report the unpacked hrn
+ raise RecordNotFound(hrn)
'GetSites','GetNodes',
# Lease management methods
'GetLeases', 'GetLeaseGranularity', 'DeleteLeases','UpdateLeases',
- 'AddLeases'
+ 'AddLeases',
+ # HRN management methods
+ 'SetPersonHrn', 'GetPersonHrn', 'SetSliceHrn', 'GetSliceHrn',
+ 'SetNodeHrn', 'GetNodeHrn'
]
# support for other names - this is experimental
alias_calls = { 'get_authorities':'GetSites',
from sfa.rspecs.rspec import RSpec
from sfa.planetlab.vlink import VLink
from sfa.planetlab.topology import Topology
-from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname
+from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname, xrn_to_ext_slicename, hrn_to_ext_loginbase, top_auth
from sfa.storage.model import SliverAllocation
from sfa.storage.alchemy import dbsession
requested_leases = []
for lease in rspec_requested_leases:
requested_lease = {}
- slice_name = hrn_to_pl_slicename(lease['slice_id'])
+ slice_hrn, _ = urn_to_hrn(lease['slice_id'])
+ top_auth_hrn = top_auth(slice_hrn)
+ if top_auth_hrn == self.driver.hrn:
+ slice_name = hrn_to_pl_slicename(lease['slice_id'])
+ else:
+ slice_name = xrn_to_ext_slicename(lease['slice_id'])
if slice_name != slice['name']:
continue
elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
def verify_site(self, slice_xrn, slice_record={}, peer=None, sfa_peer=None, options={}):
(slice_hrn, type) = urn_to_hrn(slice_xrn)
- site_hrn = get_authority(slice_hrn)
- # login base can't be longer than 20 characters
- slicename = hrn_to_pl_slicename(slice_hrn)
- authority_name = slicename.split('_')[0]
- login_base = authority_name[:20]
+ top_auth_hrn = top_auth(slice_hrn)
+ if top_auth_hrn == self.driver.hrn:
+ # login base can't be longer than 20 characters
+ slicename = hrn_to_pl_slicename(slice_hrn)
+ authority_name = slicename.split('_')[0]
+ login_base = authority_name[:20]
+ else:
+ login_base = hrn_to_ext_loginbase(slice_hrn)
+ authority_name = login_base
+
sites = self.driver.shell.GetSites(login_base)
if not sites:
# create new site record
- site = {'name': 'geni.%s' % authority_name,
+ site = {'name': 'sfa.%s' % authority_name,
'abbreviated_name': authority_name,
'login_base': login_base,
'max_slices': 100,
site['site_id'] = self.driver.shell.AddSite(site)
# exempt federated sites from monitor policies
self.driver.shell.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
-
+
# # is this still necessary?
# # add record to the local registry
# if sfa_peer and slice_record:
site = sites[0]
if peer:
# unbind from peer so we can modify if necessary. Will bind back later
- self.driver.shell.UnBindObjectFromPeer('site', site['site_id'], peer['shortname'])
-
- return site
+ self.driver.shell.UnBindObjectFromPeer('site', site['site_id'], peer['shortname'])
+
+ return site
+
def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, expiration, options={}):
- slicename = hrn_to_pl_slicename(slice_hrn)
- parts = slicename.split("_")
- login_base = parts[0]
+ top_auth_hrn = top_auth(slice_hrn)
+ if top_auth_hrn == self.driver.hrn:
+ slicename = hrn_to_pl_slicename(slice_hrn)
+ parts = slicename.split("_")
+ login_base = parts[0]
+ else:
+ login_base = hrn_to_ext_loginbase(slice_hrn)
+ slicename = xrn_to_ext_slicename(slice_hrn)
+
slices = self.driver.shell.GetSlices([slicename])
expires = int(datetime_to_epoch(utcparse(expiration)))
if not slices:
slice = {'name': slicename,
- 'url': 'No Url',
- 'description': 'No Description'}
+ 'url': slice_record.get('url', slice_hrn),
+ 'description': slice_record.get('description', slice_hrn)}
# add the slice
slice['slice_id'] = self.driver.shell.AddSlice(slice)
slice['node_ids'] = []
slice['person_ids'] = []
+ # set the slice HRN
+ self.driver.shell.SetSliceHrn(int(slice['slice_id']), slice_hrn)
+
if peer and slice_record:
slice['peer_slice_id'] = slice_record.get('slice_id', None)
# set the expiration
self.driver.shell.UpdateSlice(slice['slice_id'], {'expires': expires})
else:
slice = slices[0]
+ # Check slice HRN
+ if self.driver.shell.GetSliceHrn(slice['slice_id']) != slice_hrn:
+ self.driver.shell.SetSliceHrn(slice['slice_id'], slice_hrn)
+
if peer and slice_record:
slice['peer_slice_id'] = slice_record.get('slice_id', None)
# unbind from peer so we can modify if necessary. Will bind back later
def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, options={}):
users_by_email = {}
users_by_site = defaultdict(list)
- users_dict = {}
+ users_dict = {}
for user in users:
user['urn'] = user['urn'].lower()
hrn, type = urn_to_hrn(user['urn'])
username = get_leaf(hrn)
- login_base = PlXrn(xrn=user['urn']).pl_login_base()
user['username'] = username
- user['site'] = login_base
+ top_auth_hrn = top_auth(hrn)
+
+ if top_auth_hrn == self.driver.hrn:
+ login_base = PlXrn(xrn=user['urn']).pl_login_base()
+ else:
+ login_base = hrn_to_ext_loginbase(hrn)
+
+ user['site'] = login_base
if 'email' in user:
- user['email'] = user['email'].lower()
+ user['email'] = user['email'].lower()
users_by_email[user['email']] = user
users_dict[user['email']] = user
else:
if users_by_site:
for login_base in users_by_site:
users = users_by_site[login_base]
- for user in users:
- existing_user_ids_filter.append(user['username']+'@geni.net')
- if existing_user_ids_filter:
+ for user in users:
+ existing_user_ids_filter.append(user['username']+'@geni.net')
+
+ if existing_user_ids_filter:
# get existing users by email
- existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter},
+ existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter},
['person_id', 'key_ids', 'email'])
existing_user_ids.extend([user['email'] for user in existing_users])
-
+
if users_by_site:
# get a list of user sites (based on requeste user urns
site_list = self.driver.shell.GetSites(users_by_site.keys(), \
sites[site['site_id']] = site
site_user_ids.extend(site['person_ids'])
- existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids,
+ existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids,
['person_id', 'key_ids', 'email', 'site_ids'])
# all requested users are either existing users or new (added) users
break
if user_found:
break
-
+
if user_found == False:
fake_email = requested_user['username'] + '@geni.net'
requested_user['email'] = fake_email
users_dict[fake_email] = requested_user
-
+
# requested slice users
requested_user_ids = users_dict.keys()
# existing slice users
existing_slice_users = self.driver.shell.GetPersons(existing_slice_users_filter,
['person_id', 'key_ids', 'email'])
existing_slice_user_ids = [user['email'] for user in existing_slice_users]
-
+
# users to be added, removed or updated
added_user_ids = set(requested_user_ids).difference(existing_user_ids)
added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids)
# add new users
for added_user_id in added_user_ids:
added_user = users_dict[added_user_id]
- hrn, type = urn_to_hrn(added_user['urn'])
+ hrn, type = urn_to_hrn(added_user['urn'])
person = {
'first_name': added_user.get('first_name', hrn),
'last_name': added_user.get('last_name', hrn),
'email': added_user_id,
- 'peer_person_id': None,
- 'keys': [],
+ #'peer_person_id': None,
+ #'keys': [],
#'key_ids': added_user.get('key_ids', []),
}
person['person_id'] = self.driver.shell.AddPerson(person)
+ self.driver.shell.AddRoleToPerson('user', int(person['person_id']))
+ # check user HRN
+ if self.driver.shell.GetPersonHrn(int(person['person_id'])) != hrn:
+ self.driver.shell.SetPersonHrn(int(person['person_id']), hrn)
+
if peer:
person['peer_person_id'] = added_user['person_id']
added_persons.append(person)
-
+
# enable the account
self.driver.shell.UpdatePerson(person['person_id'], {'enabled': True})
-
+
# add person to site
self.driver.shell.AddPersonToSite(added_user_id, added_user['site'])
for key_string in added_user.get('keys', []):
key = {'key':key_string, 'key_type':'ssh'}
key['key_id'] = self.driver.shell.AddPersonKey(person['person_id'], key)
+ if 'keys' not in person:
+ person['keys'] = []
person['keys'].append(key)
# add the registry record
# peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
# 'pointer': person['person_id']}
# self.registry.register_peer_object(self.credential, peer_dict)
-
+
for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
# add person to the slice
self.driver.shell.AddPersonToSlice(added_slice_user_id, slice_record['name'])
# no need to return worry about it getting bound later
return added_persons
-
+
def verify_keys(self, persons, users, peer, options={}):
# existing keys
keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key'])
keydict = {}
for key in keylist:
- keydict[key['key']] = key['key_id']
+ keydict[key['key']] = key['key_id']
existing_keys = keydict.keys()
persondict = {}
for person in persons:
- persondict[person['email']] = person
-
+ persondict[person['email']] = person
+
# add new keys
requested_keys = []
updated_persons = []
key_index = user_keys.index(key['key'])
remote_key_id = user['key_ids'][key_index]
self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
-
+
finally:
if peer:
self.driver.shell.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
-
+
# remove old keys (only if we are not appending)
append = options.get('append', True)
- if append == False:
+ if append == False:
removed_keys = set(existing_keys).difference(requested_keys)
for existing_key_id in keydict:
if keydict[existing_key_id] in removed_keys:
self.driver.shell.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
self.driver.shell.DeleteKey(existing_key_id)
except:
- pass
+ pass
def verify_slice_attributes(self, slice, requested_slice_attributes, options={}, admin=False):
append = options.get('append', True)
removed_slice_attributes = []
ignored_slice_attribute_names = []
existing_slice_attributes = self.driver.shell.GetSliceTags({'slice_id': slice['slice_id']})
-
+
# get attributes that should be removed
for slice_tag in existing_slice_attributes:
if slice_tag['tagname'] in ignored_slice_attribute_names:
if not attribute_found and not append:
removed_slice_attributes.append(slice_tag)
-
+
# get attributes that should be added:
for requested_attribute in requested_slice_attributes:
# if the requested attribute wasn't found we should add it
# specialized Xrn class for PlanetLab
import re
-from sfa.util.xrn import Xrn
+from sfa.util.xrn import Xrn, get_authority
# temporary helper functions to use this module instead of namespace
def hostname_to_hrn (auth, login_base, hostname):
def xrn_to_hostname(hrn):
return Xrn.unescape(PlXrn(xrn=hrn, type='node').get_leaf())
+# helpers to handle external objects created via federation
+def xrn_to_ext_slicename (xrn):
+ # map a federated slice xrn to a local PL slice name:
+ # <site hrn with dots replaced>_<slice leaf>
+ slice_hrn=PlXrn(xrn=xrn,type='slice').get_hrn()
+ site_hrn = get_authority(slice_hrn)
+ # NOTE(review): '8' as a separator looks odd -- presumably chosen because
+ # '.' and '_' are not usable inside a login_base; confirm against PLCAPI
+ login_base = '8'.join(site_hrn.split('.'))
+ slice_name = '_'.join([login_base, slice_hrn.split('.')[-1]])
+ return slice_name
+
+def hrn_to_ext_loginbase (hrn):
+ # derive the login_base (truncated to 20 chars, matching the limit used
+ # in verify_site) for a federated hrn's authority
+ site_hrn = get_authority(hrn)
+ login_base = '8'.join(site_hrn.split('.'))[:20]
+ return login_base
+
+def top_auth (hrn):
+ # topmost authority = first component of the hrn
+ return hrn.split('.')[0]
+
class PlXrn (Xrn):
@staticmethod
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+# generic (name, value) pair element for rspecs
+class Attribute(Element):
+
+ fields = [
+ 'name',
+ 'value',
+ ]
+
'slice_id',
'start_time',
'duration',
+ 'component_id',
]
-from sfa.util.xrn import Xrn
+from sfa.util.xrn import Xrn, get_leaf
from sfa.util.xml import XpathFilter
from sfa.rspecs.elements.node import NodeElement
from sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
from sfa.rspecs.elements.versions.pgv2Interface import PGv2Interface
-from sfa.planetlab.plxrn import xrn_to_hostname
class Node:
@staticmethod
node_elems.append(node_elem)
# set component name
if node.get('component_id'):
- component_name = xrn_to_hostname(node['component_id'])
+ component_name = Xrn.unescape(get_leaf(Xrn(node['component_id']).get_hrn()))
node_elem.set('component_name', component_name)
# set hardware types
if node.get('hardware_types'):
from sfa.util.sfalogging import logger
from sfa.util.xml import XpathFilter
from sfa.util.xrn import Xrn
-from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
-
-#from sfa.rspecs.elements.versions.sfav1PLTag import SFAv1PLTag
-#from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from sfa.rspecs.elements.lease import Lease
-class Slabv1Lease:
+class Iotlabv1Lease:
@staticmethod
def add_leases(xml, leases):
-
+
network_elems = xml.xpath('//network')
if len(network_elems) > 0:
network_elem = network_elems[0]
network_elem = xml.add_element('network', name = network_urn)
else:
network_elem = xml
-
- lease_elems = []
+
+ lease_elems = []
for lease in leases:
- lease['start_time'] = datetime_to_string(utcparse(lease['start_time']))
lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
lease_elem = network_elem.add_instance('lease', lease, lease_fields)
def get_leases(xml, filter={}):
xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
lease_elems = xml.xpath(xpath)
- return Slabv1Lease.get_lease_objs(lease_elems)
+ return Iotlabv1Lease.get_lease_objs(lease_elems)
@staticmethod
def get_lease_objs(lease_elems):
for node_elem in node_elems:
lease = Lease(lease_elem.attrib, lease_elem)
lease['slice_id'] = lease_elem.attrib['slice_id']
- lease['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
+ lease['start_time'] = lease_elem.attrib['start_time']
lease['duration'] = lease_elem.attrib['duration']
lease['component_id'] = node_elem.attrib['component_id']
leases.append(lease)
from sfa.rspecs.elements.hardware_type import HardwareType
from sfa.rspecs.elements.element import Element
from sfa.rspecs.elements.interface import Interface
-from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+from sfa.rspecs.elements.versions.iotlabv1Sliver import Iotlabv1Sliver
from sfa.util.sfalogging import logger
-class SlabNode(NodeElement):
+class IotlabNode(NodeElement):
#First get the fields already defined in the class Node
fields = list(NodeElement.fields)
- #Extend it with senslab's specific fields
+ #Extend it with iotlab's specific fields
fields.extend (['archi', 'radio', 'mobile','position'])
-
-class SlabPosition(Element):
+
+class IotlabPosition(Element):
fields = ['posx', 'posy','posz']
-
-class SlabLocation(Location):
+
+class IotlabLocation(Location):
fields = list(Location.fields)
fields.extend (['site'])
-
+
+
+class IotlabMobility(Element):
+ """ Class to give information of a node's mobility, and what kind of
+ mobility it is (train, roomba robot ...) """
+ fields = ['mobile', 'mobility_type']
-class Slabv1Node:
-
+class Iotlabv1Node:
+
@staticmethod
def add_connection_information(xml, ldap_username, sites_set):
- """ Adds login and ssh connection info in the network item in
- the xml. Does not create the network element, therefore
+ """ Adds login and ssh connection info in the network item in
+ the xml. Does not create the network element, therefore
should be used after add_nodes, which creates the network item.
-
+
"""
- logger.debug(" add_connection_information " )
+ logger.debug(" add_connection_information ")
#Get network item in the xml
- network_elems = xml.xpath('//network')
+ network_elems = xml.xpath('//network')
if len(network_elems) > 0:
network_elem = network_elems[0]
- slab_network_dict = {}
- slab_network_dict['login'] = ldap_username
-
- slab_network_dict['ssh'] = \
- ['ssh ' + ldap_username + '@'+site+'.senslab.info' \
- for site in sites_set]
- network_elem.set('ssh', \
- unicode(slab_network_dict['ssh']))
- network_elem.set('login', unicode( slab_network_dict['login']))
-
-
+ iotlab_network_dict = {}
+ iotlab_network_dict['login'] = ldap_username
+
+ iotlab_network_dict['ssh'] = \
+ ['ssh ' + ldap_username + '@'+site+'.iotlab.info'
+ for site in sites_set]
+ network_elem.set('ssh',
+ unicode(iotlab_network_dict['ssh']))
+ network_elem.set('login', unicode(iotlab_network_dict['login']))
+
@staticmethod
def add_nodes(xml, nodes):
+ """Adds the nodes to the xml.
+
+ Adds the nodes as well as dedicated iotlab fields to the node xml
+ element.
+
+ :param xml: the xml being constructed.
+ :type xml: xml
+ :param nodes: list of node dict
+ :type nodes: list
+ :returns: a list of node elements.
+ :rtype: list
+
+ """
#Add network item in the xml
network_elems = xml.xpath('//network')
if len(network_elems) > 0:
network_elem = network_elems[0]
elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
network_urn = nodes[0]['component_manager_id']
- network_elem = xml.add_element('network', \
- name = Xrn(network_urn).get_hrn())
+ network_elem = xml.add_element('network',
+ name=Xrn(network_urn).get_hrn())
else:
network_elem = xml
-
- logger.debug("slabv1Node \t add_nodes nodes %s \r\n "%(nodes[0]))
+
node_elems = []
#Then add nodes items to the network item in the xml
for node in nodes:
#Attach this node to the network element
- node_fields = ['component_manager_id', 'component_id', 'exclusive',\
- 'boot_state', 'mobile']
+ node_fields = ['component_manager_id', 'component_id', 'exclusive',
+ 'boot_state', 'mobile']
node_elem = network_elem.add_instance('node', node, node_fields)
node_elems.append(node_elem)
-
+
#Set the attibutes of this node element
- for attribute in node:
+ for attribute in node:
# set component name
- if attribute is 'component_id':
+ # use '==', not 'is': identity comparison on strings only works
+ # through the CPython interning accident
+ if attribute == 'component_name':
component_name = node['component_name']
node_elem.set('component_name', component_name)
-
- # set hardware types, extend fields to add Senslab's architecture
+
+ # set hardware types, extend fields to add Iotlab's architecture
#and radio type
-
+
if attribute is 'hardware_types':
- for hardware_type in node.get('hardware_types', []):
+ for hardware_type in node.get('hardware_types', []):
fields = HardwareType.fields
- fields.extend(['archi','radio'])
+ fields.extend(['archi', 'radio'])
node_elem.add_instance('hardware_types', node, fields)
+ # set mobility
+ if attribute is 'mobility':
+ node_elem.add_instance('mobility', node['mobility'],
+ IotlabMobility.fields)
# set location
if attribute is 'location':
- node_elem.add_instance('location', node['location'], \
- SlabLocation.fields)
+ node_elem.add_instance('location', node['location'],
+ IotlabLocation.fields)
+
# add granularity of the reservation system
#TODO put the granularity in network instead SA 18/07/12
- if attribute is 'granularity' :
+ if attribute is 'granularity':
granularity = node['granularity']
if granularity:
- node_elem.add_instance('granularity', \
- granularity, granularity.fields)
-
-
+ node_elem.add_instance('granularity',
+ granularity, granularity.fields)
+
# set available element
if attribute is 'boot_state':
if node.get('boot_state').lower() == 'alive':
- available_elem = node_elem.add_element('available', \
- now='true')
+ available_elem = node_elem.add_element('available',
+ now='true')
else:
- available_elem = node_elem.add_element('available', \
- now='false')
+ available_elem = node_elem.add_element('available',
+ now='false')
- #set position
+ #set position
if attribute is 'position':
- node_elem.add_instance('position', node['position'], \
- SlabPosition.fields)
+ node_elem.add_instance('position', node['position'],
+ IotlabPosition.fields)
## add services
- #PGv2Services.add_services(node_elem, node.get('services', []))
+ #PGv2Services.add_services(node_elem, node.get('services', []))
# add slivers
if attribute is 'slivers':
slivers = node.get('slivers', [])
if not slivers:
# we must still advertise the available sliver types
- slivers = Sliver({'type': 'slab-node'})
+ slivers = Sliver({'type': 'iotlab-node'})
# we must also advertise the available initscripts
#slivers['tags'] = []
- #if node.get('pl_initscripts'):
+ #if node.get('pl_initscripts'):
#for initscript in node.get('pl_initscripts', []):
#slivers['tags'].append({'name': 'initscript', \
#'value': initscript['name']})
-
- Slabv1Sliver.add_slivers(node_elem, slivers)
+
+ Iotlabv1Sliver.add_slivers(node_elem, slivers)
return node_elems
-
-
@staticmethod
def get_nodes(xml, filter={}):
xpath = '//node%s | //default:node%s' % (XpathFilter.xpath(filter), \
XpathFilter.xpath(filter))
- node_elems = xml.xpath(xpath)
- return Slabv1Node.get_node_objs(node_elems)
+ node_elems = xml.xpath(xpath)
+ return Iotlabv1Node.get_node_objs(node_elems)
- @staticmethod
+ @staticmethod
def get_nodes_with_slivers(xml, sliver_filter={}):
xpath = '//node[count(sliver)>0] | \
- //default:node[count(default:sliver) > 0]'
- node_elems = xml.xpath(xpath)
+ //default:node[count(default:sliver) > 0]'
+ node_elems = xml.xpath(xpath)
logger.debug("SLABV1NODE \tget_nodes_with_slivers \
node_elems %s"%(node_elems))
- return Slabv1Node.get_node_objs(node_elems)
+ return Iotlabv1Node.get_node_objs(node_elems)
@staticmethod
def get_node_objs(node_elems):
nodes = []
for node_elem in node_elems:
node = NodeElement(node_elem.attrib, node_elem)
- nodes.append(node)
+ nodes.append(node)
if 'component_id' in node_elem.attrib:
node['authority_id'] = \
Xrn(node_elem.attrib['component_id']).get_authority_urn()
-
+
# get hardware types
hardware_type_elems = node_elem.xpath('./default:hardware_type | \
./hardware_type')
node['hardware_types'] = [hw_type.get_instance(HardwareType) \
for hw_type in hardware_type_elems]
-
+
# get location
location_elems = node_elem.xpath('./default:location | ./location')
locations = [location_elem.get_instance(Location) \
for location_elem in location_elems]
if len(locations) > 0:
node['location'] = locations[0]
-
-
+
+
# get interfaces
iface_elems = node_elem.xpath('./default:interface | ./interface')
node['interfaces'] = [iface_elem.get_instance(Interface) \
for iface_elem in iface_elems]
+ # get position
+ position_elems = node_elem.xpath('./default:position | ./position')
+ if position_elems:
+ position_elem = position_elems[0]
+ node['position'] = position_elem.get_instance(IotlabPosition)
+
# get services
#node['services'] = PGv2Services.get_services(node_elem)
# get slivers
- node['slivers'] = Slabv1Sliver.get_slivers(node_elem)
+ node['slivers'] = Iotlabv1Sliver.get_slivers(node_elem)
available_elems = node_elem.xpath('./default:available | \
./available')
if len(available_elems) > 0 and 'name' in available_elems[0].attrib:
- if available_elems[0].attrib.get('now', '').lower() == 'true':
+ if available_elems[0].attrib.get('now', '').lower() == 'true':
node['boot_state'] = 'boot'
- else:
- node['boot_state'] = 'disabled'
-
+ else:
+ node['boot_state'] = 'disabled'
+
logger.debug("SLABV1NODE \tget_nodes_objs \
#nodes %s"%(nodes))
return nodes
@staticmethod
def add_slivers(xml, slivers):
- logger.debug("SLABv1NODE \tadd_slivers ")
+ logger.debug("Iotlabv1NODE \tadd_slivers ")
component_ids = []
for sliver in slivers:
filter_sliver = {}
sliver = {}
elif 'component_id' in sliver and sliver['component_id']:
filter_sliver['component_id'] = '*%s*' % sliver['component_id']
- if not filter_sliver:
+ if not filter_sliver:
continue
- nodes = Slabv1Node.get_nodes(xml, filter_sliver)
+ nodes = Iotlabv1Node.get_nodes(xml, filter_sliver)
if not nodes:
continue
node = nodes[0]
- Slabv1Sliver.add_slivers(node, sliver)
+ Iotlabv1Sliver.add_slivers(node, sliver)
@staticmethod
def remove_slivers(xml, hostnames):
for hostname in hostnames:
- nodes = Slabv1Node.get_nodes(xml, \
+ nodes = Iotlabv1Node.get_nodes(xml, \
{'component_id': '*%s*' % hostname})
for node in nodes:
- slivers = Slabv1Sliver.get_slivers(node.element)
+ slivers = Iotlabv1Sliver.get_slivers(node.element)
for sliver in slivers:
- node.element.remove(sliver.element)
+ node.element.remove(sliver.element)
+
+
-
-
#from sfa.rspecs.elements.versions.pgv2DiskImage import PGv2DiskImage
import sys
-class Slabv1Sliver:
+class Iotlabv1Sliver:
@staticmethod
def add_slivers(xml, slivers):
if not slivers:
- return
+ return
if not isinstance(slivers, list):
slivers = [slivers]
- for sliver in slivers:
+ for sliver in slivers:
#sliver_elem = xml.add_element('sliver_type')
sliver_elem = xml.add_element('sliver')
if sliver.get('type'):
sliver_elem.set('client_id', sliver['client_id'])
#images = sliver.get('disk_images')
#if images and isinstance(images, list):
- #Slabv1DiskImage.add_images(sliver_elem, images)
- Slabv1Sliver.add_sliver_attributes(sliver_elem, sliver.get('tags', []))
-
+ #Iotlabv1DiskImage.add_images(sliver_elem, images)
+ Iotlabv1Sliver.add_sliver_attributes(sliver_elem, sliver.get('tags', []))
+
@staticmethod
def add_sliver_attributes(xml, attributes):
- if attributes:
+ if attributes:
for attribute in attributes:
if attribute['name'] == 'initscript':
xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
attrib_elem = xml.add_element('{%s}info' % self.namespaces['flack'])
attrib_dict = eval(tag['value'])
for (key, value) in attrib_dict.items():
- attrib_elem.set(key, value)
+ attrib_elem.set(key, value)
@staticmethod
def get_slivers(xml, filter={}):
xpath = './default:sliver | ./sliver'
-
+
sliver_elems = xml.xpath(xpath)
slivers = []
- for sliver_elem in sliver_elems:
+ for sliver_elem in sliver_elems:
sliver = Sliver(sliver_elem.attrib,sliver_elem)
- if 'component_id' in xml.attrib:
+ if 'component_id' in xml.attrib:
sliver['component_id'] = xml.attrib['component_id']
if 'name' in sliver_elem.attrib:
sliver['type'] = sliver_elem.attrib['name']
- #sliver['images'] = Slabv1DiskImage.get_images(sliver_elem)
-
+ #sliver['images'] = Iotlabv1DiskImage.get_images(sliver_elem)
+
print>>sys.stderr, "\r\n \r\n SLABV1SLIVER.PY \t\t\t get_slivers sliver %s " %( sliver)
slivers.append(sliver)
return slivers
@staticmethod
def get_sliver_attributes(xml, filter={}):
- return []
\ No newline at end of file
+ return []
\ No newline at end of file
channel_elems = []
for channel in channels:
- channel_fields = ['channel_num', 'frequency', 'standard']
+ channel_fields = ['channel_num', 'frequency', 'standard', 'component_id']
channel_elem = spectrum_elem.add_instance('channel', channel, channel_fields)
channel_elems.append(channel_elem)
channel['channel_num'] = channel_elem.attrib['channel_num']
channel['frequency'] = channel_elem.attrib['frequency']
channel['standard'] = channel_elem.attrib['standard']
+ channel['component_id'] = channel_elem.attrib['component_id']
channels.append(channel)
return channels
lease_elems = []
for lease in grouped_leases:
- lease[0]['start_time'] = datetime_to_string(utcparse(lease[0]['start_time']))
+ #lease[0]['start_time'] = datetime_to_string(utcparse(lease[0]['start_time']))
lease_fields = ['slice_id', 'start_time', 'duration']
lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
# add reserved channels of this lease
#channels = [{'channel_id': 1}, {'channel_id': 2}]
for channel in channels:
- channel['start_time'] = datetime_to_string(utcparse(channel['start_time']))
+ #channel['start_time'] = datetime_to_string(utcparse(channel['start_time']))
if channel['slice_id'] == lease[0]['slice_id'] and channel['start_time'] == lease[0]['start_time'] and channel['duration'] == lease[0]['duration']:
- lease_elem.add_instance('channel', channel, ['channel_num'])
+ lease_elem.add_instance('channel', channel, ['component_id'])
@staticmethod
for node_elem in node_elems:
lease = Lease(lease_elem.attrib, lease_elem)
lease['slice_id'] = lease_elem.attrib['slice_id']
- lease['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
+ #lease['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
+ lease['start_time'] = lease_elem.attrib['start_time']
lease['duration'] = lease_elem.attrib['duration']
lease['component_id'] = node_elem.attrib['component_id']
+ lease['type'] = 'node'
leases.append(lease)
#get channels
channel_elems = lease_elem.xpath('./default:channel | ./channel')
for channel_elem in channel_elems:
channel = Channel(channel_elem.attrib, channel_elem)
channel['slice_id'] = lease_elem.attrib['slice_id']
- channel['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
+ #channel['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
+ channel['start_time'] = lease_elem.attrib['start_time']
channel['duration'] = lease_elem.attrib['duration']
- channel['channel_num'] = channel_elem.attrib['channel_num']
+ channel['component_id'] = channel_elem.attrib['component_id']
+ channel['type'] = 'channel'
channels.append(channel)
- return (leases, channels)
+ return leases + channels
class NITOSv1Node:
@staticmethod
- def add_nodes(xml, nodes):
+ def add_nodes(xml, nodes, rspec_content_type=None):
network_elems = xml.xpath('//network')
if len(network_elems) > 0:
network_elem = network_elems[0]
tag_elem = node_elem.add_element(tag['tagname'])
tag_elem.set_text(tag['value'])
NITOSv1Sliver.add_slivers(node_elem, node.get('slivers', []))
+
+ # add sliver tag in Request Rspec
+ if rspec_content_type == "request":
+ node_elem.add_instance('sliver', '', [])
@staticmethod
def add_slivers(xml, slivers):
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
-from sfa.planetlab.plxrn import PlXrn
+#from sfa.planetlab.plxrn import PlXrn
class NITOSv1Sliver:
for tag in tags:
NITOSv1Sliver.add_sliver_attribute(sliver_elem, tag['tagname'], tag['value'])
if sliver.get('sliver_id'):
- name = PlXrn(xrn=sliver.get('sliver_id')).pl_slicename()
+ name = Xrn(xrn=sliver.get('sliver_id')).get_hrn().split('.')[-1]
sliver_elem.set('name', name)
@staticmethod
from sfa.rspecs.elements.versions.pgv2Interface import PGv2Interface
from sfa.rspecs.elements.lease import Lease
-from sfa.planetlab.plxrn import xrn_to_hostname
class PGv2Lease:
@staticmethod
lease_elems = []
for lease in grouped_leases:
- lease[0]['start_time'] = datetime_to_string(utcparse(lease[0]['start_time']))
+ #lease[0]['start_time'] = datetime_to_string(utcparse(lease[0]['start_time']))
lease_fields = ['slice_id', 'start_time', 'duration']
lease_elem = xml.add_instance('lease', lease[0], lease_fields)
for node_elem in node_elems:
lease = Lease(lease_elem.attrib, lease_elem)
lease['slice_id'] = lease_elem.attrib['slice_id']
- lease['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
+ #lease['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
+ lease['start_time'] = lease_elem.attrib['start_time']
lease['duration'] = lease_elem.attrib['duration']
lease['component_id'] = node_elem.attrib['component_id']
leases.append(lease)
-from sfa.util.xrn import Xrn
+from sfa.util.xrn import Xrn, get_leaf
from sfa.util.xml import XpathFilter
from sfa.rspecs.elements.node import NodeElement
from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
from sfa.rspecs.elements.versions.pgv2Interface import PGv2Interface
+from sfa.rspecs.elements.versions.sfav1PLTag import SFAv1PLTag
from sfa.rspecs.elements.granularity import Granularity
+from sfa.rspecs.elements.attribute import Attribute
-from sfa.planetlab.plxrn import xrn_to_hostname
class PGv2Node:
@staticmethod
- def add_nodes(xml, nodes):
+ def add_nodes(xml, nodes, rspec_content_type=None):
node_elems = []
for node in nodes:
node_fields = ['component_manager_id', 'component_id', 'client_id', 'sliver_id', 'exclusive']
node_elems.append(node_elem)
# set component name
if node.get('component_id'):
- component_name = xrn_to_hostname(node['component_id'])
+ component_name = Xrn.unescape(get_leaf(Xrn(node['component_id']).get_hrn()))
node_elem.set('component_name', component_name)
# set hardware types
if node.get('hardware_types'):
node_elem.add_instance('location', node['location'], Location.fields)
# set granularity
- if node['exclusive'] == "true":
+ if node.get('exclusive') == "true":
granularity = node.get('granularity')
node_elem.add_instance('granularity', granularity, granularity.fields)
# set interfaces
slivers = node.get('slivers', [])
if not slivers:
# we must still advertise the available sliver types
- slivers = Sliver({'type': 'plab-vserver'})
+ if node.get('sliver_type'):
+ slivers = Sliver({'type': node['sliver_type']})
+ else:
+ # Planet lab
+ slivers = Sliver({'type': 'plab-vserver'})
# we must also advertise the available initscripts
slivers['tags'] = []
if node.get('pl_initscripts'):
for initscript in node.get('pl_initscripts', []):
slivers['tags'].append({'name': 'initscript', 'value': initscript['name']})
PGv2SliverType.add_slivers(node_elem, slivers)
+
+ # advertise the node tags
+ tags = node.get('tags', [])
+ if tags:
+ for tag in tags:
+ tag['name'] = tag.pop('tagname')
+ node_elem.add_instance('{%s}attribute' % xml.namespaces['planetlab'], tag, ['name', 'value'])
+
+ # add sliver tag in Request Rspec
+ #if rspec_content_type == "request":
+ # node_elem.add_instance('sliver', '', [])
+
return node_elems
+
@staticmethod
def get_nodes(xml, filter={}):
xpath = '//node%s | //default:node%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
# get hardware types
hardware_type_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
- node['hardware_types'] = [hw_type.get_instance(HardwareType) for hw_type in hardware_type_elems]
+ node['hardware_types'] = [dict(hw_type.get_instance(HardwareType)) for hw_type in hardware_type_elems]
# get location
location_elems = node_elem.xpath('./default:location | ./location')
- locations = [location_elem.get_instance(Location) for location_elem in location_elems]
+ locations = [dict(location_elem.get_instance(Location)) for location_elem in location_elems]
if len(locations) > 0:
node['location'] = locations[0]
# get interfaces
iface_elems = node_elem.xpath('./default:interface | ./interface')
- node['interfaces'] = [iface_elem.get_instance(Interface) for iface_elem in iface_elems]
+ node['interfaces'] = [dict(iface_elem.get_instance(Interface)) for iface_elem in iface_elems]
# get services
node['services'] = PGv2Services.get_services(node_elem)
# get slivers
node['slivers'] = PGv2SliverType.get_slivers(node_elem)
- available_elems = node_elem.xpath('./default:available | ./available')
- if len(available_elems) > 0 and 'name' in available_elems[0].attrib:
+
+ # get boot state
+ available_elems = node_elem.xpath('./default:available | ./available')
+ if len(available_elems) > 0 and 'now' in available_elems[0].attrib:
if available_elems[0].attrib.get('now', '').lower() == 'true':
node['boot_state'] = 'boot'
else:
node['boot_state'] = 'disabled'
+
+ # get initscripts
+ try:
+ node['pl_initscripts'] = []
+ initscript_elems = node_elem.xpath('./default:sliver_type/planetlab:initscript | ./sliver_type/initscript')
+ if len(initscript_elems) > 0:
+ for initscript_elem in initscript_elems:
+ if 'name' in initscript_elem.attrib:
+ node['pl_initscripts'].append(dict(initscript_elem.attrib))
+ except:
+ pass
+
+ # get node tags
+ try:
+ tag_elems = node_elem.xpath('./planetlab:attribute | ./attribute')
+ node['tags'] = []
+ if len(tag_elems) > 0:
+ for tag_elem in tag_elems:
+ tag = dict(tag_elem.get_instance(Attribute))
+ tag['tagname'] = tag.pop('name')
+ node['tags'].append(tag)
+ except:
+ pass
+
return nodes
from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from sfa.rspecs.elements.lease import Lease
-from sfa.planetlab.plxrn import xrn_to_hostname
class SFAv1Lease:
lease_elems = []
for lease in grouped_leases:
- lease[0]['start_time'] = datetime_to_string(utcparse(lease[0]['start_time']))
+ #lease[0]['start_time'] = datetime_to_string(utcparse(lease[0]['start_time']))
lease_fields = ['slice_id', 'start_time', 'duration']
lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
for node_elem in node_elems:
lease = Lease(lease_elem.attrib, lease_elem)
lease['slice_id'] = lease_elem.attrib['slice_id']
- lease['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
+ #lease['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
+ lease['start_time'] = lease_elem.attrib['start_time']
lease['duration'] = lease_elem.attrib['duration']
lease['component_id'] = node_elem.attrib['component_id']
leases.append(lease)
from sfa.util.sfalogging import logger
from sfa.util.xml import XpathFilter
-from sfa.util.xrn import Xrn
+from sfa.util.xrn import Xrn, get_leaf
from sfa.rspecs.elements.element import Element
from sfa.rspecs.elements.node import NodeElement
from sfa.rspecs.elements.versions.sfav1PLTag import SFAv1PLTag
from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
-from sfa.planetlab.plxrn import xrn_to_hostname
class SFAv1Node:
@staticmethod
- def add_nodes(xml, nodes):
+ def add_nodes(xml, nodes, rspec_content_type=None):
network_elems = xml.xpath('//network')
if len(network_elems) > 0:
network_elem = network_elems[0]
# set component_name attribute and hostname element
if 'component_id' in node and node['component_id']:
- component_name = xrn_to_hostname(node['component_id'])
+ component_name = Xrn.unescape(get_leaf(Xrn(node['component_id']).get_hrn()))
node_elem.set('component_name', component_name)
hostname_elem = node_elem.add_element('hostname')
hostname_elem.set_text(component_name)
tag_elem.set_text(tag['value'])
SFAv1Sliver.add_slivers(node_elem, node.get('slivers', []))
+ # add sliver tag in Request Rspec
+ if rspec_content_type == "request":
+ node_elem.add_instance('sliver', '', [])
+
@staticmethod
def add_slivers(xml, slivers):
component_ids = []
node['authority_id'] = node_elem.attrib['site_id']
# get location
location_elems = node_elem.xpath('./default:location | ./location')
- locations = [loc_elem.get_instance(Location) for loc_elem in location_elems]
+ locations = [dict(loc_elem.get_instance(Location)) for loc_elem in location_elems]
if len(locations) > 0:
node['location'] = locations[0]
# get bwlimit
node['bwlimit'] = bwlimits[0]
# get interfaces
iface_elems = node_elem.xpath('./default:interface | ./interface')
- ifaces = [iface_elem.get_instance(Interface) for iface_elem in iface_elems]
+ ifaces = [dict(iface_elem.get_instance(Interface)) for iface_elem in iface_elems]
node['interfaces'] = ifaces
# get services
node['services'] = PGv2Services.get_services(node_elem)
node['tags'] = SFAv1PLTag.get_pl_tags(node_elem, ignore=NodeElement.fields+["hardware_type"])
# get hardware types
hardware_type_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
- node['hardware_types'] = [hw_type.get_instance(HardwareType) for hw_type in hardware_type_elems]
+ node['hardware_types'] = [dict(hw_type.get_instance(HardwareType)) for hw_type in hardware_type_elems]
# temporary... play nice with old slice manager rspec
if not node['component_name']:
for elem in xml.iterchildren():
if elem.tag not in ignore:
pl_tag = PLTag({'tagname': elem.tag, 'value': elem.text})
- pl_tags.append(pl_tag)
+ pl_tags.append(dict(pl_tag))
return pl_tags
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.versions.sfav1PLTag import SFAv1PLTag
-from sfa.planetlab.plxrn import PlXrn
class SFAv1Sliver:
from sfa.rspecs.version import RSpecVersion
import sys
-from sfa.rspecs.elements.versions.slabv1Lease import Slabv1Lease
-from sfa.rspecs.elements.versions.slabv1Node import Slabv1Node
-from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+# from sfa.rspecs.elements.versions.iotlabv1Lease import Iotlabv1Lease
+from sfa.rspecs.elements.versions.iotlabv1Node import Iotlabv1Node
+from sfa.rspecs.elements.versions.iotlabv1Sliver import Iotlabv1Sliver
from sfa.rspecs.elements.versions.sfav1Lease import SFAv1Lease
from sfa.util.sfalogging import logger
-
-class Slabv1(RSpecVersion):
+
+
+class Iotlabv1(RSpecVersion):
+ """
+ Defines Iotlab style RSpec and associated methods to parse and create a
+ valid Iotlab XML Rspec.
+ """
#enabled = True
- type = 'Slab'
+ type = 'Iotlab'
content_type = 'ad'
version = '1'
#template = '<RSpec type="%s"></RSpec>' % type
}
namespaces = dict(extensions.items() + [('default', namespace)])
elements = []
-
- # Network
+
+ # Network
def get_networks(self):
- #WARNING Added //default:network to the xpath
+ #WARNING Added //default:network to the xpath
#otherwise network element not detected 16/07/12 SA
-
- network_elems = self.xml.xpath('//network | //default:network')
+
+ network_elems = self.xml.xpath('//network | //default:network')
networks = [network_elem.get_instance(fields=['name', 'slice']) for \
network_elem in network_elems]
- return networks
-
+ return networks
def add_network(self, network):
network_tags = self.xml.xpath('//network[@name="%s"]' % network)
network_tag = network_tags[0]
return network_tag
-
# Nodes
-
def get_nodes(self, filter=None):
- return Slabv1Node.get_nodes(self.xml, filter)
+ return Iotlabv1Node.get_nodes(self.xml, filter)
def get_nodes_with_slivers(self):
- return Slabv1Node.get_nodes_with_slivers(self.xml)
-
+ return Iotlabv1Node.get_nodes_with_slivers(self.xml)
+
def get_slice_timeslot(self ):
- return Slabv1Timeslot.get_slice_timeslot(self.xml)
-
+ return Iotlabv1Timeslot.get_slice_timeslot(self.xml)
+
def add_connection_information(self, ldap_username, sites_set):
- return Slabv1Node.add_connection_information(self.xml,ldap_username, sites_set)
-
- def add_nodes(self, nodes, check_for_dupes=False):
- return Slabv1Node.add_nodes(self.xml,nodes )
-
+ return Iotlabv1Node.add_connection_information(self.xml,ldap_username, sites_set)
+
+ def add_nodes(self, nodes, check_for_dupes=False, rspec_content_type=None):
+ return Iotlabv1Node.add_nodes(self.xml,nodes, rspec_content_type)
+
def merge_node(self, source_node_tag, network, no_dupes = False):
logger.debug("SLABV1 merge_node")
#if no_dupes and self.get_node_element(node['hostname']):
network_tag.append(deepcopy(source_node_tag))
# Slivers
-
- def get_sliver_attributes(self, hostname, node, network=None):
+
+ def get_sliver_attributes(self, hostname, node, network=None):
print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY get_sliver_attributes hostname %s " %(hostname)
nodes = self.get_nodes({'component_id': '*%s*' %hostname})
- attribs = []
+ attribs = []
print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY get_sliver_attributes-----------------nodes %s " %(nodes)
if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
node = nodes[0]
- #if node :
+ #if node :
#sliver = node.xpath('./default:sliver | ./sliver')
#sliver = node.xpath('./default:sliver', namespaces=self.namespaces)
sliver = node['slivers']
-
+
if sliver is not None and isinstance(sliver, list) and len(sliver) > 0:
sliver = sliver[0]
attribs = sliver
return attribs
def get_slice_attributes(self, network=None):
-
+
slice_attributes = []
nodes_with_slivers = self.get_nodes_with_slivers()
value = text
attribute = {'name': 'initscript', 'value': value, 'node_id': node}
slice_attributes.append(attribute)
-
+
return slice_attributes
def add_slivers(self, hostnames, attributes=[], sliver_urn=None, append=False):
# all nodes hould already be present in the rspec. Remove all
# nodes that done have slivers
- print>>sys.stderr, "\r\n \r\n \r\n \t\t\t SLABv1.PY add_slivers ----->get_node "
+ print>>sys.stderr, "\r\n \r\n \r\n \t\t\t Iotlabv1.PY add_slivers ----->get_node "
for hostname in hostnames:
node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
if not node_elems:
continue
node_elem = node_elems[0]
-
+
# determine sliver types for this node
#TODO : add_slivers valid type of sliver needs to be changed 13/07/12 SA
- valid_sliver_types = ['slab-node', 'emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
+ valid_sliver_types = ['iotlab-node', 'emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
#valid_sliver_types = ['emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
requested_sliver_type = None
for sliver_type in node_elem.get('slivers', []):
if sliver_type.get('type') in valid_sliver_types:
requested_sliver_type = sliver_type['type']
-
+
if not requested_sliver_type:
continue
sliver = {'type': requested_sliver_type,
'pl_tags': attributes}
- print>>sys.stderr, "\r\n \r\n \r\n \t\t\t SLABv1.PY add_slivers node_elem %s sliver_type %s \r\n \r\n " %(node_elem, sliver_type)
+ print>>sys.stderr, "\r\n \r\n \r\n \t\t\t Iotlabv1.PY add_slivers node_elem %s sliver_type %s \r\n \r\n " %(node_elem, sliver_type)
# remove available element
for available_elem in node_elem.xpath('./default:available | ./available'):
node_elem.remove(available_elem)
-
+
# remove interface elements
for interface_elem in node_elem.xpath('./default:interface | ./interface'):
node_elem.remove(interface_elem)
-
+
# remove existing sliver_type elements
for sliver_type in node_elem.get('slivers', []):
node_elem.element.remove(sliver_type.element)
#node_elem.set('sliver_id', sliver_id)
# add the sliver type elemnt
- Slabv1Sliver.add_slivers(node_elem.element, sliver)
- #Slabv1SliverType.add_slivers(node_elem.element, sliver)
+ Iotlabv1Sliver.add_slivers(node_elem.element, sliver)
+ #Iotlabv1SliverType.add_slivers(node_elem.element, sliver)
# remove all nodes without slivers
if not append:
parent.remove(node_elem.element)
def remove_slivers(self, slivers, network=None, no_dupes=False):
- Slabv1Node.remove_slivers(self.xml, slivers)
-
-
+ Iotlabv1Node.remove_slivers(self.xml, slivers)
+
+
# Utility
-
+
def merge(self, in_rspec):
"""
Merge contents for specified rspec with current rspec
if not in_rspec:
return
-
+
from sfa.rspecs.rspec import RSpec
-
+
if isinstance(in_rspec, RSpec):
rspec = in_rspec
else:
rspec = RSpec(in_rspec)
# just copy over all networks
#Attention special get_networks using //default:network xpath
- current_networks = self.get_networks()
+ current_networks = self.get_networks()
networks = rspec.version.get_networks()
for network in networks:
current_network = network.get('name')
-
+
# Leases
def get_leases(self, lease_filter=None):
return SFAv1Lease.get_leases(self.xml, lease_filter)
- #return Slabv1Lease.get_leases(self.xml, lease_filter)
+ #return Iotlabv1Lease.get_leases(self.xml, lease_filter)
def add_leases(self, leases, network = None, no_dupes=False):
SFAv1Lease.add_leases(self.xml, leases)
- #Slabv1Lease.add_leases(self.xml, leases)
+ #Iotlabv1Lease.add_leases(self.xml, leases)
+
+ # Spectrum
+
+ def get_channels(self, filter=None):
+ return []
+
+ def add_channels(self, channels, network = None, no_dupes=False):
+ pass
+
+ # Links
+
+ def get_links(self, network=None):
+ return []
+
+ def get_link_requests(self):
+ return []
+
+ def add_links(self, links):
+ pass
+ def add_link_requests(self, links):
+ pass
+
def cleanup(self):
# remove unncecessary elements, attributes
if self.type in ['request', 'manifest']:
# remove 'available' element from remaining node elements
self.xml.remove_element('//default:available | //available')
-
-
-class Slabv1Ad(Slabv1):
+
+
+class Iotlabv1Ad(Iotlabv1):
enabled = True
content_type = 'ad'
schema = 'http://senslab.info/resources/rspec/1/ad.xsd'
#http://www.geni.net/resources/rspec/3/ad.xsd'
template = '<rspec type="advertisement" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
-class Slabv1Request(Slabv1):
+class Iotlabv1Request(Iotlabv1):
enabled = True
content_type = 'request'
schema = 'http://senslab.info/resources/rspec/1/request.xsd'
#http://www.geni.net/resources/rspec/3/request.xsd
template = '<rspec type="request" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
-class Slabv1Manifest(Slabv1):
+class Iotlabv1Manifest(Iotlabv1):
enabled = True
content_type = 'manifest'
schema = 'http://senslab.info/resources/rspec/1/manifest.xsd'
if __name__ == '__main__':
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.rspec_elements import *
- r = RSpec('/tmp/slab.rspec')
- r.load_rspec_elements(Slabv1.elements)
- r.namespaces = Slabv1.namespaces
+ r = RSpec('/tmp/iotlab.rspec')
+ r.load_rspec_elements(Iotlabv1.elements)
+ r.namespaces = Iotlabv1.namespaces
print r.get(RSpecElements.NODE)
def get_nodes_with_slivers(self):
return NITOSv1Node.get_nodes_with_slivers(self.xml)
- def add_nodes(self, nodes, network = None, no_dupes=False):
- NITOSv1Node.add_nodes(self.xml, nodes)
+ def add_nodes(self, nodes, network = None, no_dupes=False, rspec_content_type=None):
+ NITOSv1Node.add_nodes(self.xml, nodes, rspec_content_type)
def merge_node(self, source_node_tag, network, no_dupes=False):
if no_dupes and self.get_node_element(node['hostname']):
# Links
def get_links(self, network=None):
- return PGv2Link.get_links(self.xml)
+ return []
def get_link_requests(self):
- return PGv2Link.get_link_requests(self.xml)
+ return []
def add_links(self, links):
- networks = self.get_networks()
- if len(networks) > 0:
- xml = networks[0].element
- else:
- xml = self.xml
- PGv2Link.add_links(xml, links)
-
+ pass
def add_link_requests(self, links):
- PGv2Link.add_link_requests(self.xml, links)
+ pass
# utility
def get_leases(self, filter=None):
return NITOSv1Lease.get_leases(self.xml, filter)
- def add_leases(self, leases, channels, network = None, no_dupes=False):
+ def add_leases(self, leases_channels, network = None, no_dupes=False):
+ leases, channels = leases_channels
NITOSv1Lease.add_leases(self.xml, leases, channels)
# Spectrum
def get_nodes_with_slivers(self):
return PGv2Node.get_nodes_with_slivers(self.xml)
- def add_nodes(self, nodes, check_for_dupes=False):
- return PGv2Node.add_nodes(self.xml, nodes)
+ def add_nodes(self, nodes, check_for_dupes=False, rspec_content_type=None):
+ return PGv2Node.add_nodes(self.xml, nodes, rspec_content_type)
def merge_node(self, source_node_tag):
# this is untested
def add_leases(self, leases, network = None, no_dupes=False):
PGv2Lease.add_leases(self.xml, leases)
+ # Spectrum
+
+ def get_channels(self, filter=None):
+ return []
+
+ def add_channels(self, channels, network = None, no_dupes=False):
+ pass
+
# Utility
def merge(self, in_rspec):
def get_nodes_with_slivers(self):
return SFAv1Node.get_nodes_with_slivers(self.xml)
- def add_nodes(self, nodes, network = None, no_dupes=False):
- SFAv1Node.add_nodes(self.xml, nodes)
+ def add_nodes(self, nodes, network = None, no_dupes=False, rspec_content_type=None):
+ SFAv1Node.add_nodes(self.xml, nodes, rspec_content_type)
def merge_node(self, source_node_tag, network, no_dupes=False):
if no_dupes and self.get_node_element(node['hostname']):
def add_leases(self, leases, network = None, no_dupes=False):
SFAv1Lease.add_leases(self.xml, leases)
+ # Spectrum
+
+ def get_channels(self, filter=None):
+ return []
+
+ def add_channels(self, channels, network = None, no_dupes=False):
+ pass
+
if __name__ == '__main__':
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.rspec_elements import *
+++ /dev/null
-#import sys
-from httplib import HTTPConnection, HTTPException, NotConnected
-import json
-#import datetime
-#from time import gmtime, strftime
-import os.path
-import sys
-#import urllib
-#import urllib2
-from sfa.util.config import Config
-#from sfa.util.xrn import hrn_to_urn, get_authority, Xrn, get_leaf
-
-from sfa.util.sfalogging import logger
-
-
-OAR_REQUEST_POST_URI_DICT = {'POST_job':{'uri': '/oarapi/jobs.json'},
- 'DELETE_jobs_id':{'uri':'/oarapi/jobs/id.json'},
- }
-
-POST_FORMAT = {'json' : {'content':"application/json", 'object':json},}
-
-#OARpostdatareqfields = {'resource' :"/nodes=", 'command':"sleep", \
- #'workdir':"/home/", 'walltime':""}
-
-
-
-class JsonPage:
- """Class used to manipulate jsopn pages given by OAR."""
- def __init__(self):
- #All are boolean variables
- self.concatenate = False
- #Indicates end of data, no more pages to be loaded.
- self.end = False
- self.next_page = False
- #Next query address
- self.next_offset = None
- #Json page
- self.raw_json = None
-
- def FindNextPage(self):
- """ Gets next data page from OAR when the query's results
- are too big to be transmitted in a single page.
- Uses the "links' item in the json returned to check if
- an additionnal page has to be loaded.
- Returns : next page , next offset query
- """
- if "links" in self.raw_json:
- for page in self.raw_json['links']:
- if page['rel'] == 'next':
- self.concatenate = True
- self.next_page = True
- self.next_offset = "?" + page['href'].split("?")[1]
- print>>sys.stderr, "\r\n \t FindNextPage NEXT LINK"
- return
-
- if self.concatenate :
- self.end = True
- self.next_page = False
- self.next_offset = None
-
- return
-
- #Otherwise, no next page and no concatenate, must be a single page
- #Concatenate the single page and get out of here.
- else:
- self.next_page = False
- self.concatenate = True
- self.next_offset = None
- return
-
- @staticmethod
- def ConcatenateJsonPages(saved_json_list):
- #reset items list
-
- tmp = {}
- tmp['items'] = []
-
- for page in saved_json_list:
- tmp['items'].extend(page['items'])
- return tmp
-
-
- def ResetNextPage(self):
- self.next_page = True
- self.next_offset = None
- self.concatenate = False
- self.end = False
-
-
-class OARrestapi:
- def __init__(self, config_file = '/etc/sfa/oar_config.py'):
- self.oarserver = {}
-
-
- self.oarserver['uri'] = None
- self.oarserver['postformat'] = 'json'
-
- try:
- execfile(config_file, self.__dict__)
-
- self.config_file = config_file
- # path to configuration data
- self.config_path = os.path.dirname(config_file)
-
- except IOError:
- raise IOError, "Could not find or load the configuration file: %s" \
- % config_file
- #logger.setLevelDebug()
- self.oarserver['ip'] = self.OAR_IP
- self.oarserver['port'] = self.OAR_PORT
- self.jobstates = ['Terminated', 'Hold', 'Waiting', 'toLaunch', \
- 'toError', 'toAckReservation', 'Launching', \
- 'Finishing', 'Running', 'Suspended', 'Resuming',\
- 'Error']
-
- self.parser = OARGETParser(self)
-
-
- def GETRequestToOARRestAPI(self, request, strval=None, next_page=None, username = None ):
- self.oarserver['uri'] = \
- OARGETParser.OARrequests_uri_dict[request]['uri']
- #Get job details with username
- if 'owner' in OARGETParser.OARrequests_uri_dict[request] and username:
- self.oarserver['uri'] += OARGETParser.OARrequests_uri_dict[request]['owner'] + username
- headers = {}
- data = json.dumps({})
- logger.debug("OARrestapi \tGETRequestToOARRestAPI %s" %(request))
- if strval:
- self.oarserver['uri'] = self.oarserver['uri'].\
- replace("id",str(strval))
-
- if next_page:
- self.oarserver['uri'] += next_page
-
- if username:
- headers['X-REMOTE_IDENT'] = username
-
- logger.debug("OARrestapi: \t GETRequestToOARRestAPI \
- self.oarserver['uri'] %s strval %s" \
- %(self.oarserver['uri'], strval))
- try :
- #seems that it does not work if we don't add this
- headers['content-length'] = '0'
-
- conn = HTTPConnection(self.oarserver['ip'], \
- self.oarserver['port'])
- conn.request("GET", self.oarserver['uri'], data, headers)
- resp = ( conn.getresponse()).read()
- conn.close()
-
- except HTTPException, error :
- logger.log_exc("GET_OAR_SRVR : Problem with OAR server : %s " \
- %(error))
- #raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
- try:
- js_dict = json.loads(resp)
- #print "\r\n \t\t\t js_dict keys" , js_dict.keys(), " \r\n", js_dict
- return js_dict
-
- except ValueError, error:
- logger.log_exc("Failed to parse Server Response: %s ERROR %s"\
- %(js_dict, error))
- #raise ServerError("Failed to parse Server Response:" + js)
-
-
- def POSTRequestToOARRestAPI(self, request, datadict, username=None):
- """ Used to post a job on OAR , along with data associated
- with the job.
-
- """
-
- #first check that all params for are OK
- try:
- self.oarserver['uri'] = OAR_REQUEST_POST_URI_DICT[request]['uri']
-
- except KeyError:
- logger.log_exc("OARrestapi \tPOSTRequestToOARRestAPI request not \
- valid")
- return
- if datadict and 'strval' in datadict:
- self.oarserver['uri'] = self.oarserver['uri'].replace("id", \
- str(datadict['strval']))
- del datadict['strval']
-
- data = json.dumps(datadict)
- headers = {'X-REMOTE_IDENT':username, \
- 'content-type': POST_FORMAT['json']['content'], \
- 'content-length':str(len(data))}
- try :
-
- conn = HTTPConnection(self.oarserver['ip'], \
- self.oarserver['port'])
- conn.request("POST", self.oarserver['uri'], data, headers)
- resp = (conn.getresponse()).read()
- conn.close()
- except NotConnected:
- logger.log_exc("POSTRequestToOARRestAPI NotConnected ERROR: \
- data %s \r\n \t\n \t\t headers %s uri %s" \
- %(data,headers,self.oarserver['uri']))
-
- #raise ServerError("POST_OAR_SRVR : error")
-
- try:
- answer = json.loads(resp)
- logger.debug("POSTRequestToOARRestAPI : answer %s" %(answer))
- return answer
-
- except ValueError, error:
- logger.log_exc("Failed to parse Server Response: error %s \
- %s" %(error))
- #raise ServerError("Failed to parse Server Response:" + answer)
-
-
-
-def AddOarNodeId(tuplelist, value):
- """ Adds Oar internal node id to the nodes attributes """
-
- tuplelist.append(('oar_id', int(value)))
-
-
-def AddNodeNetworkAddr(dictnode, value):
- #Inserts new key. The value associated is a tuple list
- node_id = value
-
- dictnode[node_id] = [('node_id', node_id),('hostname', node_id) ]
-
- return node_id
-
-def AddNodeSite(tuplelist, value):
- tuplelist.append(('site', str(value)))
-
-def AddNodeRadio(tuplelist, value):
- tuplelist.append(('radio', str(value)))
-
-
-def AddMobility(tuplelist, value):
- if value is 0:
- tuplelist.append(('mobile', 'False'))
- else :
- tuplelist.append(('mobile', 'True'))
-
-def AddPosX(tuplelist, value):
- tuplelist.append(('posx', value))
-
-def AddPosY(tuplelist, value):
- tuplelist.append(('posy', value))
-
-def AddPosZ(tuplelist, value):
- tuplelist.append(('posz', value))
-
-def AddBootState(tuplelist, value):
- tuplelist.append(('boot_state', str(value)))
-
-#Insert a new node into the dictnode dictionary
-def AddNodeId(dictnode, value):
- #Inserts new key. The value associated is a tuple list
- node_id = int(value)
-
- dictnode[node_id] = [('node_id', node_id)]
- return node_id
-
-def AddHardwareType(tuplelist, value):
- value_list = value.split(':')
- tuplelist.append(('archi', value_list[0]))
- tuplelist.append(('radio', value_list[1]))
-
-
-class OARGETParser:
- resources_fulljson_dict = {
- 'network_address' : AddNodeNetworkAddr,
- 'site': AddNodeSite,
- 'radio': AddNodeRadio,
- 'mobile': AddMobility,
- 'x': AddPosX,
- 'y': AddPosY,
- 'z':AddPosZ,
- 'archi':AddHardwareType,
- 'state':AddBootState,
- 'id' : AddOarNodeId,
- }
-
-
- def __init__(self, srv) :
- self.version_json_dict = {
- 'api_version' : None , 'apilib_version' :None,\
- 'api_timezone': None, 'api_timestamp': None, 'oar_version': None ,}
- self.config = Config()
- self.interface_hrn = self.config.SFA_INTERFACE_HRN
- self.timezone_json_dict = {
- 'timezone': None, 'api_timestamp': None, }
- #self.jobs_json_dict = {
- #'total' : None, 'links' : [],\
- #'offset':None , 'items' : [], }
- #self.jobs_table_json_dict = self.jobs_json_dict
- #self.jobs_details_json_dict = self.jobs_json_dict
- self.server = srv
- self.node_dictlist = {}
-
- self.json_page = JsonPage()
-
- self.site_dict = {}
- self.SendRequest("GET_version")
-
-
-
-
-
- def ParseVersion(self) :
- #print self.json_page.raw_json
- #print >>sys.stderr, self.json_page.raw_json
- if 'oar_version' in self.json_page.raw_json :
- self.version_json_dict.update(api_version = \
- self.json_page.raw_json['api_version'],
- apilib_version = self.json_page.raw_json['apilib_version'],
- api_timezone = self.json_page.raw_json['api_timezone'],
- api_timestamp = self.json_page.raw_json['api_timestamp'],
- oar_version = self.json_page.raw_json['oar_version'] )
- else :
- self.version_json_dict.update(api_version = \
- self.json_page.raw_json['api'] ,
- apilib_version = self.json_page.raw_json['apilib'],
- api_timezone = self.json_page.raw_json['api_timezone'],
- api_timestamp = self.json_page.raw_json['api_timestamp'],
- oar_version = self.json_page.raw_json['oar'] )
-
- print self.version_json_dict['apilib_version']
-
-
- def ParseTimezone(self) :
- api_timestamp = self.json_page.raw_json['api_timestamp']
- api_tz = self.json_page.raw_json['timezone']
- return api_timestamp, api_tz
-
- def ParseJobs(self) :
- self.jobs_list = []
- print " ParseJobs "
- return self.json_page.raw_json
-
- def ParseJobsTable(self) :
- print "ParseJobsTable"
-
- def ParseJobsDetails (self):
- # currently, this function is not used a lot,
- #so i have no idea what be usefull to parse,
- #returning the full json. NT
- #logger.debug("ParseJobsDetails %s " %(self.json_page.raw_json))
- return self.json_page.raw_json
-
-
- def ParseJobsIds(self):
-
- job_resources = ['wanted_resources', 'name', 'id', 'start_time', \
- 'state','owner','walltime','message']
-
-
- job_resources_full = ['launching_directory', 'links', \
- 'resubmit_job_id', 'owner', 'events', 'message', \
- 'scheduled_start', 'id', 'array_id', 'exit_code', \
- 'properties', 'state','array_index', 'walltime', \
- 'type', 'initial_request', 'stop_time', 'project',\
- 'start_time', 'dependencies','api_timestamp','submission_time', \
- 'reservation', 'stdout_file', 'types', 'cpuset_name', \
- 'name', 'wanted_resources','queue','stderr_file','command']
-
-
- job_info = self.json_page.raw_json
- #logger.debug("OARESTAPI ParseJobsIds %s" %(self.json_page.raw_json))
- values = []
- try:
- for k in job_resources:
- values.append(job_info[k])
- return dict(zip(job_resources, values))
-
- except KeyError:
- logger.log_exc("ParseJobsIds KeyError ")
-
-
- def ParseJobsIdResources(self):
- """ Parses the json produced by the request
- /oarapi/jobs/id/resources.json.
- Returns a list of oar node ids that are scheduled for the
- given job id.
-
- """
- job_resources = []
- for resource in self.json_page.raw_json['items']:
- job_resources.append(resource['id'])
-
- #logger.debug("OARESTAPI \tParseJobsIdResources %s" %(self.json_page.raw_json))
- return job_resources
-
- def ParseResources(self) :
- """ Parses the json produced by a get_resources request on oar."""
-
- #logger.debug("OARESTAPI \tParseResources " )
- #resources are listed inside the 'items' list from the json
- self.json_page.raw_json = self.json_page.raw_json['items']
- self.ParseNodes()
-
- def ParseReservedNodes(self):
- """ Returns an array containing the list of the reserved nodes """
-
- #resources are listed inside the 'items' list from the json
- reservation_list = []
- job = {}
- #Parse resources info
- for json_element in self.json_page.raw_json['items']:
- #In case it is a real reservation (not asap case)
- if json_element['scheduled_start']:
- job['t_from'] = json_element['scheduled_start']
- job['t_until'] = int(json_element['scheduled_start']) + \
- int(json_element['walltime'])
- #Get resources id list for the job
- job['resource_ids'] = \
- [ node_dict['id'] for node_dict in json_element['resources']]
- else:
- job['t_from'] = "As soon as possible"
- job['t_until'] = "As soon as possible"
- job['resource_ids'] = ["Undefined"]
-
-
- job['state'] = json_element['state']
- job['lease_id'] = json_element['id']
-
-
- job['user'] = json_element['owner']
- #logger.debug("OARRestapi \tParseReservedNodes job %s" %(job))
- reservation_list.append(job)
- #reset dict
- job = {}
- return reservation_list
-
- def ParseRunningJobs(self):
- """ Gets the list of nodes currently in use from the attributes of the
- running jobs.
-
- """
- logger.debug("OARESTAPI \tParseRunningJobs__________________________ ")
- #resources are listed inside the 'items' list from the json
- nodes = []
- for job in self.json_page.raw_json['items']:
- for node in job['nodes']:
- nodes.append(node['network_address'])
- return nodes
-
-
-
- def ParseDeleteJobs(self):
- """ No need to parse anything in this function.A POST
- is done to delete the job.
-
- """
- return
-
- def ParseResourcesFull(self) :
- """ This method is responsible for parsing all the attributes
- of all the nodes returned by OAR when issuing a get resources full.
- The information from the nodes and the sites are separated.
- Updates the node_dictlist so that the dictionnary of the platform's
- nodes is available afterwards.
-
- """
- logger.debug("OARRESTAPI ParseResourcesFull________________________ ")
- #print self.json_page.raw_json[1]
- #resources are listed inside the 'items' list from the json
- if self.version_json_dict['apilib_version'] != "0.2.10" :
- self.json_page.raw_json = self.json_page.raw_json['items']
- self.ParseNodes()
- self.ParseSites()
- return self.node_dictlist
-
- def ParseResourcesFullSites(self) :
- """ UNUSED. Originally used to get information from the sites.
- ParseResourcesFull is used instead.
-
- """
- if self.version_json_dict['apilib_version'] != "0.2.10" :
- self.json_page.raw_json = self.json_page.raw_json['items']
- self.ParseNodes()
- self.ParseSites()
- return self.site_dict
-
-
-
- def ParseNodes(self):
- """ Parse nodes properties from OAR
- Put them into a dictionary with key = node id and value is a dictionary
- of the node properties and properties'values.
-
- """
- node_id = None
- keys = self.resources_fulljson_dict.keys()
- keys.sort()
-
- for dictline in self.json_page.raw_json:
- node_id = None
- # dictionary is empty and/or a new node has to be inserted
- node_id = self.resources_fulljson_dict['network_address'](\
- self.node_dictlist, dictline['network_address'])
- for k in keys:
- if k in dictline:
- if k == 'network_address':
- continue
-
- self.resources_fulljson_dict[k](\
- self.node_dictlist[node_id], dictline[k])
-
- #The last property has been inserted in the property tuple list,
- #reset node_id
- #Turn the property tuple list (=dict value) into a dictionary
- self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
- node_id = None
-
- @staticmethod
- def slab_hostname_to_hrn( root_auth, hostname):
- return root_auth + '.'+ hostname
-
-
-
- def ParseSites(self):
- """ Returns a list of dictionnaries containing the sites' attributes."""
-
- nodes_per_site = {}
- config = Config()
- #logger.debug(" OARrestapi.py \tParseSites self.node_dictlist %s"\
- #%(self.node_dictlist))
- # Create a list of nodes per site_id
- for node_id in self.node_dictlist:
- node = self.node_dictlist[node_id]
-
- if node['site'] not in nodes_per_site:
- nodes_per_site[node['site']] = []
- nodes_per_site[node['site']].append(node['node_id'])
- else:
- if node['node_id'] not in nodes_per_site[node['site']]:
- nodes_per_site[node['site']].append(node['node_id'])
-
- #Create a site dictionary whose key is site_login_base (name of the site)
- # and value is a dictionary of properties, including the list
- #of the node_ids
- for node_id in self.node_dictlist:
- node = self.node_dictlist[node_id]
- #node.update({'hrn':self.slab_hostname_to_hrn(self.interface_hrn, \
- #node['site'],node['hostname'])})
- node.update({'hrn':self.slab_hostname_to_hrn(self.interface_hrn, node['hostname'])})
- self.node_dictlist.update({node_id:node})
-
- if node['site'] not in self.site_dict:
- self.site_dict[node['site']] = {
- 'site':node['site'],
- 'node_ids':nodes_per_site[node['site']],
- 'latitude':"48.83726",
- 'longitude':"- 2.10336",'name':config.SFA_REGISTRY_ROOT_AUTH,
- 'pcu_ids':[], 'max_slices':None, 'ext_consortium_id':None,
- 'max_slivers':None, 'is_public':True, 'peer_site_id': None,
- 'abbreviated_name':"senslab", 'address_ids': [],
- 'url':"http,//www.senslab.info", 'person_ids':[],
- 'site_tag_ids':[], 'enabled': True, 'slice_ids':[],
- 'date_created': None, 'peer_id': None }
- #if node['site_login_base'] not in self.site_dict.keys():
- #self.site_dict[node['site_login_base']] = {'login_base':node['site_login_base'],
- #'node_ids':nodes_per_site[node['site_login_base']],
- #'latitude':"48.83726",
- #'longitude':"- 2.10336",'name':"senslab",
- #'pcu_ids':[], 'max_slices':None, 'ext_consortium_id':None,
- #'max_slivers':None, 'is_public':True, 'peer_site_id': None,
- #'abbreviated_name':"senslab", 'address_ids': [],
- #'url':"http,//www.senslab.info", 'person_ids':[],
- #'site_tag_ids':[], 'enabled': True, 'slice_ids':[],
- #'date_created': None, 'peer_id': None }
-
-
-
-
- OARrequests_uri_dict = {
- 'GET_version':
- {'uri':'/oarapi/version.json', 'parse_func': ParseVersion},
- 'GET_timezone':
- {'uri':'/oarapi/timezone.json' ,'parse_func': ParseTimezone },
- 'GET_jobs':
- {'uri':'/oarapi/jobs.json','parse_func': ParseJobs},
- 'GET_jobs_id':
- {'uri':'/oarapi/jobs/id.json','parse_func': ParseJobsIds},
- 'GET_jobs_id_resources':
- {'uri':'/oarapi/jobs/id/resources.json',\
- 'parse_func': ParseJobsIdResources},
- 'GET_jobs_table':
- {'uri':'/oarapi/jobs/table.json','parse_func': ParseJobsTable},
- 'GET_jobs_details':
- {'uri':'/oarapi/jobs/details.json',\
- 'parse_func': ParseJobsDetails},
- 'GET_reserved_nodes':
- {'uri':
- '/oarapi/jobs/details.json?state=Running,Waiting,Launching',\
- 'owner':'&user=',
- 'parse_func':ParseReservedNodes},
-
-
- 'GET_running_jobs':
- {'uri':'/oarapi/jobs/details.json?state=Running',\
- 'parse_func':ParseRunningJobs},
- 'GET_resources_full':
- {'uri':'/oarapi/resources/full.json',\
- 'parse_func': ParseResourcesFull},
- 'GET_sites':
- {'uri':'/oarapi/resources/full.json',\
- 'parse_func': ParseResourcesFullSites},
- 'GET_resources':
- {'uri':'/oarapi/resources.json' ,'parse_func': ParseResources},
- 'DELETE_jobs_id':
- {'uri':'/oarapi/jobs/id.json' ,'parse_func': ParseDeleteJobs}
- }
-
-
-
-
- def SendRequest(self, request, strval = None , username = None):
- """ Connects to OAR , sends the valid GET requests and uses
- the appropriate json parsing functions.
-
- """
- save_json = None
-
- self.json_page.ResetNextPage()
- save_json = []
-
- if request in self.OARrequests_uri_dict :
- while self.json_page.next_page:
- self.json_page.raw_json = self.server.GETRequestToOARRestAPI(\
- request, \
- strval, \
- self.json_page.next_offset, \
- username)
- self.json_page.FindNextPage()
- if self.json_page.concatenate:
- save_json.append(self.json_page.raw_json)
-
- if self.json_page.concatenate and self.json_page.end :
- self.json_page.raw_json = \
- self.json_page.ConcatenateJsonPages(save_json)
-
- return self.OARrequests_uri_dict[request]['parse_func'](self)
- else:
- logger.error("OARRESTAPI OARGetParse __init__ : ERROR_REQUEST " \
- %(request))
-
+++ /dev/null
-#import time
-from sfa.util.xrn import hrn_to_urn, urn_to_hrn, get_authority
-
-from sfa.rspecs.rspec import RSpec
-#from sfa.rspecs.elements.location import Location
-from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.login import Login
-from sfa.rspecs.elements.services import ServicesElement
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.lease import Lease
-from sfa.rspecs.elements.granularity import Granularity
-from sfa.rspecs.version_manager import VersionManager
-
-
-from sfa.rspecs.elements.versions.slabv1Node import SlabPosition, SlabNode, \
- SlabLocation
-from sfa.util.sfalogging import logger
-
-from sfa.util.xrn import Xrn
-
-def slab_xrn_to_hostname(xrn):
- return Xrn.unescape(Xrn(xrn=xrn, type='node').get_leaf())
-
-def slab_xrn_object(root_auth, hostname):
- """Attributes are urn and hrn.
- Get the hostname using slab_xrn_to_hostname on the urn.
-
- :return: the senslab node's xrn
- :rtype: Xrn
- """
- return Xrn('.'.join( [root_auth, Xrn.escape(hostname)]), type='node')
-
-class SlabAggregate:
-
- sites = {}
- nodes = {}
- api = None
- interfaces = {}
- links = {}
- node_tags = {}
-
- prepared = False
-
- user_options = {}
-
- def __init__(self, driver):
- self.driver = driver
-
- def get_slice_and_slivers(self, slice_xrn, login=None):
- """
- Get the slices and the associated leases if any from the senslab
- testbed. For each slice, get the nodes in the associated lease
- and create a sliver with the necessary info and insertinto the sliver
- dictionary, keyed on the node hostnames.
- Returns a dict of slivers based on the sliver's node_id.
- Called by get_rspec.
-
-
- :param slice_xrn: xrn of the slice
- :param login: user's login on senslab ldap
-
- :type slice_xrn: string
- :type login: string
- :reutnr : a list of slices dict and a dictionary of Sliver object
- :rtype: (list, dict)
-
- ..note: There is no slivers in senslab, only leases.
-
- """
- slivers = {}
- sfa_slice = None
- if slice_xrn is None:
- return (sfa_slice, slivers)
- slice_urn = hrn_to_urn(slice_xrn, 'slice')
- slice_hrn, _ = urn_to_hrn(slice_xrn)
- slice_name = slice_hrn
-
- slices = self.driver.slab_api.GetSlices(slice_filter= str(slice_name), \
- slice_filter_type = 'slice_hrn', \
- login=login)
-
- logger.debug("Slabaggregate api \tget_slice_and_slivers \
- sfa_slice %s \r\n slices %s self.driver.hrn %s" \
- %(sfa_slice, slices, self.driver.hrn))
- if slices == []:
- return (sfa_slice, slivers)
-
-
- # sort slivers by node id , if there is a job
- #and therefore, node allocated to this slice
- for sfa_slice in slices:
- try:
- node_ids_list = sfa_slice['node_ids']
- except KeyError:
- logger.log_exc("SLABAGGREGATE \t \
- get_slice_and_slivers KeyError ")
- continue
-
- for node in node_ids_list:
- sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
- sliver_xrn.set_authority(self.driver.hrn)
- sliver = Sliver({'sliver_id':sliver_xrn.urn,
- 'name': sfa_slice['hrn'],
- 'type': 'slab-node',
- 'tags': []})
-
- slivers[node] = sliver
-
-
- #Add default sliver attribute :
- #connection information for senslab
- if get_authority (sfa_slice['hrn']) == self.driver.slab_api.root_auth:
- tmp = sfa_slice['hrn'].split('.')
- ldap_username = tmp[1].split('_')[0]
- ssh_access = None
- slivers['default_sliver'] = {'ssh': ssh_access , \
- 'login': ldap_username}
-
- #TODO get_slice_and_slivers Find the login of the external user
-
- logger.debug("SLABAGGREGATE api get_slice_and_slivers slivers %s "\
- %(slivers))
- return (slices, slivers)
-
-
-
- def get_nodes(self, slices=None, slivers=[], options=None):
- # NT: the semantic of this function is not clear to me :
- # if slice is not defined, then all the nodes should be returned
- # if slice is defined, we should return only the nodes that
- # are part of this slice
- # but what is the role of the slivers parameter ?
- # So i assume that slice['node_ids'] will be the same as slivers for us
- #filter_dict = {}
- #if slice_xrn:
- #if not slices or not slices['node_ids']:
- #return ([],[])
- #tags_filter = {}
-
- # get the granularity in second for the reservation system
- grain = self.driver.slab_api.GetLeaseGranularity()
-
-
- nodes = self.driver.slab_api.GetNodes()
- #geni_available = options.get('geni_available')
- #if geni_available:
- #filter['boot_state'] = 'boot'
-
- #filter.update({'peer_id': None})
- #nodes = self.driver.slab_api.GetNodes(filter['hostname'])
-
- #site_ids = []
- #interface_ids = []
- #tag_ids = []
- nodes_dict = {}
-
- #for node in nodes:
-
- #nodes_dict[node['node_id']] = node
- #logger.debug("SLABAGGREGATE api get_nodes nodes %s "\
- #%(nodes ))
- # get sites
- #sites_dict = self.get_sites({'site_id': site_ids})
- # get interfaces
- #interfaces = self.get_interfaces({'interface_id':interface_ids})
- # get tags
- #node_tags = self.get_node_tags(tags_filter)
-
- #if slices, this means we got to list all the nodes given to this slice
- # Make a list of all the nodes in the slice before getting their
- #attributes
- rspec_nodes = []
- slice_nodes_list = []
- logger.debug("SLABAGGREGATE api get_nodes slice_nodes_list %s "\
- %(slices ))
- if slices is not None:
- for one_slice in slices:
- try:
- slice_nodes_list = one_slice['node_ids']
- except KeyError:
- pass
- #for node in one_slice['node_ids']:
- #slice_nodes_list.append(node)
-
- reserved_nodes = self.driver.slab_api.GetNodesCurrentlyInUse()
- logger.debug("SLABAGGREGATE api get_nodes slice_nodes_list %s "\
- %(slice_nodes_list))
- for node in nodes:
- nodes_dict[node['node_id']] = node
- if slice_nodes_list == [] or node['hostname'] in slice_nodes_list:
-
- rspec_node = SlabNode()
- # xxx how to retrieve site['login_base']
- #site_id=node['site_id']
- #site=sites_dict[site_id]
-
- rspec_node['mobile'] = node['mobile']
- rspec_node['archi'] = node['archi']
- rspec_node['radio'] = node['radio']
-
- slab_xrn = slab_xrn_object(self.driver.slab_api.root_auth, \
- node['hostname'])
- rspec_node['component_id'] = slab_xrn.urn
- rspec_node['component_name'] = node['hostname']
- rspec_node['component_manager_id'] = \
- hrn_to_urn(self.driver.slab_api.root_auth, \
- 'authority+sa')
-
- # Senslab's nodes are federated : there is only one authority
- # for all Senslab sites, registered in SFA.
- # Removing the part including the site
- # in authority_id SA 27/07/12
- rspec_node['authority_id'] = rspec_node['component_manager_id']
-
- # do not include boot state (<available> element)
- #in the manifest rspec
-
-
- rspec_node['boot_state'] = node['boot_state']
- if node['hostname'] in reserved_nodes:
- rspec_node['boot_state'] = "Reserved"
- rspec_node['exclusive'] = 'true'
- rspec_node['hardware_types'] = [HardwareType({'name': \
- 'slab-node'})]
-
-
- location = SlabLocation({'country':'France', 'site': \
- node['site']})
- rspec_node['location'] = location
-
-
- position = SlabPosition()
- for field in position :
- try:
- position[field] = node[field]
- except KeyError, error :
- logger.log_exc("SLABAGGREGATE\t get_nodes \
- position %s "%(error))
-
- rspec_node['position'] = position
- #rspec_node['interfaces'] = []
-
- # Granularity
- granularity = Granularity({'grain': grain})
- rspec_node['granularity'] = granularity
- rspec_node['tags'] = []
- if node['hostname'] in slivers:
- # add sliver info
- sliver = slivers[node['hostname']]
- rspec_node['sliver_id'] = sliver['sliver_id']
- rspec_node['client_id'] = node['hostname']
- rspec_node['slivers'] = [sliver]
-
- # slivers always provide the ssh service
- login = Login({'authentication': 'ssh-keys', \
- 'hostname': node['hostname'], 'port':'22', \
- 'username': sliver['name']})
- service = Services({'login': login})
- rspec_node['services'] = [service]
- rspec_nodes.append(rspec_node)
-
- return (rspec_nodes)
- #def get_all_leases(self, slice_record = None):
- def get_all_leases(self):
- """
- Get list of lease dictionaries which all have the mandatory keys
- ('lease_id', 'hostname', 'site_id', 'name', 'start_time', 'duration').
- All the leases running or scheduled are returned.
-
-
- ..note::There is no filtering of leases within a given time frame.
- All the running or scheduled leases are returned. options
- removed SA 15/05/2013
-
-
- """
-
- #now = int(time.time())
- #lease_filter = {'clip': now }
-
- #if slice_record:
- #lease_filter.update({'name': slice_record['name']})
-
- #leases = self.driver.slab_api.GetLeases(lease_filter)
- leases = self.driver.slab_api.GetLeases()
- grain = self.driver.slab_api.GetLeaseGranularity()
- site_ids = []
- rspec_leases = []
- for lease in leases:
- #as many leases as there are nodes in the job
- for node in lease['reserved_nodes']:
- rspec_lease = Lease()
- rspec_lease['lease_id'] = lease['lease_id']
- #site = node['site_id']
- slab_xrn = slab_xrn_object(self.driver.slab_api.root_auth, node)
- rspec_lease['component_id'] = slab_xrn.urn
- #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn,\
- #site, node['hostname'])
- try:
- rspec_lease['slice_id'] = lease['slice_id']
- except KeyError:
- #No info on the slice used in slab_xp table
- pass
- rspec_lease['start_time'] = lease['t_from']
- rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
- / grain
- rspec_leases.append(rspec_lease)
- return rspec_leases
-
-
-
-#from plc/aggregate.py
- def get_rspec(self, slice_xrn=None, login=None, version = None, \
- options=None):
-
- rspec = None
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- logger.debug("SlabAggregate \t get_rspec ***version %s \
- version.type %s version.version %s options %s \r\n" \
- %(version,version.type,version.version,options))
-
- if slice_xrn is None:
- rspec_version = version_manager._get_version(version.type, \
- version.version, 'ad')
-
- else:
- rspec_version = version_manager._get_version(version.type, \
- version.version, 'manifest')
-
- slices, slivers = self.get_slice_and_slivers(slice_xrn, login)
- #at this point sliver may be empty if no senslab job
- #is running for this user/slice.
- rspec = RSpec(version=rspec_version, user_options=options)
-
-
- #if slice and 'expires' in slice:
- #rspec.xml.set('expires',\
- #datetime_to_string(utcparse(slice['expires']))
- # add sliver defaults
- #nodes, links = self.get_nodes(slice, slivers)
- logger.debug("\r\n \r\n SlabAggregate \tget_rspec *** \
- slice_xrn %s slices %s\r\n \r\n"\
- %(slice_xrn, slices))
-
- if options is not None:
- lease_option = options['list_leases']
- else:
- #If no options are specified, at least print the resources
- lease_option = 'all'
- #if slice_xrn :
- #lease_option = 'all'
-
-
- if lease_option in ['all', 'resources']:
- #if not options.get('list_leases') or options.get('list_leases')
- #and options['list_leases'] != 'leases':
- nodes = self.get_nodes(slices, slivers)
- logger.debug("\r\n \r\n SlabAggregate \ lease_option %s \
- get rspec ******* nodes %s"\
- %(lease_option, nodes[0]))
-
- sites_set = set([node['location']['site'] for node in nodes] )
-
- #In case creating a job, slice_xrn is not set to None
- rspec.version.add_nodes(nodes)
- if slice_xrn :
- #Get user associated with this slice
- #user = dbsession.query(RegRecord).filter_by(record_id = \
- #slices['record_id_user']).first()
-
- #ldap_username = (user.hrn).split('.')[1]
-
-
- #for one_slice in slices :
- ldap_username = slices[0]['hrn']
- tmp = ldap_username.split('.')
- ldap_username = tmp[1].split('_')[0]
-
- if version.type == "Slab":
- rspec.version.add_connection_information(ldap_username, \
- sites_set)
-
- default_sliver = slivers.get('default_sliver', [])
- if default_sliver:
- #default_sliver_attribs = default_sliver.get('tags', [])
- logger.debug("SlabAggregate \tget_rspec **** \
- default_sliver%s \r\n" %(default_sliver))
- for attrib in default_sliver:
- rspec.version.add_default_sliver_attribute(attrib, \
- default_sliver[attrib])
- if lease_option in ['all','leases']:
- #leases = self.get_all_leases(slices)
- leases = self.get_all_leases()
- rspec.version.add_leases(leases)
-
- #logger.debug("SlabAggregate \tget_rspec ******* rspec_toxml %s \r\n"\
- #%(rspec.toxml()))
- return rspec.toxml()
+++ /dev/null
-from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
-from sfa.util.sfalogging import logger
-from sfa.storage.alchemy import dbsession
-from sfa.storage.model import RegRecord
-
-
-
-from sfa.managers.driver import Driver
-from sfa.rspecs.version_manager import VersionManager
-from sfa.rspecs.rspec import RSpec
-
-from sfa.util.xrn import Xrn, hrn_to_urn, get_authority
-
-
-from sfa.senslab.slabpostgres import SlabDB
-
-
-from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname
-
-from sfa.senslab.slabslices import SlabSlices
-
-
-from sfa.senslab.slabapi import SlabTestbedAPI
-
-
-
-class SlabDriver(Driver):
- """ Senslab Driver class inherited from Driver generic class.
-
- Contains methods compliant with the SFA standard and the testbed
- infrastructure (calls to LDAP and OAR).
-
- .. seealso:: Driver class
-
- """
- def __init__(self, config):
- """
-
- Sets the senslab SFA config parameters ,
- instanciates the testbed api and the senslab database.
-
- :param config: senslab SFA configuration object
- :type config: Config object
- """
- Driver.__init__ (self, config)
- self.config = config
-
- self.db = SlabDB(config, debug = False)
- self.slab_api = SlabTestbedAPI(config)
- self.cache = None
-
- def augment_records_with_testbed_info (self, record_list ):
- """
-
- Adds specific testbed info to the records.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :return: list of records with extended information in each record
- :rtype: list
- """
- return self.fill_record_info (record_list)
-
- def fill_record_info(self, record_list):
- """
- For each SFA record, fill in the senslab specific and SFA specific
- fields in the record.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :return: list of records with extended information in each record
- :rtype: list
-
- .. warnings:: Should not be modifying record_list directly because modi
- fication are kept outside the method's scope. Howerver, there is no
- other way to do it given the way it's called in registry manager.
- """
-
- logger.debug("SLABDRIVER \tfill_record_info records %s " %(record_list))
- if not isinstance(record_list, list):
- record_list = [record_list]
-
-
- try:
- for record in record_list:
- #If the record is a SFA slice record, then add information
- #about the user of this slice. This kind of
- #information is in the Senslab's DB.
- if str(record['type']) == 'slice':
- if 'reg_researchers' in record and \
- isinstance(record['reg_researchers'], list) :
- record['reg_researchers'] = \
- record['reg_researchers'][0].__dict__
- record.update({'PI':[record['reg_researchers']['hrn']],
- 'researcher': [record['reg_researchers']['hrn']],
- 'name':record['hrn'],
- 'oar_job_id':[],
- 'node_ids': [],
- 'person_ids':[record['reg_researchers']['record_id']],
- 'geni_urn':'', #For client_helper.py compatibility
- 'keys':'', #For client_helper.py compatibility
- 'key_ids':''}) #For client_helper.py compatibility
-
-
- #Get slab slice record and oar job id if any.
- recslice_list = self.slab_api.GetSlices(slice_filter = \
- str(record['hrn']),\
- slice_filter_type = 'slice_hrn')
-
-
- logger.debug("SLABDRIVER \tfill_record_info \
- TYPE SLICE RECUSER record['hrn'] %s ecord['oar_job_id']\
- %s " %(record['hrn'], record['oar_job_id']))
- del record['reg_researchers']
- try:
- for rec in recslice_list:
- logger.debug("SLABDRIVER\r\n \t \
- fill_record_info oar_job_id %s " \
- %(rec['oar_job_id']))
-
- record['node_ids'] = [ self.slab_api.root_auth + \
- hostname for hostname in rec['node_ids']]
- except KeyError:
- pass
-
-
- logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
- recslice_list %s \r\n \t RECORD %s \r\n \
- \r\n" %(recslice_list, record))
-
- if str(record['type']) == 'user':
- #The record is a SFA user record.
- #Get the information about his slice from Senslab's DB
- #and add it to the user record.
- recslice_list = self.slab_api.GetSlices(\
- slice_filter = record['record_id'],\
- slice_filter_type = 'record_id_user')
-
- logger.debug( "SLABDRIVER.PY \t fill_record_info TYPE USER \
- recslice_list %s \r\n \t RECORD %s \r\n" \
- %(recslice_list , record))
- #Append slice record in records list,
- #therefore fetches user and slice info again(one more loop)
- #Will update PIs and researcher for the slice
-
- recuser = recslice_list[0]['reg_researchers']
- logger.debug( "SLABDRIVER.PY \t fill_record_info USER \
- recuser %s \r\n \r\n" %(recuser))
- recslice = {}
- recslice = recslice_list[0]
- recslice.update({'PI':[recuser['hrn']],
- 'researcher': [recuser['hrn']],
- 'name':record['hrn'],
- 'node_ids': [],
- 'oar_job_id': [],
- 'person_ids':[recuser['record_id']]})
- try:
- for rec in recslice_list:
- recslice['oar_job_id'].append(rec['oar_job_id'])
- except KeyError:
- pass
-
- recslice.update({'type':'slice', \
- 'hrn':recslice_list[0]['hrn']})
-
-
- #GetPersons takes [] as filters
- user_slab = self.slab_api.GetPersons([record])
-
-
- record.update(user_slab[0])
- #For client_helper.py compatibility
- record.update( { 'geni_urn':'',
- 'keys':'',
- 'key_ids':'' })
- record_list.append(recslice)
-
- logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
- INFO TO USER records %s" %(record_list))
-
-
- except TypeError, error:
- logger.log_exc("SLABDRIVER \t fill_record_info EXCEPTION %s"\
- %(error))
-
- return record_list
-
-
- def sliver_status(self, slice_urn, slice_hrn):
- """
- Receive a status request for slice named urn/hrn
- urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
- shall return a structure as described in
- http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
- NT : not sure if we should implement this or not, but used by sface.
-
- :param slice_urn: slice urn
- :type slice_urn: string
- :param slice_hrn: slice hrn
- :type slice_hrn: string
-
- """
-
-
- #First get the slice with the slice hrn
- slice_list = self.slab_api.GetSlices(slice_filter = slice_hrn, \
- slice_filter_type = 'slice_hrn')
-
- if len(slice_list) is 0:
- raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
-
- #Used for fetching the user info witch comes along the slice info
- one_slice = slice_list[0]
-
-
- #Make a list of all the nodes hostnames in use for this slice
- slice_nodes_list = []
- #for single_slice in slice_list:
- #for node in single_slice['node_ids']:
- #slice_nodes_list.append(node['hostname'])
- for node in one_slice:
- slice_nodes_list.append(node['hostname'])
-
- #Get all the corresponding nodes details
- nodes_all = self.slab_api.GetNodes({'hostname':slice_nodes_list},
- ['node_id', 'hostname','site','boot_state'])
- nodeall_byhostname = dict([(one_node['hostname'], one_node) \
- for one_node in nodes_all])
-
-
-
- for single_slice in slice_list:
-
- #For compatibility
- top_level_status = 'empty'
- result = {}
- result.fromkeys(\
- ['geni_urn','pl_login','geni_status','geni_resources'], None)
- result['pl_login'] = one_slice['reg_researchers']['hrn']
- logger.debug("Slabdriver - sliver_status Sliver status \
- urn %s hrn %s single_slice %s \r\n " \
- %(slice_urn, slice_hrn, single_slice))
-
- if 'node_ids' not in single_slice:
- #No job in the slice
- result['geni_status'] = top_level_status
- result['geni_resources'] = []
- return result
-
- top_level_status = 'ready'
-
- #A job is running on Senslab for this slice
- # report about the local nodes that are in the slice only
-
- result['geni_urn'] = slice_urn
-
- resources = []
- for node in single_slice['node_ids']:
- res = {}
- #res['slab_hostname'] = node['hostname']
- #res['slab_boot_state'] = node['boot_state']
-
- res['pl_hostname'] = node['hostname']
- res['pl_boot_state'] = \
- nodeall_byhostname[node['hostname']]['boot_state']
- #res['pl_last_contact'] = strftime(self.time_format, \
- #gmtime(float(timestamp)))
- sliver_id = Xrn(slice_urn, type='slice', \
- id=nodeall_byhostname[node['hostname']]['node_id'], \
- authority=self.hrn).urn
-
- res['geni_urn'] = sliver_id
- node_name = node['hostname']
- if nodeall_byhostname[node_name]['boot_state'] == 'Alive':
-
- res['geni_status'] = 'ready'
- else:
- res['geni_status'] = 'failed'
- top_level_status = 'failed'
-
- res['geni_error'] = ''
-
- resources.append(res)
-
- result['geni_status'] = top_level_status
- result['geni_resources'] = resources
- logger.debug("SLABDRIVER \tsliver_statusresources %s res %s "\
- %(resources,res))
- return result
-
- @staticmethod
- def get_user_record(hrn):
- """
- Returns the user record based on the hrn from the SFA DB .
-
- :param hrn: user's hrn
- :type hrn: string
- :return : user record from SFA database
- :rtype: RegUser
-
- """
- return dbsession.query(RegRecord).filter_by(hrn = hrn).first()
-
-
- def testbed_name (self):
- """
- Returns testbed's name.
-
- :rtype: string
- """
- return self.hrn
-
- # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
- def aggregate_version (self):
- """
-
- Returns the testbed's supported rspec advertisement and
- request versions.
-
- :rtype: dict
- """
- version_manager = VersionManager()
- ad_rspec_versions = []
- request_rspec_versions = []
- for rspec_version in version_manager.versions:
- if rspec_version.content_type in ['*', 'ad']:
- ad_rspec_versions.append(rspec_version.to_dict())
- if rspec_version.content_type in ['*', 'request']:
- request_rspec_versions.append(rspec_version.to_dict())
- return {
- 'testbed':self.testbed_name(),
- 'geni_request_rspec_versions': request_rspec_versions,
- 'geni_ad_rspec_versions': ad_rspec_versions,
- }
-
-
-
- def _get_requested_leases_list(self, rspec):
- """
- Process leases in rspec depending on the rspec version (format)
- type. Find the lease requests in the rspec and creates
- a lease request list with the mandatory information ( nodes,
- start time and duration) of the valid leases (duration above or equal
- to the senslab experiment minimum duration).
-
- :param rspec: rspec request received.
- :type rspec: RSpec
- :return: list of lease requests found in the rspec
- :rtype: list
- """
- requested_lease_list = []
- for lease in rspec.version.get_leases():
- single_requested_lease = {}
- logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))
-
- if not lease.get('lease_id'):
- if get_authority(lease['component_id']) == \
- self.slab_api.root_auth:
- single_requested_lease['hostname'] = \
- slab_xrn_to_hostname(\
- lease.get('component_id').strip())
- single_requested_lease['start_time'] = \
- lease.get('start_time')
- single_requested_lease['duration'] = lease.get('duration')
- #Check the experiment's duration is valid before adding
- #the lease to the requested leases list
- duration_in_seconds = \
- int(single_requested_lease['duration'])*60
- if duration_in_seconds > self.slab_api.GetMinExperimentDurationInSec() :
- requested_lease_list.append(single_requested_lease)
-
- return requested_lease_list
-
- @staticmethod
- def _group_leases_by_start_time(requested_lease_list):
- """
- Create dict of leases by start_time, regrouping nodes reserved
- at the same time, for the same amount of time so as to
- define one job on OAR.
-
- :param requested_lease_list: list of leases
- :type requested_lease_list: list
- :return: Dictionary with key = start time, value = list of leases
- with the same start time.
- :rtype: dictionary
- """
-
- requested_job_dict = {}
- for lease in requested_lease_list:
-
- #In case it is an asap experiment start_time is empty
- if lease['start_time'] == '':
- lease['start_time'] = '0'
-
- if lease['start_time'] not in requested_job_dict:
- if isinstance(lease['hostname'], str):
- lease['hostname'] = [lease['hostname']]
-
-
- requested_job_dict[lease['start_time']] = lease
-
- else :
- job_lease = requested_job_dict[lease['start_time']]
- if lease['duration'] == job_lease['duration'] :
- job_lease['hostname'].append(lease['hostname'])
-
- return requested_job_dict
-
- def _process_requested_jobs(self, rspec):
- """
- Turns the requested leases and information into a dictionary
- of requested jobs, grouped by starting time.
-
- :param rspec: RSpec received
- :type rspec : RSpec
- :rtype: dictionary
- """
- requested_lease_list = self._get_requested_leases_list(rspec)
- logger.debug("SLABDRIVER _process_requested_jobs requested_lease_list \
- %s"%(requested_lease_list))
- job_dict = self._group_leases_by_start_time(requested_lease_list)
- logger.debug("SLABDRIVER _process_requested_jobs job_dict\
- %s"%(job_dict))
-
- return job_dict
-
- def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
- users, options):
- """
- Answer to CreateSliver.
- Creates the leases and slivers for the users from the information
- found in the rspec string.
- Launch experiment on OAR if the requested leases is valid. Delete
- no longer requested leases.
-
-
- :param creds: user's credentials
- :type creds: string
- :param users: user record list
- :type users: list
- :param options:
- :type options:
-
- :return: a valid Rspec for the slice which has just been
- modified.
- :rtype: RSpec
-
-
- """
- aggregate = SlabAggregate(self)
-
- slices = SlabSlices(self)
- peer = slices.get_peer(slice_hrn)
- sfa_peer = slices.get_sfa_peer(slice_hrn)
- slice_record = None
-
- if not isinstance(creds, list):
- creds = [creds]
-
- if users:
- slice_record = users[0].get('slice_record', {})
- logger.debug("SLABDRIVER.PY \t ===============create_sliver \t\
- creds %s \r\n \r\n users %s" \
- %(creds, users))
- slice_record['user'] = {'keys':users[0]['keys'], \
- 'email':users[0]['email'], \
- 'hrn':slice_record['reg-researchers'][0]}
- # parse rspec
- rspec = RSpec(rspec_string)
- logger.debug("SLABDRIVER.PY \t create_sliver \trspec.version \
- %s slice_record %s users %s" \
- %(rspec.version,slice_record, users))
-
-
- # ensure site record exists?
- # ensure slice record exists
- #Removed options to verify_slice SA 14/08/12
- sfa_slice = slices.verify_slice(slice_hrn, slice_record, peer, \
- sfa_peer)
-
- # ensure person records exists
- #verify_persons returns added persons but since the return value
- #is not used
- slices.verify_persons(slice_hrn, sfa_slice, users, peer, \
- sfa_peer, options=options)
- #requested_attributes returned by rspec.version.get_slice_attributes()
- #unused, removed SA 13/08/12
- #rspec.version.get_slice_attributes()
-
- logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))
-
- # add/remove slice from nodes
-
- #requested_slivers = [node.get('component_id') \
- #for node in rspec.version.get_nodes_with_slivers()\
- #if node.get('authority_id') is self.slab_api.root_auth]
- #l = [ node for node in rspec.version.get_nodes_with_slivers() ]
- #logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
- #requested_slivers %s listnodes %s" \
- #%(requested_slivers,l))
- #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
- #slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)
-
-
- requested_job_dict = self._process_requested_jobs(rspec)
-
-
- logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s "\
- %(requested_job_dict))
- #verify_slice_leases returns the leases , but the return value is unused
- #here. Removed SA 13/08/12
- slices.verify_slice_leases(sfa_slice, \
- requested_job_dict, peer)
-
- return aggregate.get_rspec(slice_xrn=slice_urn, \
- login=sfa_slice['login'], version=rspec.version)
-
-
- def delete_sliver (self, slice_urn, slice_hrn, creds, options):
- """
- Deletes the lease associated with the slice hrn and the credentials
- if the slice belongs to senslab. Answer to DeleteSliver.
-
- :return: 1 if the slice to delete was not found on senslab,
- True if the deletion was successful, False otherwise otherwise.
-
- .. note:: Should really be named delete_leases because senslab does
- not have any slivers, but only deals with leases. However, SFA api only
- have delete_sliver define so far. SA 13.05/2013
- """
-
- sfa_slice_list = self.slab_api.GetSlices(slice_filter = slice_hrn, \
- slice_filter_type = 'slice_hrn')
-
- if not sfa_slice_list:
- return 1
-
- #Delete all leases in the slice
- for sfa_slice in sfa_slice_list:
-
-
- logger.debug("SLABDRIVER.PY delete_sliver slice %s" %(sfa_slice))
- slices = SlabSlices(self)
- # determine if this is a peer slice
-
- peer = slices.get_peer(slice_hrn)
-
- logger.debug("SLABDRIVER.PY delete_sliver peer %s \
- \r\n \t sfa_slice %s " %(peer, sfa_slice))
- try:
-
- self.slab_api.DeleteSliceFromNodes(sfa_slice)
- return True
- except :
- return False
-
-
- def list_resources (self, slice_urn, slice_hrn, creds, options):
- """
- List resources from the senslab aggregate and returns a Rspec
- advertisement with resources found when slice_urn and slice_hrn are None
- (in case of resource discovery).
- If a slice hrn and urn are provided, list experiment's slice
- nodes in a rspec format. Answer to ListResources.
- Caching unused.
- :param options: options used when listing resources (list_leases, info,
- geni_available)
- :return: rspec string in xml
- :rtype: string
- """
-
- #cached_requested = options.get('cached', True)
-
- version_manager = VersionManager()
- # get the rspec's return format from options
- rspec_version = \
- version_manager.get_version(options.get('geni_rspec_version'))
- version_string = "rspec_%s" % (rspec_version)
-
- #panos adding the info option to the caching key (can be improved)
- if options.get('info'):
- version_string = version_string + "_" + \
- options.get('info', 'default')
-
- # Adding the list_leases option to the caching key
- if options.get('list_leases'):
- version_string = version_string + "_" + \
- options.get('list_leases', 'default')
-
- # Adding geni_available to caching key
- if options.get('geni_available'):
- version_string = version_string + "_" + \
- str(options.get('geni_available'))
-
- # look in cache first
- #if cached_requested and self.cache and not slice_hrn:
- #rspec = self.cache.get(version_string)
- #if rspec:
- #logger.debug("SlabDriver.ListResources: \
- #returning cached advertisement")
- #return rspec
-
- #panos: passing user-defined options
- aggregate = SlabAggregate(self)
-
- rspec = aggregate.get_rspec(slice_xrn=slice_urn, \
- version=rspec_version, options=options)
-
- # cache the result
- #if self.cache and not slice_hrn:
- #logger.debug("Slab.ListResources: stores advertisement in cache")
- #self.cache.add(version_string, rspec)
-
- return rspec
-
-
- def list_slices (self, creds, options):
- """
- Answer to ListSlices.
- List slices belonging to senslab, returns slice urns list.
- No caching used. Options unused but are defined in the SFA method
- api prototype.
-
- :return: slice urns list
- :rtype: list
-
- """
- # look in cache first
- #if self.cache:
- #slices = self.cache.get('slices')
- #if slices:
- #logger.debug("PlDriver.list_slices returns from cache")
- #return slices
-
- # get data from db
-
- slices = self.slab_api.GetSlices()
- logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))
- slice_hrns = [slab_slice['hrn'] for slab_slice in slices]
-
- slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
- for slice_hrn in slice_hrns]
-
- # cache the result
- #if self.cache:
- #logger.debug ("SlabDriver.list_slices stores value in cache")
- #self.cache.add('slices', slice_urns)
-
- return slice_urns
-
-
- def register (self, sfa_record, hrn, pub_key):
- """
- Adding new user, slice, node or site should not be handled
- by SFA.
-
- ..warnings:: should not be used. Different components are in charge of
- doing this task. Adding nodes = OAR
- Adding users = LDAP Senslab
- Adding slice = Import from LDAP users
- Adding site = OAR
-
- :param sfa_record: record provided by the client of the
- Register API call.
- :type sfa_record: dict
- """
- return -1
-
-
- def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
- """No site or node record update allowed in Senslab.
- The only modifications authorized here are key deletion/addition
- on an existing user and password change.
- On an existing user, CAN NOT BE MODIFIED:
- 'first_name', 'last_name', 'email'
- DOES NOT EXIST IN SENSLAB:
- 'phone', 'url', 'bio','title', 'accepted_aup',
- A slice is bound to its user, so modifying the user's ssh key should
- modify the slice's GID after an import procedure.
-
- :param old_sfa_record: what is in the db for this hrn
- :param new_sfa_record: what was passed to the Update call
-
- ..seealso:: update in driver.py.
- """
-
- pointer = old_sfa_record['pointer']
- old_sfa_record_type = old_sfa_record['type']
-
- # new_key implemented for users only
- if new_key and old_sfa_record_type not in [ 'user' ]:
- raise UnknownSfaType(old_sfa_record_type)
-
-
- if old_sfa_record_type == "user":
- update_fields = {}
- all_fields = new_sfa_record
- for key in all_fields.keys():
- if key in ['key', 'password']:
- update_fields[key] = all_fields[key]
-
-
- if new_key:
- # must check this key against the previous one if it exists
- persons = self.slab_api.GetPersons([old_sfa_record])
- person = persons[0]
- keys = [person['pkey']]
- #Get all the person's keys
- keys_dict = self.slab_api.GetKeys(keys)
-
- # Delete all stale keys, meaning the user has only one key
- #at a time
- #TODO: do we really want to delete all the other keys?
- #Is this a problem with the GID generation to have multiple
- #keys? SA 30/05/13
- key_exists = False
- if key in keys_dict:
- key_exists = True
- else:
- #remove all the other keys
- for key in keys_dict:
- self.slab_api.DeleteKey(person, key)
- self.slab_api.AddPersonKey(person, \
- {'sshPublicKey': person['pkey']},{'sshPublicKey': new_key} )
- #self.slab_api.AddPersonKey(person, {'key_type': 'ssh', \
- #'key': new_key})
- return True
-
-
- def remove (self, sfa_record):
- """
- Removes users only. Mark the user as disabled in
- LDAP. The user and his slice are then deleted from the db by running an
- import on the registry.
-
-
-
- :param sfa_record: record is the existing sfa record in the db
- :type sfa_record: dict
-
- ..warning::As fas as the slice is concerned, here only the leases are
- removed from the slice. The slice is record itself is not removed from
- the db.
- TODO : REMOVE SLICE FROM THE DB AS WELL? SA 14/05/2013,
-
- TODO: return boolean for the slice part
- """
- sfa_record_type = sfa_record['type']
- hrn = sfa_record['hrn']
- if sfa_record_type == 'user':
-
- #get user from senslab ldap
- person = self.slab_api.GetPersons(sfa_record)
- #No registering at a given site in Senslab.
- #Once registered to the LDAP, all senslab sites are
- #accesible.
- if person :
- #Mark account as disabled in ldap
- return self.slab_api.DeletePerson(sfa_record)
-
- elif sfa_record_type == 'slice':
- if self.slab_api.GetSlices(slice_filter = hrn, \
- slice_filter_type = 'slice_hrn'):
- ret = self.slab_api.DeleteSlice(sfa_record)
-
-
-
- return True
-
-
+++ /dev/null
-from sqlalchemy import create_engine
-from sqlalchemy.orm import sessionmaker
-
-from sfa.util.config import Config
-from sfa.util.sfalogging import logger
-
-from sqlalchemy import Column, Integer, String
-from sqlalchemy import Table, MetaData
-from sqlalchemy.ext.declarative import declarative_base
-
-from sqlalchemy.dialects import postgresql
-
-from sqlalchemy.exc import NoSuchTableError
-
-
-#Dict holding the columns names of the table as keys
-#and their type, used for creation of the table
-slice_table = {'record_id_user': 'integer PRIMARY KEY references X ON DELETE \
-CASCADE ON UPDATE CASCADE','oar_job_id':'integer DEFAULT -1', \
-'record_id_slice':'integer', 'slice_hrn':'text NOT NULL'}
-
-#Dict with all the specific senslab tables
-tablenames_dict = {'slab_xp': slice_table}
-
-
-SlabBase = declarative_base()
-
-
-
-class SenslabXP (SlabBase):
- """ SQL alchemy class to manipulate slice_senslab table in
- slab_sfa database.
-
- """
- __tablename__ = 'slab_xp'
-
-
- slice_hrn = Column(String)
- job_id = Column(Integer, primary_key = True)
- end_time = Column(Integer, nullable = False)
-
-
- #oar_job_id = Column( Integer,default = -1)
- #node_list = Column(postgresql.ARRAY(String), nullable =True)
-
- def __init__ (self, slice_hrn =None, job_id=None, end_time=None):
- """
- Defines a row of the slice_senslab table
- """
- if slice_hrn:
- self.slice_hrn = slice_hrn
- if job_id :
- self.job_id = job_id
- if end_time:
- self.end_time = end_time
-
-
- def __repr__(self):
- """Prints the SQLAlchemy record to the format defined
- by the function.
- """
- result = "<slab_xp : slice_hrn = %s , job_id %s end_time = %s" \
- %(self.slice_hrn, self.job_id, self.end_time)
- result += ">"
- return result
-
-
-
-class SlabDB:
- """ SQL Alchemy connection class.
- From alchemy.py
- """
- def __init__(self, config, debug = False):
- self.sl_base = SlabBase
- dbname = "slab_sfa"
- if debug == True :
- l_echo_pool = True
- l_echo = True
- else :
- l_echo_pool = False
- l_echo = False
-
- self.slab_session = None
- # the former PostgreSQL.py used the psycopg2 directly and was doing
- #self.connection.set_client_encoding("UNICODE")
- # it's unclear how to achieve this in sqlalchemy, nor if it's needed
- # at all
- # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
- # we indeed have /var/lib/pgsql/data/postgresql.conf where
- # this setting is unset, it might be an angle to tweak that if need be
- # try a unix socket first - omitting the hostname does the trick
- unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"% \
- (config.SFA_DB_USER, config.SFA_DB_PASSWORD, \
- config.SFA_DB_PORT, dbname)
-
- # the TCP fallback method
- tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"% \
- (config.SFA_DB_USER, config.SFA_DB_PASSWORD, config.SFA_DB_HOST, \
- config.SFA_DB_PORT, dbname)
- for url in [ unix_url, tcp_url ] :
- try:
- self.slab_engine = create_engine (url, echo_pool = \
- l_echo_pool, echo = l_echo)
- self.check()
- self.url = url
- return
- except:
- pass
- self.slab_engine = None
- raise Exception, "Could not connect to database"
-
-
-
- def check (self):
- """ Cehck if a table exists by trying a selection
- on the table.
-
- """
- self.slab_engine.execute ("select 1").scalar()
-
-
- def session (self):
- """
- Creates a SQLalchemy session. Once the session object is created
- it should be used throughout the code for all the operations on
- tables for this given database.
-
- """
- if self.slab_session is None:
- Session = sessionmaker()
- self.slab_session = Session(bind = self.slab_engine)
- return self.slab_session
-
- def close_session(self):
- """
- Closes connection to database.
-
- """
- if self.slab_session is None: return
- self.slab_session.close()
- self.slab_session = None
-
-
- def exists(self, tablename):
- """
- Checks if the table specified as tablename exists.
-
- """
-
- try:
- metadata = MetaData (bind=self.slab_engine)
- table = Table (tablename, metadata, autoload=True)
- return True
-
- except NoSuchTableError:
- logger.log_exc("SLABPOSTGRES tablename %s does not exists" \
- %(tablename))
- return False
-
-
- def createtable(self):
- """
- Creates all the table sof the engine.
- Uses the global dictionnary holding the tablenames and the table schema.
-
- """
-
- logger.debug("SLABPOSTGRES createtable SlabBase.metadata.sorted_tables \
- %s \r\n engine %s" %(SlabBase.metadata.sorted_tables , slab_engine))
- SlabBase.metadata.create_all(slab_engine)
- return
-
-
-
-slab_alchemy = SlabDB(Config())
-slab_engine = slab_alchemy.slab_engine
-slab_dbsession = slab_alchemy.session()
+++ /dev/null
-from sfa.util.xrn import get_authority, urn_to_hrn
-from sfa.util.sfalogging import logger
-
-
-MAXINT = 2L**31-1
-
-class SlabSlices:
-
- rspec_to_slice_tag = {'max_rate':'net_max_rate'}
-
-
- def __init__(self, driver):
- """
- Get the reference to the driver here.
- """
- self.driver = driver
-
-
- def get_peer(self, xrn):
- """
- Find the authority of a resources based on its xrn.
- If the authority is Senslab (local) return None,
- Otherwise, look up in the DB if Senslab is federated with this site
- authority and returns its DB record if it is the case,
- """
- hrn, hrn_type = urn_to_hrn(xrn)
- #Does this slice belong to a local site or a peer senslab site?
- peer = None
-
- # get this slice's authority (site)
- slice_authority = get_authority(hrn)
- #Senslab stuff
- #This slice belongs to the current site
- if slice_authority == self.driver.slab_api.root_auth:
- site_authority = slice_authority
- return None
-
- site_authority = get_authority(slice_authority).lower()
- # get this site's authority (sfa root authority or sub authority)
-
- logger.debug("SLABSLICES \ get_peer slice_authority %s \
- site_authority %s hrn %s" %(slice_authority, \
- site_authority, hrn))
-
-
- # check if we are already peered with this site_authority
- #if so find the peer record
- peers = self.driver.slab_api.GetPeers(peer_filter = site_authority)
- for peer_record in peers:
-
- if site_authority == peer_record.hrn:
- peer = peer_record
- logger.debug(" SLABSLICES \tget_peer peer %s " %(peer))
- return peer
-
- def get_sfa_peer(self, xrn):
- hrn, hrn_type = urn_to_hrn(xrn)
-
- # return the authority for this hrn or None if we are the authority
- sfa_peer = None
- slice_authority = get_authority(hrn)
- site_authority = get_authority(slice_authority)
-
- if site_authority != self.driver.hrn:
- sfa_peer = site_authority
-
- return sfa_peer
-
-
- def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
- """
- Compare requested leases with the leases already scheduled/
- running in OAR. If necessary, delete and recreate modified leases,
- and delete no longer requested ones.
-
- :param sfa_slice: sfa slice record
- :param requested_jobs_dict: dictionary of requested leases
- :param peer: sfa peer
-
- :type sfa_slice: dict
- :type requested_jobs_dict: dict
- :type peer:
- :return: leases list of dictionary
- :rtype: list
-
- """
-
- logger.debug("SLABSLICES verify_slice_leases sfa_slice %s \
- "%( sfa_slice))
- #First get the list of current leases from OAR
- leases = self.driver.slab_api.GetLeases({'name':sfa_slice['hrn']})
- logger.debug("SLABSLICES verify_slice_leases requested_jobs_dict %s \
- leases %s "%(requested_jobs_dict, leases ))
-
- current_nodes_reserved_by_start_time = {}
- requested_nodes_by_start_time = {}
- leases_by_start_time = {}
- reschedule_jobs_dict = {}
-
-
- #Create reduced dictionary with key start_time and value
- # the list of nodes
- #-for the leases already registered by OAR first
- # then for the new leases requested by the user
-
- #Leases already scheduled/running in OAR
- for lease in leases :
- current_nodes_reserved_by_start_time[lease['t_from']] = \
- lease['reserved_nodes']
- leases_by_start_time[lease['t_from']] = lease
-
- #First remove job whose duration is too short
- for job in requested_jobs_dict.values():
- if job['duration'] < self.driver.slab_api.GetLeaseGranularity():
- del requested_jobs_dict[job['start_time']]
-
- #Requested jobs
- for start_time in requested_jobs_dict:
- requested_nodes_by_start_time[int(start_time)] = \
- requested_jobs_dict[start_time]['hostname']
- #Check if there is any difference between the leases already
- #registered in OAR and the requested jobs.
- #Difference could be:
- #-Lease deleted in the requested jobs
- #-Added/removed nodes
- #-Newly added lease
-
- logger.debug("SLABSLICES verify_slice_leases \
- requested_nodes_by_start_time %s \
- "%(requested_nodes_by_start_time ))
- #Find all deleted leases
- start_time_list = \
- list(set(leases_by_start_time.keys()).\
- difference(requested_nodes_by_start_time.keys()))
- deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
- for start_time in start_time_list]
-
-
-
- #Find added or removed nodes in exisiting leases
- for start_time in requested_nodes_by_start_time:
- logger.debug("SLABSLICES verify_slice_leases start_time %s \
- "%( start_time))
- if start_time in current_nodes_reserved_by_start_time:
-
- if requested_nodes_by_start_time[start_time] == \
- current_nodes_reserved_by_start_time[start_time]:
- continue
-
- else:
- update_node_set = \
- set(requested_nodes_by_start_time[start_time])
- added_nodes = \
- update_node_set.difference(\
- current_nodes_reserved_by_start_time[start_time])
- shared_nodes = \
- update_node_set.intersection(\
- current_nodes_reserved_by_start_time[start_time])
- old_nodes_set = \
- set(\
- current_nodes_reserved_by_start_time[start_time])
- removed_nodes = \
- old_nodes_set.difference(\
- requested_nodes_by_start_time[start_time])
- logger.debug("SLABSLICES verify_slice_leases \
- shared_nodes %s added_nodes %s removed_nodes %s"\
- %(shared_nodes, added_nodes,removed_nodes ))
- #If the lease is modified, delete it before
- #creating it again.
- #Add the deleted lease job id in the list
- #WARNING :rescheduling does not work if there is already
- # 2 running/scheduled jobs because deleting a job
- #takes time SA 18/10/2012
- if added_nodes or removed_nodes:
- deleted_leases.append(\
- leases_by_start_time[start_time]['lease_id'])
- #Reschedule the job
- if added_nodes or shared_nodes:
- reschedule_jobs_dict[str(start_time)] = \
- requested_jobs_dict[str(start_time)]
-
- else:
- #New lease
-
- job = requested_jobs_dict[str(start_time)]
- logger.debug("SLABSLICES \
- NEWLEASE slice %s job %s"\
- %(sfa_slice, job))
- self.driver.slab_api.AddLeases(job['hostname'], \
- sfa_slice, int(job['start_time']), \
- int(job['duration']))
-
- #Deleted leases are the ones with lease id not declared in the Rspec
- if deleted_leases:
- self.driver.slab_api.DeleteLeases(deleted_leases, sfa_slice['hrn'])
- logger.debug("SLABSLICES \
- verify_slice_leases slice %s deleted_leases %s"\
- %(sfa_slice, deleted_leases))
-
-
- if reschedule_jobs_dict :
- for start_time in reschedule_jobs_dict:
- job = reschedule_jobs_dict[start_time]
- self.driver.slab_api.AddLeases(job['hostname'], \
- sfa_slice, int(job['start_time']), \
- int(job['duration']))
- return leases
-
- def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
- current_slivers = []
- deleted_nodes = []
-
- if 'node_ids' in sfa_slice:
- nodes = self.driver.slab_api.GetNodes(sfa_slice['list_node_ids'], \
- ['hostname'])
- current_slivers = [node['hostname'] for node in nodes]
-
- # remove nodes not in rspec
- deleted_nodes = list(set(current_slivers).\
- difference(requested_slivers))
- # add nodes from rspec
- #added_nodes = list(set(requested_slivers).\
- #difference(current_slivers))
-
-
- logger.debug("SLABSLICES \tverify_slice_nodes slice %s\
- \r\n \r\n deleted_nodes %s"\
- %(sfa_slice, deleted_nodes))
-
- if deleted_nodes:
- #Delete the entire experience
- self.driver.slab_api.DeleteSliceFromNodes(sfa_slice)
- #self.driver.DeleteSliceFromNodes(sfa_slice['slice_hrn'], \
- #deleted_nodes)
- return nodes
-
-
-
- def free_egre_key(self):
- used = set()
- for tag in self.driver.slab_api.GetSliceTags({'tagname': 'egre_key'}):
- used.add(int(tag['value']))
-
- for i in range(1, 256):
- if i not in used:
- key = i
- break
- else:
- raise KeyError("No more EGRE keys available")
-
- return str(key)
-
-
-
-
-
-
- def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer):
-
- #login_base = slice_hrn.split(".")[0]
- slicename = slice_hrn
- slices_list = self.driver.slab_api.GetSlices(slice_filter = slicename, \
- slice_filter_type = 'slice_hrn')
- sfa_slice = None
- if slices_list:
- for sl in slices_list:
-
- logger.debug("SLABSLICE \tverify_slice slicename %s \
- slices_list %s sl %s \ slice_record %s"\
- %(slicename, slices_list,sl, \
- slice_record))
- sfa_slice = sl
- sfa_slice.update(slice_record)
-
- else:
- #Search for user in ldap based on email SA 14/11/12
- ldap_user = self.driver.slab_api.ldap.LdapFindUser(\
- slice_record['user'])
- logger.debug(" SLABSLICES \tverify_slice Oups \
- slice_record %s sfa_peer %s ldap_user %s"\
- %(slice_record, sfa_peer, ldap_user ))
- #User already registered in ldap, meaning user should be in SFA db
- #and hrn = sfa_auth+ uid
- sfa_slice = {'hrn': slicename,
- #'url': slice_record.get('url', slice_hrn),
- #'description': slice_record.get('description', slice_hrn)
- 'node_list' : [],
- 'authority' : slice_record['authority'],
- 'gid':slice_record['gid'],
- #'record_id_user' : user.record_id,
- 'slice_id' : slice_record['record_id'],
- 'reg-researchers':slice_record['reg-researchers'],
- #'record_id_slice': slice_record['record_id'],
- 'peer_authority':str(sfa_peer)
-
- }
- if ldap_user :
- hrn = self.driver.slab_api.root_auth +'.'+ ldap_user['uid']
-
- user = self.driver.get_user_record(hrn)
-
- logger.debug(" SLABSLICES \tverify_slice hrn %s USER %s" \
- %(hrn, user))
- #sfa_slice = {'slice_hrn': slicename,
- ##'url': slice_record.get('url', slice_hrn),
- ##'description': slice_record.get('description', slice_hrn)
- #'node_list' : [],
- #'authority' : slice_record['authority'],
- #'gid':slice_record['gid'],
- ##'record_id_user' : user.record_id,
- #'slice_id' : slice_record['record_id'],
- #'reg-researchers':slice_record['reg-researchers'],
- ##'record_id_slice': slice_record['record_id'],
- #'peer_authority':str(peer.hrn)
-
- #}
- # add the slice
- if sfa_slice :
- self.driver.slab_api.AddSlice(sfa_slice, user)
-
- if peer:
- sfa_slice['slice_id'] = slice_record['record_id']
-
- #slice['slice_id'] = self.driver.slab_api.AddSlice(slice)
- logger.debug("SLABSLICES \tverify_slice ADDSLICE OK")
- #slice['node_ids']=[]
- #slice['person_ids'] = []
- #if peer:
- #sfa_slice['peer_slice_id'] = slice_record.get('slice_id', None)
- # mark this slice as an sfa peer record
- #if sfa_peer:
- #peer_dict = {'type': 'slice', 'hrn': slice_hrn,
- #'peer_authority': sfa_peer, 'pointer': \
- #slice['slice_id']}
- #self.registry.register_peer_object(self.credential, peer_dict)
-
-
-
- return sfa_slice
-
-
- def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, \
- options={}):
- """
- users is a record list. Records can either be local records
- or users records from known and trusted federated sites.
- If the user is from another site that senslab doesn't trust yet,
- then Resolve will raise an error before getting to create_sliver.
- """
- #TODO SA 21/08/12 verify_persons Needs review
-
- logger.debug("SLABSLICES \tverify_persons \tslice_hrn %s \
- \t slice_record %s\r\n users %s \t peer %s "\
- %( slice_hrn, slice_record, users, peer))
- users_by_id = {}
- #users_by_hrn = {}
- users_by_email = {}
- #users_dict : dict whose keys can either be the user's hrn or its id.
- #Values contains only id and hrn
- users_dict = {}
-
- #First create dicts by hrn and id for each user in the user record list:
- for info in users:
-
- if 'slice_record' in info :
- slice_rec = info['slice_record']
- user = slice_rec['user']
-
- if 'email' in user:
- users_by_email[user['email']] = user
- users_dict[user['email']] = user
-
- #if 'hrn' in user:
- #users_by_hrn[user['hrn']] = user
- #users_dict[user['hrn']] = user
-
- logger.debug( "SLABSLICE.PY \t verify_person \
- users_dict %s \r\n user_by_email %s \r\n \
- \tusers_by_id %s " \
- %(users_dict,users_by_email, users_by_id))
-
- existing_user_ids = []
- #existing_user_hrns = []
- existing_user_emails = []
- existing_users = []
- # Check if user is in Senslab LDAP using its hrn.
- # Assuming Senslab is centralised : one LDAP for all sites,
- # user'as record_id unknown from LDAP
- # LDAP does not provide users id, therefore we rely on hrns containing
- # the login of the user.
- # If the hrn is not a senslab hrn, the user may not be in LDAP.
-
- if users_by_email :
- #Construct the list of filters (list of dicts) for GetPersons
- filter_user = []
- for email in users_by_email :
- filter_user.append (users_by_email[email])
- #Check user's in LDAP with GetPersons
- #Needed because what if the user has been deleted in LDAP but
- #is still in SFA?
- existing_users = self.driver.slab_api.GetPersons(filter_user)
- logger.debug(" \r\n SLABSLICE.PY \tverify_person filter_user \
- %s existing_users %s " \
- %(filter_user, existing_users))
- #User's in senslab LDAP
- if existing_users:
- for user in existing_users :
- users_dict[user['email']].update(user)
- existing_user_emails.append(\
- users_dict[user['email']]['email'])
-
-
- # User from another known trusted federated site. Check
- # if a senslab account matching the email has already been created.
- else:
- req = 'mail='
- if isinstance(users, list):
-
- req += users[0]['email']
- else:
- req += users['email']
-
- ldap_reslt = self.driver.slab_api.ldap.LdapSearch(req)
-
- if ldap_reslt:
- logger.debug(" SLABSLICE.PY \tverify_person users \
- USER already in Senslab \t ldap_reslt %s \
- "%( ldap_reslt))
- existing_users.append(ldap_reslt[1])
-
- else:
- #User not existing in LDAP
- #TODO SA 21/08/12 raise smthg to add user or add it auto ?
- #new_record = {}
- #new_record['pkey'] = users[0]['keys'][0]
- #new_record['mail'] = users[0]['email']
-
- logger.debug(" SLABSLICE.PY \tverify_person users \
- not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t \
- ldap_reslt %s " %(users, ldap_reslt))
-
- requested_user_emails = users_by_email.keys()
- requested_user_hrns = \
- [users_by_email[user]['hrn'] for user in users_by_email]
- logger.debug("SLABSLICE.PY \tverify_person \
- users_by_email %s " %( users_by_email))
- #logger.debug("SLABSLICE.PY \tverify_person \
- #user_by_hrn %s " %( users_by_hrn))
-
-
- #Check that the user of the slice in the slice record
- #matches one of the existing users
- try:
- if slice_record['PI'][0] in requested_user_hrns:
- #if slice_record['record_id_user'] in requested_user_ids and \
- #slice_record['PI'][0] in requested_user_hrns:
- logger.debug(" SLABSLICE \tverify_person ['PI']\
- slice_record %s" %(slice_record))
-
- except KeyError:
- pass
-
-
- # users to be added, removed or updated
- #One user in one senslab slice : there should be no need
- #to remove/ add any user from/to a slice.
- #However a user from SFA which is not registered in Senslab yet
- #should be added to the LDAP.
- added_user_emails = set(requested_user_emails).\
- difference(set(existing_user_emails))
-
-
- #self.verify_keys(existing_slice_users, updated_users_list, \
- #peer, append)
-
- added_persons = []
- # add new users
-
- #requested_user_email is in existing_user_emails
- if len(added_user_emails) == 0:
-
- slice_record['login'] = users_dict[requested_user_emails[0]]['uid']
- logger.debug(" SLABSLICE \tverify_person QUICK DIRTY %s" \
- %(slice_record))
-
-
- for added_user_email in added_user_emails:
- #hrn, type = urn_to_hrn(added_user['urn'])
- added_user = users_dict[added_user_email]
- logger.debug(" SLABSLICE \r\n \r\n \t THE SECOND verify_person \
- added_user %s" %(added_user))
- person = {}
- person['peer_person_id'] = None
- k_list = ['first_name', 'last_name','person_id']
- for k in k_list:
- if k in added_user:
- person[k] = added_user[k]
-
- person['pkey'] = added_user['keys'][0]
- person['mail'] = added_user['email']
- person['email'] = added_user['email']
- person['key_ids'] = added_user.get('key_ids', [])
- #person['urn'] = added_user['urn']
-
- #person['person_id'] = self.driver.slab_api.AddPerson(person)
- ret = self.driver.slab_api.AddPerson(person)
- if type(ret) == int :
- person['uid'] = ret
-
- logger.debug(" SLABSLICE \r\n \r\n \t THE SECOND verify_person\
- personne %s" %(person))
- #Update slice_Record with the id now known to LDAP
- slice_record['login'] = person['uid']
-
- added_persons.append(person)
-
-
- return added_persons
-
- #Unused
- def verify_keys(self, persons, users, peer, options={}):
- # existing keys
- key_ids = []
- for person in persons:
- key_ids.extend(person['key_ids'])
- keylist = self.driver.slab_api.GetKeys(key_ids, ['key_id', 'key'])
-
- keydict = {}
- for key in keylist:
- keydict[key['key']] = key['key_id']
- existing_keys = keydict.keys()
-
- persondict = {}
- for person in persons:
- persondict[person['email']] = person
-
- # add new keys
- requested_keys = []
- updated_persons = []
- users_by_key_string = {}
- for user in users:
- user_keys = user.get('keys', [])
- updated_persons.append(user)
- for key_string in user_keys:
- users_by_key_string[key_string] = user
- requested_keys.append(key_string)
- if key_string not in existing_keys:
- key = {'key': key_string, 'key_type': 'ssh'}
- #try:
- ##if peer:
- #person = persondict[user['email']]
- #self.driver.slab_api.UnBindObjectFromPeer('person',
- #person['person_id'], peer['shortname'])
- ret = self.driver.slab_api.AddPersonKey(\
- user['email'], key)
- #if peer:
- #key_index = user_keys.index(key['key'])
- #remote_key_id = user['key_ids'][key_index]
- #self.driver.slab_api.BindObjectToPeer('key', \
- #key['key_id'], peer['shortname'], \
- #remote_key_id)
-
- #finally:
- #if peer:
- #self.driver.slab_api.BindObjectToPeer('person', \
- #person['person_id'], peer['shortname'], \
- #user['person_id'])
-
- # remove old keys (only if we are not appending)
- append = options.get('append', True)
- if append == False:
- removed_keys = set(existing_keys).difference(requested_keys)
- for key in removed_keys:
- #if peer:
- #self.driver.slab_api.UnBindObjectFromPeer('key', \
- #key, peer['shortname'])
-
- user = users_by_key_string[key]
- self.driver.slab_api.DeleteKey(user, key)
-
- return
-
\ No newline at end of file
if isinstance(value, list):
for item in value:
if isinstance(item, dict) and \
- set(ApiVersions.required_fields).issubset(item.keys()):
+ set(ApiVersions.required_fields).issubset(item.keys()) and \
+ item['version'] != '' and item['url'] != '':
versions[str(item['version'])] = item['url']
return versions
trusted_cert_objects.append(GID(filename=f))
ok_trusted_certs.append(f)
except Exception, exc:
- logger.error("Failed to load trusted cert from %s: %r", f, exc)
+ logger.error("Failed to load trusted cert from %s: %r"%( f, exc))
trusted_certs = ok_trusted_certs
# Use legacy verification if this is a legacy credential
###########################################################################
-# Copyright (C) 2012 by
-# <savakian@sfa2.grenoble.senslab.info>
+# Copyright (C) 2012 by
+# <savakian@sfa2.grenoble.iotlab.info>
#
# Copyright: See COPYING file that comes with this distribution
#
###########################################################################
-#LDAP import
-from sfa.senslab.LDAPapi import LDAPapi
+#LDAP import
+from sfa.iotlab.LDAPapi import LDAPapi
import ldap.modlist as modlist
-
#logger sfa
from sfa.util.sfalogging import logger
#OAR imports
from datetime import datetime
-from sfa.senslab.OARrestapi import OARrestapi
+from sfa.iotlab.OARrestapi import OARrestapi
-#Test slabdriver
-from sfa.senslab.slabdriver import SlabDriver
+#Test iotlabdriver
+from sfa.iotlab.iotlabdriver import IotlabDriver
from sfa.util.config import Config
import sys
-
+
def parse_options():
-
+
#arguments supplied
if len(sys.argv) > 1 :
options_list = sys.argv[1:]
valid_options_dict = {}
value_list = []
#Passing options to the script should be done like this :
- #-10 OAR -2 SlabDriver
+ #-10 OAR -2 IotlabDriver
for option in options_list:
if option in supported_options:
- #update the values used for the fonctions associated
+ #update the values used for the functions associated
#with the options
-
+
valid_options_dict[option] = value_list
#empty the values list for next option
value_list = []
print "value_list", value_list
- return valid_options_dict
-
+ return valid_options_dict
+
def TestLdap(job_id = None):
logger.setLevelDebug()
ldap_server = LDAPapi()
ret = ldap_server.conn.connect(bind=True)
- ldap_server.conn.close()
+ ldap_server.conn.close()
print "TEST ldap_server.conn.connect(bind=True)" , ret
-
+
ret = ldap_server.conn.connect(bind=False)
ldap_server.conn.close()
print "TEST ldap_server.conn.connect(bind=False)", ret
ret = ldap_server.LdapSearch()
print "TEST ldap_server.LdapSearch ALL", ret
-
+
ret = ldap_server.LdapSearch('(uid=avakian)', [])
print "\r\n TEST ldap_server.LdapSearch ids = avakian", ret
password = ldap_server.generate_password()
- print "\r\n TEST generate_password ", password
-
+ print "\r\n TEST generate_password ", password
+
maxi = ldap_server.find_max_uidNumber()
print "\r\n TEST find_max_uidNumber " , maxi
data['first_name'] = "Tim"
data['givenName'] = data['first_name']
data['mail'] = "robin@arkham.fr"
-
+
record = {}
- record['hrn'] = 'senslab.drake'
+ record['hrn'] = 'iotlab.drake'
record['last_name'] = "Drake"
record['first_name'] = "Tim"
record['mail'] = "robin@arkham.fr"
-
-
+
+
login = ldap_server.generate_login(data)
print "\r\n Robin \tgenerate_login ", ret, login
-
+
ret = ldap_server.LdapAddUser(data)
print "\r\n Robin \tLdapAddUser ", ret
req_ldap = '(uid=' + login + ')'
ret = ldap_server.LdapSearch(req_ldap, [])
print "\r\n Robin \tldap_server.LdapSearch ids = %s %s" % (login, ret)
-
+
password = "Thridrobin"
enc = ldap_server.encrypt_password(password)
print "\r\n Robin \tencrypt_password ", enc
-
+
ret = ldap_server.LdapModifyUser(record, {'userPassword':enc})
print "\r\n Robin \tChange password LdapModifyUser ", ret
-
+
#dn = 'uid=' + login + ',' + ldap_server.baseDN
#ret = ldap_server.LdapDelete(dn)
#print "\r\n Robin \tLdapDelete ", ret
-
+
datanight = {}
datanight['last_name'] = "Grayson"
datanight['first_name'] = "Dick"
datanight['givenName'] = datanight['first_name']
datanight['mail'] = "nightwing@arkham.fr"
-
-
+
+
record_night = {}
- record_night['hrn'] = 'senslab.grayson'
+ record_night['hrn'] = 'iotlab.grayson'
record_night['last_name'] = datanight['last_name']
record_night['first_name'] = datanight['first_name']
record_night['mail'] = datanight['mail']
-
+
ret = ldap_server.LdapFindUser(record_night)
print "\r\n Nightwing \tldap_server.LdapFindUser %s : %s" % (record_night, ret)
-
+
#ret = ldap_server.LdapSearch('(uid=grayson)', [])
#print "\r\n Nightwing \tldap_server.LdapSearch ids = %s %s" %('grayson',ret )
#ret = ldap_server.LdapAddUser(datanight)
- #print "\r\n Nightwing \tLdapAddUser ", ret
-
+ #print "\r\n Nightwing \tLdapAddUser ", ret
+
#ret = ldap_server.LdapResetPassword(record_night)
#print "\r\n Nightwing \tLdapResetPassword de %s : %s" % (record_night, ret)
-
+
ret = ldap_server.LdapDeleteUser(record_night)
- print "\r\n Nightwing \tLdapDeleteUser ", ret
-
-
+ print "\r\n Nightwing \tLdapDeleteUser ", ret
+
+
#record_avakian = {}
- #record_avakian['hrn']= 'senslab.avakian'
+ #record_avakian['hrn']= 'iotlab.avakian'
#record_avakian['last_name'] = 'avakian'
#record_avakian['first_name'] = 'sandrine'
#record_avakian['mail'] = 'sandrine.avakian@inria.fr'
- #pubkey = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAwSUkJ+cr3xM47h8lFkIXJoJhg4wHakTaLJmgTXkzvUmQsQeFB2MjUZ6WAelMXj/EFz2+XkK+bcWNXwfbrLptJQ+XwGpPZlu9YV/kzO63ghVrAyEg0+p7Pn1TO9f1ZYg4R6JfP/3qwH1AsE+X3PNpIewsuEIKwd2wUCJDf5RXJTpl39GizcBFemrRqgs0bdqAN/vUT9YvtWn8fCYR5EfJHVXOK8P1KmnbuGZpk7ryz21pDMlgw13+8aYB+LPkxdv5zG54A5c6o9N3zOCblvRFWaNBqathS8y04cOYWPmyu+Q0Xccwi7vM3Ktm8RoJw+raQNwsmneJOm6KXKnjoOQeiQ== savakian@sfa2.grenoble.senslab.info"
+ #pubkey = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAwSUkJ+cr3xM47h8lFkIXJoJhg4wHakTaLJmgTXkzvUmQsQeFB2MjUZ6WAelMXj/EFz2+XkK+bcWNXwfbrLptJQ+XwGpPZlu9YV/kzO63ghVrAyEg0+p7Pn1TO9f1ZYg4R6JfP/3qwH1AsE+X3PNpIewsuEIKwd2wUCJDf5RXJTpl39GizcBFemrRqgs0bdqAN/vUT9YvtWn8fCYR5EfJHVXOK8P1KmnbuGZpk7ryz21pDMlgw13+8aYB+LPkxdv5zG54A5c6o9N3zOCblvRFWaNBqathS8y04cOYWPmyu+Q0Xccwi7vM3Ktm8RoJw+raQNwsmneJOm6KXKnjoOQeiQ== savakian@sfa2.grenoble.iotlab.info"
#ret = ldap_server.LdapModifyUser(record_night, {'sshPublicKey':pubkey})
- #print "\r\n Sandrine \tChange pubkey LdapModifyUser ", ret
-
+ #print "\r\n Sandrine \tChange pubkey LdapModifyUser ", ret
+
#record_myslice = {}
- #record_myslice['hrn']= 'senslab.myslice'
+ #record_myslice['hrn']= 'iotlab.myslice'
#record_myslice['last_name'] = 'myslice'
#record_myslice['first_name'] = 'myslice'
#record_myslice['mail'] = 'nturro@inria.fr'
#pubkeymyslice = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuyRPwn8PZxjdhu+ciRuPyM0eVBn7XS7i3tym9F30UVhaCd09a/UEmGn7WJZdfsxV3hXqG1Wc766FEst97NuzHzELSuvy/rT96J0UHG4wae4pnzOLd6NwFdZh7pkPsgHMHxK9ALVE68Puu+EDSOB5bBZ9Q624wCIGxEpmuS/+X+dDBTKgG5Hi0WA1uKJwhLSbbXb38auh4FlYgXPsdpljTIJatt+zGL0Zsy6fdrsVRc5W8kr3/SmE4OMNyabKBNyxioSEuYhRSjoQAHnYoevEjZniP8IzscKK7qwelzGUfnJEzexikhsQamhAFti2ReiFfoHBRZxnSc49ioH7Kaci5w== root@rhoecos3.ipv6.lip6.fr"
-
+
#pubkeytestuser = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYS8tzufciTm6GdNUGHQc64OfTxFebMYUwh/Jl04IPTvjjr26uakbM0M2v33HxZ5Q7PnmPN9pB/w+a+f7a7J4cNs/tApOMg2hb6UrLaOrdnDMOs4KZlfElyDsF3Zx5QwxPYvzsKADAbDVoX4NF9PttuDLdm2l3nLSvm89jfla00GBg+K8grdOCHyYZVX/Wt7kxhXDK3AidQhKJgn+iD5GxvtWMBE+7S5kJGdRW1W10lSLBW3+VNsCrKJB2s8L55Xz/l2HNBScU7T0VcMQJrFxEXKzLPagZsMz0lfLzHESoGHIZ3Tz85DfECbTtMxLts/4KoAEc3EE+PYr2VDeAggDx testuser@myslice"
-
-
+
+
#password = "ReptileFight"
#enc = ldap_server.encrypt_password(password)
#print "\r\n sandrine \tencrypt_password ", enc
-
+
#ret = ldap_server.LdapModifyUser(record_avakian, {'userPassword':enc})
- #print "\r\n sandrine \tChange password LdapModifyUser ", ret
+ #print "\r\n sandrine \tChange password LdapModifyUser ", ret
return
def get_stuff(oar, uri):
import httplib
- import json
+ import json
headers = {}
- data = json.dumps({})
-
- headers['X-REMOTE_IDENT'] = 'avakian'
-
+ data = json.dumps({})
+
+ headers['X-REMOTE_IDENT'] = 'avakian'
headers['content-length'] = '0' #seems that it does not work if we don't add this
-
+
conn = httplib.HTTPConnection(oar.oarserver['ip'], oar.oarserver['port'])
conn.request("GET", uri, data , headers )
- resp = ( conn.getresponse()).read()
+ resp = (conn.getresponse()).read()
conn.close()
-
+
js = json.loads(resp)
return js
-
-
+
+
def TestOAR(job_id = None):
- print "JOB_ID", job_id
+ print "JOB_ID", job_id
if isinstance(job_id, list) :
if len(job_id) >= 1:
job_id = job_id[0]
else:
job_id = '1'
else:
- job_id = '1'
- print "JOB_ID", job_id
+ job_id = '1'
+ print "JOB_ID", job_id
oar = OARrestapi()
- jobs = oar.parser.SendRequest("GET_reserved_nodes", username = 'avakian')
+ jobs = oar.parser.SendRequest("GET_reserved_nodes", username = 'avakian')
print "\r\n OAR GET_reserved_nodes ", jobs
-
-
- jobs = oar.parser.SendRequest("GET_jobs")
+
+
+ jobs = oar.parser.SendRequest("GET_jobs")
print "\r\n OAR GET_jobs ", jobs
-
-
+
+
jobs = oar.parser.SendRequest("GET_jobs_id", job_id, 'avakian')
print "\r\n OAR GET_jobs_id ", jobs
-
- uri = '/oarapi/jobs/details.json?state=Running,Waiting,Launching&user=avakian'
+
+ uri = '/oarapi/jobs/details.json?state=Running,Waiting,Launching&user=avakian'
raw_json = get_stuff(oar, uri)
print "\r\nOAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
-
+
uri = '/oarapi/jobs/' + job_id +'.json'
- raw_json = get_stuff(oar, uri)
+ raw_json = get_stuff(oar, uri)
print "\r\n OAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
-
+
uri = '/oarapi/jobs/' + job_id + '/resources.json'
raw_json = get_stuff(oar, uri)
print "\r\n OAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
-
+
time_format = "%Y-%m-%d %H:%M:%S"
-
+
server_timestamp, server_tz = oar.parser.SendRequest("GET_timezone")
-
+
print "\r\n OAR GetTimezone ", server_timestamp, server_tz
print(datetime.fromtimestamp(int(server_timestamp)).strftime('%Y-%m-%d %H:%M:%S'))
uri = '/oarapi/resources/full.json'
raw_json = get_stuff(oar, uri)
print "\r\n OAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
-
- uri = '/oarapi/jobs.json?user=avakian'
+
+ uri = '/oarapi/jobs.json?user=avakian'
raw_json = get_stuff(oar, uri)
print "\r\nOAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
return
-
-
-
-def TestSlabDriver(job_id = None):
+
+
+def TestImporter(arg=None):
+ iotlabdriver = IotlabDriver(Config())
+
+ nodes_listdict = iotlabdriver.iotlab_api.GetNodes()
+ sites_listdict = iotlabdriver.iotlab_api.GetSites()
+ nodes_by_id = dict([(node['node_id'], node) for node in nodes_listdict])
+
+ # from sfa.importer.iotlabimporter import IotlabImporter
+ # importer = IotlabImporter()
+
+def TestIotlabDriver(job_id = None):
if job_id is None:
job_id = 1
-
+
if isinstance(job_id, list) and len(job_id) == 1:
job_id = job_id[0]
- slabdriver = SlabDriver(Config())
-
- #nodes = slabdriver.slab_api.GetReservedNodes()
+ iotlabdriver = IotlabDriver(Config())
+
+ #nodes = iotlabdriver.iotlab_api.GetReservedNodes()
#print " \r\n \r\n GetReservedNodes", nodes
-
- #sl = slabdriver.slab_api.GetSlices(slice_filter='senslab.avakian_slice', slice_filter_type='slice_hrn')
- #print "\r\n \r\nGetSlices", sl[0]
-
- #sl = slabdriver.slab_api.GetSlices(slice_filter='20', slice_filter_type='record_id_user')
+
+ sl = iotlabdriver.iotlab_api.GetSlices(slice_filter='iotlab.avakian_slice', slice_filter_type='slice_hrn')
+ print "\r\n \r\nGetSlices", sl[0]
+
+ #sl = iotlabdriver.iotlab_api.GetSlices(slice_filter='20', slice_filter_type='record_id_user')
#print "\r\n \r\nGetSlices", sl
-
- #sl = slabdriver.slab_api.GetSlices()
+
+ #sl = iotlabdriver.iotlab_api.GetSlices()
#print "\r\n \r\nGetSlices", sl
-
- persons = slabdriver.slab_api.GetPersons()
+
+ persons = iotlabdriver.iotlab_api.GetPersons()
print "\r\n \r\n GetPersons", persons
-
- leases = slabdriver.slab_api.GetLeases(login='avakian')
+
+ leases = iotlabdriver.iotlab_api.GetLeases(login='avakian')
+ print "\r\n \r\n GetLeases", leases
+
+ leases = iotlabdriver.iotlab_api.GetLeases(lease_filter_dict={'slice_hrn':'iotlab.avakian_slice'})
print "\r\n \r\n GetLeases", leases
-
+ leases = iotlabdriver.iotlab_api.GetLeases(lease_filter_dict={'t_from':1405070000})
+ print "\r\n \r\n GetLeases", leases
def TestSfi(filename = None):
if filename is None:
filename = "/home/savakian/flab-sfa/test_rspec/my_lyon_nodes.rspec"
print " ================= SFI.PY RESOURCES =============", \
- os.system("sfi.py list senslab")
-
+ os.system("sfi.py list iotlab")
+
print os.system("sfi.py resources")
- print os.system("sfi.py resources -r slab")
+ print os.system("sfi.py resources -r iotlab")
print os.system("sfi.py resources -l all")
-
-
- print "================ SFI.PY RESOURCES -R SLAB -L ALL ============\r\n", \
- os.system("sfi.py resources -r slab -l all")
-
+
+
+ print "================ SFI.PY RESOURCES -R IOTLAB -L ALL ============\r\n", \
+ os.system("sfi.py resources -r iotlab -l all")
+
print "================ WRITING sfi.py resources -l all ===========\r\n", \
filename
-
+
filename = filename.split(".")[0]+"_out.rspec"
rspecfile = open(filename,"w")
- r = os.popen("sfi.py resources -l all")
+ r = os.popen("sfi.py resources -l all")
for i in r.readlines():
rspecfile.write(i)
rspecfile.close()
-
+
print " ================= SFI.PY SHOW SLICE ============= \r\n", \
- os.system("sfi.py resources senslab.avakian_slice")
-
+ os.system("sfi.py resources iotlab.avakian_slice")
+
print " ================= SFI.PY SHOW USER =============\r\n", \
- os.system("sfi.py show senslab.avakian_slice")
+ os.system("sfi.py show iotlab.avakian_slice")
print " ================= SFI.PY SHOW NODE =============\r\n", \
- os.system("sfi.py show senslab.avakian")
+ os.system("sfi.py show iotlab.avakian")
print " ================= SFI.PY SLICES =============\r\n", \
- os.system("sfi.py show senslab.node6.devlille.senslab.info")
+ os.system("sfi.py show iotlab.node6.devlille.iotlab.info")
print " ================= SFI.PY LIST SLICE =============\r\n", \
os.system("sfi.py slices")
print " ================= SFI.PY STATUS SLICE =============\r\n", \
- os.system("sfi.py status senslab.avakian_slice")
-
+ os.system("sfi.py status iotlab.avakian_slice")
+
print " ================= SFI.PY DELETE SLICE =============\r\n", \
- os.system("sfi.py delete senslab.avakian_slice")
-
+ os.system("sfi.py delete iotlab.avakian_slice")
+
print " ================= SFI.PY CREATE SLICE =============\r\n", \
- os.system("sfi.py create senslab.avakian_slice \
+ os.system("sfi.py create iotlab.avakian_slice \
/home/savakian/flab-sfa/test_rspec/my_lyon_nodes.rspec")
-
+
def TestSQL(arg = None):
from sfa.storage.model import make_record, RegSlice, RegRecord
from sfa.storage.alchemy import dbsession
- from sqlalchemy.orm.collections import InstrumentedList
-
- from sqlalchemy.orm import joinedload
-
- #solo_query_slice_list = dbsession.query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn='senslab.avakian_slice').first()
+ from sqlalchemy.orm.collections import InstrumentedList
+
+ from sqlalchemy.orm import joinedload
+
+ #solo_query_slice_list = dbsession.query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn='iotlab.avakian_slice').first()
#print "\r\n \r\n =========== query_slice_list RegSlice \
- #joinedload('reg_researchers') senslab.avakian first \r\n \t ", \
+ #joinedload('reg_researchers') iotlab.avakian first \r\n \t ", \
#solo_query_slice_list.__dict__
-
- #query_slice_list = dbsession.query(RegSlice).options(joinedload('reg_researchers')).all()
+
+ #query_slice_list = dbsession.query(RegSlice).options(joinedload('reg_researchers')).all()
#print "\r\n \r\n =========== query_slice_list RegSlice \
#joinedload('reg_researchers') ALL \r\n \t", \
- #query_slice_list[0].__dict__
-
+ #query_slice_list[0].__dict__
+
#return_slicerec_dictlist = []
#record = query_slice_list[0]
- #print "\r\n \r\n =========== \r\n \t", record
-
+ #print "\r\n \r\n =========== \r\n \t", record
+
#tmp = record.__dict__
- #print "\r\n \r\n =========== \r\n \t", tmp
+ #print "\r\n \r\n =========== \r\n \t", tmp
#tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
- #print "\r\n \r\n =========== \r\n \t", tmp
+ #print "\r\n \r\n =========== \r\n \t", tmp
##del tmp['reg_researchers']['_sa_instance_state']
#return_slicerec_dictlist.append(tmp)
-
+
#print "\r\n \r\n =========== \r\n \t", return_slicerec_dictlist
-
+
all_records = dbsession.query(RegRecord).all()
-
- #create hash by (type,hrn)
- #used to know if a given record is already known to SFA
-
+
+ #create hash by (type,hrn)
+ #used to know if a given record is already known to SFA
+
records_by_type_hrn = \
dict ( [ ( (record.type,record.hrn) , record ) for record in all_records ] )
for (rec_type, rec) in records_by_type_hrn :
if rec_type == 'user':
- print >>sys.stderr,"\r\n SLABIMPORT \t keys %s rec %s \r\n" %(rec_type, rec )
-
+ print >>sys.stderr,"\r\n IOTLABIMPORT \t keys %s rec %s \r\n" %(rec_type, rec )
+
users_rec_by_email = \
dict ( [ (record.email, record) for record in all_records if record.type == 'user' ] )
-
-
+
+
def RunAll( arg ):
TestLdap()
TestOAR()
- TestSlabDriver()
+ TestIotlabDriver()
TestSfi()
-
-
+
+
supported_options = {
'OAR' : TestOAR,
'LDAP': TestLdap,
- 'driver': TestSlabDriver,
+ 'driver': TestIotlabDriver,
'sfi':TestSfi,
'sql':TestSQL,
- 'all' : RunAll }
-
+ 'all' : RunAll,
+ 'import': TestImporter }
+
def main():
opts = parse_options()
print opts
for opt in opts:
supported_options[opt](opts[opt])
-
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env python
+import sys
+import os
+from sfa.iotlab.LDAPapi import LDAPapi
+from difflib import SequenceMatcher
+
+def parse_options():
+
+ #arguments supplied
+ if len(sys.argv) > 1 :
+ options_list = sys.argv[1:]
+ print options_list
+ rspec_rep = options_list[0]
+ return rspec_rep
+ else:
+ print "Must supply Rspecs directory ", sys.argv[1:]
+ return
+
+
+rspec_dir = parse_options()
+print "DIRECTORY SUPPLIED" , rspec_dir
+rspec_filename_list = ['firexp_avakian_slice_iotlab.rspec',
+'firexp_iotlab_slice_iotlab.rspec',
+'iotlab_avakian_slice_iotlab2.rspec',
+'iotlab_avakian_slice_plab.rspec',
+'firexp_iotlab_slice_all.rspec',
+'iotlab_avakian_slice_all.rspec',
+'iotlab_avakian_slice_iotlab.rspec',
+'iotlab_user_slice_iotlab.rspec',
+'test_delete_all_leases.rspec']
+
+rspec_filename_dict = {
+ ('iotlab_avakian', 'iotlab'):
+ "sfi.py create iotlab.avakian_slice " + rspec_dir + \
+ 'iotlab_avakian_slice_iotlab.rspec',
+
+ ('iotlab_avakian', 'iotlab2'):
+ "sfi.py create iotlab.avakian_slice " + rspec_dir + \
+ 'iotlab_avakian_slice_iotlab2.rspec',
+
+ ('firexp_user','iotlab'):
+ "sfi.py create firexp.flab.iotlab_slice " + rspec_dir + \
+ 'firexp_iotlab_slice_iotlab.rspec',
+
+ ('firexp_user', 'all'):
+ "sfi.py create firexp.flab.iotlab_slice "+ rspec_dir + \
+ 'firexp_iotlab_slice_all.rspec',
+
+ ('iotlab_user', 'iotlab'):
+ "sfi.py create iotlab.user_slice "+ rspec_dir + \
+ 'iotlab_user_slice_iotlab.rspec',
+
+ ('firexp_avakian','iotlab'):
+ "sfi.py create firexp.flab.avakian_slice " + rspec_dir + \
+ 'firexp_avakian_slice_iotlab.rspec',
+
+ ('iotlab_avakian', 'plab') :
+ "sfi.py create iotlab.avakian_slice " + rspec_dir + \
+ 'iotlab_avakian_slice_plab.rspec',
+
+ ('iotlab_avakian', 'all') :
+ "sfi.py create iotlab.avakian_slice " + rspec_dir + \
+ 'iotlab_avakian_slice_all.rspec'
+
+ }
+# check if the firexp user (uid user) is already in LDAP
+# in this is the case, delete it :
+ldap_server = LDAPapi()
+dn = 'uid=' + 'user' + ',' + ldap_server.baseDN
+result = ldap_server.LdapSearch('(uid=user)', [])
+
+if result != []:
+ retval = ldap_server.LdapDelete(dn)
+ print "deleting firexp user : ", retval
+
+
+print "config sfi"
+with open ("/root/.sfi/sfi_config", "r") as sfi_config:
+ sfi_config_txt = [line for line in sfi_config]
+
+with open("/root/.sfi/sfi_config_iotlab", "r") as sfi_config_iotlab:
+ sfi_config_iotlab_txt = [line for line in sfi_config_iotlab]
+
+with open("/root/.sfi/sfi_config_firexp", "r") as sfi_config_firexp:
+ sfi_config_firexp_txt = [line for line in sfi_config_firexp]
+# check that we are using the iotlab sfi configuration
+result1 = SequenceMatcher(None, sfi_config_txt, sfi_config_iotlab_txt)
+
+result2 = SequenceMatcher(None, sfi_config_txt, sfi_config_firexp_txt)
+
+if result1.ratio() != 1.0:
+ os.system('cp /root/.sfi/sfi_config_iotlab /root/.sfi/sfi_config')
+
+os.system('cat /root/.sfi/sfi_config')
+os.system('rm /root/tests_rspecs/iotlab_devlille_OUTPUT.rspec')
+
+print " ================= SFI.PY LIST IOTLAB ============="
+os.system('sfi.py list iotlab')
+
+
+print " ================= SFI.PY RESOURCES ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py resources')
+
+
+print " ================= SFI.PY RESOURCES -R IOTLAB ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py resources -r iotlab')
+
+
+print " ================= SFI.PY RESOURCES -L ALL ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py resources -l all')
+
+print " ================= SFI.PY RESOURCES -R IOTLAB -L ALL ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py resources -r iotlab -l all')
+
+print " ================= SFI.PY RESOURCES -O output rspec ==========="
+os.system('sfi.py resources -o /root/tests_rspecs/iotlab_devlille_OUTPUT.rspec')
+
+print " ================= SFI.PY RESOURCES -L LEASES ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py resources -l leases')
+
+
+print " ================= SFI.PY SHOW USER ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py show iotlab.avakian')
+
+print " ================= SFI.PY SHOW NODE ============="
+os.system('sfi.py show m3-3.devgrenoble.iot-lab.info')
+
+print " ================= SFI.PY SLICES ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py slices')
+
+print " ================= SFI.PY STATUS SLICE ============="
+os.system('sfi.py status iotlab.avakian_slice')
+
+print " ================= SFI.PY CREATE SLICE on iotlab only ============="
+raw_input("Press Enter to continue...")
+os.system( rspec_filename_dict[('iotlab_avakian','iotlab')])
+
+
+print " ================= SFI.PY RESOURCES -l all iotlab.avakian_slice ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py resources -l all iotlab.avakian_slice')
+
+
+print " ================= SFI.PY DELETE SLICE ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py delete iotlab.avakian_slice')
+
+
+print " ================= SFI.PY CREATE SLICE on iotlab and firexp ============="
+raw_input("Press Enter to continue...")
+os.system(rspec_filename_dict[('iotlab_avakian','all')])
+
+
+print " ================= SFI.PY RESOURCES -l all -r iotlab iotlab.avakian_slice ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py resources -l all -r iotlab iotlab.avakian_slice')
+
+
+print " =================SFI.PY RESOURCES -L LEASES -R IOTLAB ============== "
+os.system('sfi.py resources -r iotlab -l leases')
+
+
+print " ================= SFI.PY DELETE SLICE ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py delete iotlab.avakian_slice')
+
+print "\r\n \r\n"
+
+print " *********changing to firexp sfi config ***************"
+os.system('cp /root/.sfi/sfi_config_firexp /root/.sfi/sfi_config')
+
+
+
+print " ================= SFI.PY CREATE SLICE on iotlab and firexp ============="
+raw_input("Press Enter to continue...")
+os.system(rspec_filename_dict[('firexp_user','all')])
+
+
+print " ================= SFI.PY SHOW SLICE ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py show firexp.flab.iotlab_slice')
+
+
+print " ================= SFI.PY RESOURCES -l leases firexp.flab.iotlab_slice ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py resources -l leases firexp.flab.iotlab_slice')
+
+
+print " ================= SFI.PY RESOURCES firexp.flab.iotlab_slice ============="
+raw_input("Press Enter to continue...")
+os.system('sfi.py resources firexp.flab.iotlab_slice')
+
+
+
+
--- /dev/null
+Rspec file names
+======================
+Rspec file names are constructed as follows :
+ slice name used in this rspec + network in which the reserved nodes are
+
+Networks can be : iotlab, plab, all (iotlab + plab)
+
+Slices and users
+=================
+user:
+login iotlab : user
+hrn iotlab: iotlab.user
+hrn firexp: firexp.flab.iotlab_user
+slice iotlab: iotlab.user_slice
+slice firexp : firexp.flab.iotlab_slice
+
+
+This special test user comes from Firexp and is considered as an
+external user coming from a federated testbed for Iotlab.
+
+user:
+login iotlab: avakian
+slice iotlab: iotlab.avakian_slice
+hrn firexp : firexp.flab.avakian (?)
+slice firexp : firexp.flab.avakian_slice (?)
+
+This user comes from iotlab.
+
+
+Leases
+======
+
+The starting time of the leases in those RSpec files are
+usually set to be in 2014, so that we don't have to keep the
+date in mind and check that we are not scheduling a lease
+in the past.
--- /dev/null
+<?xml version="1.0"?>
+<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
+ <network name="iotlab">
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
+ <hostname>wsn430-12.devlille.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="60"/>
+ <sliver/>
+ </node>
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
+ <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="60"/>
+ <sliver/>
+ </node>
+ </network>
+ <lease slice_id="urn:publicid:IDN+firexp:flab+slice+avakian_slice" start_time="1386765700" duration="10">
+ <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
+ <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
+ </lease>
+</RSpec>
--- /dev/null
+<?xml version="1.0"?>
+<RSpec type="SFA" expires="2013-09-05T13:30:10Z" generated="2013-09-05T12:30:10Z">
+ <network name="plab" >
+ <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
+ <hostname>effet.pl.sophia.inria.fr</hostname>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="3600"/>
+ <interface component_id="urn:publicid:IDN+plab+interface+node2:eth0" ipv4="138.96.116.135"/>
+ <hrn>planetlab.test.plab.effet</hrn>
+ <sliver/>
+ </node>
+ <lease slice_id="urn:publicid:IDN+firexp:flab+slice+iotlab_slice" start_time="1412938800" duration="1">
+ <node component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr"/>
+ </lease>
+ </network>
+ <network name="iotlab">
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
+ <hostname>wsn430-12.devlille.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="60"/>
+ <sliver/>
+ </node>
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
+ <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="60"/>
+ <sliver/>
+ </node>
+ <lease slice_id="urn:publicid:IDN+firexp:flab+slice+iotlab_slice" start_time="1412938800" duration="60">
+ <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
+ <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
+ </lease>
+ </network>
+</RSpec>
+
+
--- /dev/null
+<?xml version="1.0"?>
+<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
+ <network name="iotlab">
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="node5.devlille.iotlab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
+ <hostname>wsn430-12.devlille.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="1"/>
+ <sliver/>
+ </node>
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
+ <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="1"/>
+ <sliver/>
+ </node>
+ </network>
+ <lease slice_id="urn:publicid:IDN+firexp:flab+slice+iotlab_slice" start_time="1405078900" duration="600">
+ <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
+ <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
+ </lease>
+</RSpec>
--- /dev/null
+<?xml version="1.0"?>
+<RSpec type="SFA" expires="2013-09-05T13:30:10Z" generated="2013-09-05T12:30:10Z">
+ <network name="plab" >
+ <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
+ <hostname>effet.pl.sophia.inria.fr</hostname>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="3600"/>
+ <interface component_id="urn:publicid:IDN+plab+interface+node2:eth0" ipv4="138.96.116.135"/>
+ <hrn>planetlab.test.plab.effet</hrn>
+ <sliver/>
+ </node>
+ <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1410346800" duration="1">
+ <node component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr"/>
+ </lease>
+ </network>
+ <network name="iotlab">
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
+ <hostname>wsn430-12.devlille.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="60"/>
+ <sliver/>
+ </node>
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
+ <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="60"/>
+ <sliver/>
+ </node>
+ <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1410346800" duration="60">
+ <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
+ <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
+ </lease>
+ </network>
+</RSpec>
+
+
--- /dev/null
+<?xml version="1.0"?>
+<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
+ <network name="iotlab">
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
+ <hostname>wsn430-12.devlille.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="60"/>
+ <sliver/>
+ </node>
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
+ <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="60"/>
+ <sliver/>
+ </node>
+ </network>
+ <lease slice_id="urn:publicid:IDN+iotlab+slice+sandrine_slice" start_time="1405078836" duration="20">
+ <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
+ <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
+ </lease>
+</RSpec>
--- /dev/null
+<?xml version="1.0"?>
+<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
+ <network name="iotlab">
+        <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-8.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-8.devlille.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
+ <hostname>wsn430-8.devlille.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="600"/>
+ <sliver/>
+ </node>
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-5.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-5.devlille.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
+ <hostname>wsn430-5.devlille.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="600"/>
+ <sliver/>
+ </node>
+ </network>
+ <network name="plab">
+ <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
+ <hostname>effet.pl.sophia.inria.fr</hostname>
+ <exclusive>FALSE</exclusive>
+ <interface component_id="urn:publicid:IDN+plab+interface+node1:eth0" ipv4="138.96.116.135"/>
+ <arch>x86_64</arch>
+ <fcdistro>f14</fcdistro>
+ <pldistro>onelab</pldistro>
+ <hrn>planetlab.test.plab.effet</hrn>
+ <sliver/>
+ </node>
+ <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1405078900" duration="10">
+ <node component_id="urn:publicid:IDN+iotlab+node+wsn430-5.devlille.iot-lab.info"/>
+ <node component_id="urn:publicid:IDN+iotlab+node+wsn430-8.devlille.iot-lab.info"/>
+ </lease>
+ </network>
+</RSpec>
--- /dev/null
+<?xml version="1.0"?>
+<RSpec type="SFA" expires="2014-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
+ <network name="plab">
+ <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
+ <hostname>effet.pl.sophia.inria.fr</hostname>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="3600"/>
+ <interface component_id="urn:publicid:IDN+plab+interface+node2:eth0" ipv4="138.96.116.135"/>
+ <hrn>planetlab.test.plab.effet</hrn>
+ <sliver/>
+ </node>
+ </network>
+ <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1405080000" duration="2">
+ <node component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr"/>
+ </lease>
+</RSpec>
--- /dev/null
+<?xml version="1.0"?>
+<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
+ <network name="iotlab">
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
+ <hostname>wsn430-12.devlille.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="600"/>
+ <sliver/>
+ </node>
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
+            <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
+ <location country="France"/>
+ <exclusive>TRUE</exclusive>
+ <granularity grain="600"/>
+ <sliver/>
+ </node>
+ </network>
+ <lease slice_id="urn:publicid:IDN+iotlab+slice+user_slice" start_time="1405078900" duration="601">
+ <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
+ <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
+ </lease>
+</RSpec>
--- /dev/null
+<?xml version="1.0"?>
+<RSpec type="SFA" expires="2013-09-05T13:30:10Z" generated="2013-09-05T12:30:10Z">
+ <network name="plab" >
+ </network>
+ <network name="iotlab">
+ </network>
+</RSpec>
+
+
+++ /dev/null
-#!/bin/bash
-if (( ! $# == 2 ))
-then
- echo " Usage : bash_test takes 2 arguments : one jobid and one of the following:"
- echo " LDAP/ OAR / driver "
- echo $#
- exit
-fi
-
-sfi.py list senslab2
-echo " ================= SFI.PY RESOURCES ============="
-sfi.py resources
-
-echo " ================= SFI.PY RESOURCES -R SLAB ============="
-sfi.py resources -r slab
-
-echo " ================= SFI.PY RESOURCES -L ALL ============="
-sfi.py resources -l all
-
-echo " ================= SFI.PY RESOURCES -R SLAB -L ALL ============="
-sfi.py resources -r slab -l all
-
-echo " ================= SFI.PY RESOURCES -L ALL > avakian_adv.rspec ============="
-sfi.py resources -l all > /home/savakian/flab-sfa/avakian_adv.rspec
-
-echo " ================= SFI.PY RESOURCES avakian_adv.rspec ============="
-sfi.py resources senslab2.avakian_slice
-
-
-echo " ================= SFI.PY SHOW SLICE ============="
-sfi.py show senslab2.avakian_slice
-
-echo " ================= SFI.PY SHOW USER ============="
-sfi.py show senslab2.avakian
-
-echo " ================= SFI.PY SHOW NODE ============="
-sfi.py show senslab2.node67.grenoble.senslab.info
-
-echo " ================= SFI.PY SLICES ============="
-sfi.py slices
-
-echo " ================= SFI.PY STATUS SLICE ============="
-sfi.py status senslab2.avakian_slice
-
-echo " ================= SFI.PY CREATE SLICE ============="
-sfi.py create senslab2.avakian_slice /home/savakian/flab-sfa/avakian_adv.rspec
-
-# echo " ================= SFI.PY DELETE SLICE ============="
-# sfi.py delete senslab2.avakian_slice
-
-echo "\r\n"
-echo " PYTHON TEST ", $1, $2