+++ /dev/null
-"""
-This API is adapted for OpenLDAP. The file contains all LDAP classes and methods
-needed to:
-- Load the LDAP connection configuration file (login, address..) with LdapConfig
-- Connect to LDAP with ldap_co
-- Create a unique LDAP login and password for a user based on his email or last
-name and first name with LoginPassword.
-- Manage entries in LDAP using SFA records with LDAPapi (Search, Add, Delete,
-Modify)
-
-"""
-import random
-from passlib.hash import ldap_salted_sha1 as lssha
-
-from sfa.util.xrn import get_authority
-from sfa.util.sfalogging import logger
-from sfa.util.config import Config
-
-import ldap
-import ldap.modlist as modlist
-
-import os.path
-
-
-class LdapConfig():
- """
- Ldap configuration class loads the configuration file and sets the
- ldap IP address, password, people dn, web dn, group dn. All these settings
- were defined in a separate file ldap_config.py to avoid sharing them in
- the SFA git as it contains sensible information.
-
- """
- def __init__(self, config_file='/etc/sfa/ldap_config.py'):
- """Loads configuration from file /etc/sfa/ldap_config.py and set the
- parameters for connection to LDAP.
-
- """
-
- try:
- execfile(config_file, self.__dict__)
-
- self.config_file = config_file
- # path to configuration data
- self.config_path = os.path.dirname(config_file)
- except IOError:
- raise IOError, "Could not find or load the configuration file: %s" \
- % config_file
-
-
-class ldap_co:
- """ Set admin login and server configuration variables."""
-
- def __init__(self):
- """Fetch LdapConfig attributes (Ldap server connection parameters and
- defines port , version and subtree scope.
-
- """
- #Iotlab PROD LDAP parameters
- self.ldapserv = None
- ldap_config = LdapConfig()
- self.config = ldap_config
- self.ldapHost = ldap_config.LDAP_IP_ADDRESS
- self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN
- self.ldapGroupDN = ldap_config.LDAP_GROUP_DN
- self.ldapAdminDN = ldap_config.LDAP_WEB_DN
- self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD
- self.ldapPort = ldap.PORT
- self.ldapVersion = ldap.VERSION3
- self.ldapSearchScope = ldap.SCOPE_SUBTREE
-
- def connect(self, bind=True):
- """Enables connection to the LDAP server.
-
- :param bind: Set the bind parameter to True if a bind is needed
- (for add/modify/delete operations). Set to False otherwise.
- :type bind: boolean
- :returns: dictionary with status of the connection. True if Successful,
- False if not and in this case the error
- message( {'bool', 'message'} ).
- :rtype: dict
-
- """
- try:
- self.ldapserv = ldap.open(self.ldapHost)
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
- # Bind with authentification
- if(bind):
- return self.bind()
-
- else:
- return {'bool': True}
-
- def bind(self):
- """ Binding method.
-
- :returns: dictionary with the bind status. True if Successful,
- False if not and in this case the error message({'bool','message'})
- :rtype: dict
-
- """
- try:
- # Opens a connection after a call to ldap.open in connect:
- self.ldapserv = ldap.initialize("ldap://" + self.ldapHost)
-
- # Bind/authenticate with a user with apropriate
- #rights to add objects
- self.ldapserv.simple_bind_s(self.ldapAdminDN,
- self.ldapAdminPassword)
-
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
- return {'bool': True}
-
- def close(self):
- """Close the LDAP connection.
-
- Can throw an exception if the unbinding fails.
-
- :returns: dictionary with the bind status if the unbinding failed and
- in this case the dict contains an error message. The dictionary keys
- are : ({'bool','message'})
- :rtype: dict or None
-
- """
- try:
- self.ldapserv.unbind_s()
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
-
-class LoginPassword():
- """
-
- Class to handle login and password generation, using custom login generation
- algorithm.
-
- """
- def __init__(self):
- """
-
- Sets password and login maximum length, and defines the characters that
- can be found in a random generated password.
-
- """
- self.login_max_length = 8
- self.length_password = 8
- self.chars_password = ['!', '$', '(',')', '*', '+', ',', '-', '.',
- '0', '1', '2', '3', '4', '5', '6', '7', '8',
- '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
- 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
- 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
- '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- '\'']
-
- @staticmethod
- def clean_user_names(record):
- """
-
- Removes special characters such as '-', '_' , '[', ']' and ' ' from the
- first name and last name.
-
- :param record: user's record
- :type record: dict
- :returns: lower_first_name and lower_last_name if they were found
- in the user's record. Return None, none otherwise.
- :rtype: string, string or None, None.
-
- """
- if 'first_name' in record and 'last_name' in record:
- #Remove all special characters from first_name/last name
- lower_first_name = record['first_name'].replace('-', '')\
- .replace('_', '').replace('[', '')\
- .replace(']', '').replace(' ', '')\
- .lower()
- lower_last_name = record['last_name'].replace('-', '')\
- .replace('_', '').replace('[', '')\
- .replace(']', '').replace(' ', '')\
- .lower()
- return lower_first_name, lower_last_name
- else:
- return None, None
-
- @staticmethod
- def extract_name_from_email(record):
- """
-
- When there is no valid first name and last name in the record,
- the email is used to generate the login. Here, we assume the email
- is firstname.lastname@something.smthg. The first name and last names
- are extracted from the email, special charcaters are removed and
- they are changed into lower case.
-
- :param record: user's data
- :type record: dict
- :returns: the first name and last name taken from the user's email.
- lower_first_name, lower_last_name.
- :rtype: string, string
-
- """
-
- email = record['email']
- email = email.split('@')[0].lower()
- lower_first_name = None
- lower_last_name = None
- #Assume there is first name and last name in email
- #if there is a separator
- separator_list = ['.', '_', '-']
- for sep in separator_list:
- if sep in email:
- mail = email.split(sep)
- lower_first_name = mail[0]
- lower_last_name = mail[1]
- break
-
- #Otherwise just take the part before the @ as the
- #lower_first_name and lower_last_name
- if lower_first_name is None:
- lower_first_name = email
- lower_last_name = email
-
- return lower_first_name, lower_last_name
-
- def get_user_firstname_lastname(self, record):
- """
-
- Get the user first name and last name from the information we have in
- the record.
-
- :param record: user's information
- :type record: dict
- :returns: the user's first name and last name.
-
- .. seealso:: clean_user_names
- .. seealso:: extract_name_from_email
-
- """
- lower_first_name, lower_last_name = self.clean_user_names(record)
-
- #No first name and last name check email
- if lower_first_name is None and lower_last_name is None:
-
- lower_first_name, lower_last_name = \
- self.extract_name_from_email(record)
-
- return lower_first_name, lower_last_name
-
- def choose_sets_chars_for_login(self, lower_first_name, lower_last_name):
- """
-
- Algorithm to select sets of characters from the first name and last
- name, depending on the lenght of the last name and the maximum login
- length which in our case is set to 8 characters.
-
- :param lower_first_name: user's first name in lower case.
- :param lower_last_name: usr's last name in lower case.
- :returns: user's login
- :rtype: string
-
- """
- length_last_name = len(lower_last_name)
- self.login_max_length = 8
-
- #Try generating a unique login based on first name and last name
-
- if length_last_name >= self.login_max_length:
- login = lower_last_name[0:self.login_max_length]
- index = 0
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name >= 4:
- login = lower_last_name
- index = 0
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name == 3:
- login = lower_first_name[0:1] + lower_last_name
- index = 1
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name == 2:
- if len(lower_first_name) >= 2:
- login = lower_first_name[0:2] + lower_last_name
- index = 2
- logger.debug("login : %s index : %s" % (login, index))
- else:
- logger.error("LoginException : \
- Generation login error with \
- minimum four characters")
-
- else:
- logger.error("LDAP LdapGenerateUniqueLogin failed : \
- impossible to generate unique login for %s %s"
- % (lower_first_name, lower_last_name))
- return index, login
-
- def generate_password(self):
- """
-
- Generate a password upon adding a new user in LDAP Directory
- (8 characters length). The generated password is composed of characters
- from the chars_password list.
-
- :returns: the randomly generated password
- :rtype: string
-
- """
- password = str()
-
- length = len(self.chars_password)
- for index in range(self.length_password):
- char_index = random.randint(0, length - 1)
- password += self.chars_password[char_index]
-
- return password
-
- @staticmethod
- def encrypt_password(password):
- """
-
- Use passlib library to make a RFC2307 LDAP encrypted password salt size
- is 8, use sha-1 algorithm.
-
- :param password: password not encrypted.
- :type password: string
- :returns: Returns encrypted password.
- :rtype: string
-
- """
- #Keep consistency with Java Iotlab's LDAP API
- #RFC2307SSHAPasswordEncryptor so set the salt size to 8 bytes
- return lssha.encrypt(password, salt_size=8)
-
-
-class LDAPapi:
- """Defines functions to insert and search entries in the LDAP.
-
- .. note:: class supposes the unix schema is used
-
- """
- def __init__(self):
- logger.setLevelDebug()
-
- #SFA related config
-
- config = Config()
- self.login_pwd = LoginPassword()
- self.authname = config.SFA_REGISTRY_ROOT_AUTH
- self.conn = ldap_co()
- self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS
- self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN
- self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER
- self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH
- self.baseDN = self.conn.ldapPeopleDN
- self.ldapShell = '/bin/bash'
-
-
- def LdapGenerateUniqueLogin(self, record):
- """
-
- Generate login for adding a new user in LDAP Directory
- (four characters minimum length). Get proper last name and
- first name so that the user's login can be generated.
-
- :param record: Record must contain first_name and last_name.
- :type record: dict
- :returns: the generated login for the user described with record if the
- login generation is successful, None if it fails.
- :rtype: string or None
-
- """
- #For compatibility with other ldap func
- if 'mail' in record and 'email' not in record:
- record['email'] = record['mail']
-
- lower_first_name, lower_last_name = \
- self.login_pwd.get_user_firstname_lastname(record)
-
- index, login = self.login_pwd.choose_sets_chars_for_login(
- lower_first_name, lower_last_name)
-
- login_filter = '(uid=' + login + ')'
- get_attrs = ['uid']
- try:
- #Check if login already in use
-
- while (len(self.LdapSearch(login_filter, get_attrs)) is not 0):
-
- index += 1
- if index >= 9:
- logger.error("LoginException : Generation login error \
- with minimum four characters")
- else:
- try:
- login = \
- lower_first_name[0:index] + \
- lower_last_name[0:
- self.login_pwd.login_max_length
- - index]
- login_filter = '(uid=' + login + ')'
- except KeyError:
- print "lower_first_name - lower_last_name too short"
-
- logger.debug("LDAP.API \t LdapGenerateUniqueLogin login %s"
- % (login))
- return login
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapGenerateUniqueLogin Error %s" % (error))
- return None
-
- def find_max_uidNumber(self):
- """Find the LDAP max uidNumber (POSIX uid attribute).
-
- Used when adding a new user in LDAP Directory
-
- :returns: max uidNumber + 1
- :rtype: string
-
- """
- #First, get all the users in the LDAP
- get_attrs = "(uidNumber=*)"
- login_filter = ['uidNumber']
-
- result_data = self.LdapSearch(get_attrs, login_filter)
- #It there is no user in LDAP yet, First LDAP user
- if result_data == []:
- max_uidnumber = self.ldapUserUidNumberMin
- #Otherwise, get the highest uidNumber
- else:
- uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data]
- logger.debug("LDAPapi.py \tfind_max_uidNumber \
- uidNumberList %s " % (uidNumberList))
- max_uidnumber = max(uidNumberList) + 1
-
- return str(max_uidnumber)
-
-
- def get_ssh_pkey(self, record):
- """TODO ; Get ssh public key from sfa record
- To be filled by N. Turro ? or using GID pl way?
-
- """
- return 'A REMPLIR '
-
- @staticmethod
- #TODO Handle OR filtering in the ldap query when
- #dealing with a list of records instead of doing a for loop in GetPersons
- def make_ldap_filters_from_record(record=None):
- """Helper function to make LDAP filter requests out of SFA records.
-
- :param record: user's sfa record. Should contain first_name,last_name,
- email or mail, and if the record is enabled or not. If the dict
- record does not have all of these, must at least contain the user's
- email.
- :type record: dict
- :returns: LDAP request
- :rtype: string
-
- """
- req_ldap = ''
- req_ldapdict = {}
- if record :
- if 'first_name' in record and 'last_name' in record:
- if record['first_name'] != record['last_name']:
- req_ldapdict['cn'] = str(record['first_name'])+" "\
- + str(record['last_name'])
- if 'email' in record:
- req_ldapdict['mail'] = record['email']
- if 'mail' in record:
- req_ldapdict['mail'] = record['mail']
- if 'enabled' in record:
- if record['enabled'] is True:
- req_ldapdict['shadowExpire'] = '-1'
- else:
- req_ldapdict['shadowExpire'] = '0'
-
- #Hrn should not be part of the filter because the hrn
- #presented by a certificate of a SFA user not imported in
- #Iotlab does not include the iotlab login in it
- #Plus, the SFA user may already have an account with iotlab
- #using another login.
-
- logger.debug("\r\n \t LDAP.PY make_ldap_filters_from_record \
- record %s req_ldapdict %s"
- % (record, req_ldapdict))
-
- for k in req_ldapdict:
- req_ldap += '(' + str(k) + '=' + str(req_ldapdict[k]) + ')'
- if len(req_ldapdict.keys()) >1 :
- req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
- size = len(req_ldap)
- req_ldap = req_ldap[:(size-1)] + ')' + req_ldap[(size-1):]
- else:
- req_ldap = "(cn=*)"
-
- return req_ldap
-
- def make_ldap_attributes_from_record(self, record):
- """
-
- When adding a new user to Iotlab's LDAP, creates an attributes
- dictionnary from the SFA record understandable by LDAP. Generates the
- user's LDAP login.User is automatically validated (account enabled)
- and described as a SFA USER FROM OUTSIDE IOTLAB.
-
- :param record: must contain the following keys and values:
- first_name, last_name, mail, pkey (ssh key).
- :type record: dict
- :returns: dictionary of attributes using LDAP data structure model.
- :rtype: dict
-
- """
-
- attrs = {}
- attrs['objectClass'] = ["top", "person", "inetOrgPerson",
- "organizationalPerson", "posixAccount",
- "shadowAccount", "systemQuotas",
- "ldapPublicKey"]
-
- attrs['uid'] = self.LdapGenerateUniqueLogin(record)
- try:
- attrs['givenName'] = str(record['first_name']).lower().capitalize()
- attrs['sn'] = str(record['last_name']).lower().capitalize()
- attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']
- attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']
-
- except KeyError:
- attrs['givenName'] = attrs['uid']
- attrs['sn'] = attrs['uid']
- attrs['cn'] = attrs['uid']
- attrs['gecos'] = attrs['uid']
-
- attrs['quota'] = self.ldapUserQuotaNFS
- attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']
- attrs['loginShell'] = self.ldapShell
- attrs['gidNumber'] = self.ldapUserGidNumber
- attrs['uidNumber'] = self.find_max_uidNumber()
- attrs['mail'] = record['mail'].lower()
- try:
- attrs['sshPublicKey'] = record['pkey']
- except KeyError:
- attrs['sshPublicKey'] = self.get_ssh_pkey(record)
-
-
- #Password is automatically generated because SFA user don't go
- #through the Iotlab website used to register new users,
- #There is no place in SFA where users can enter such information
- #yet.
- #If the user wants to set his own password , he must go to the Iotlab
- #website.
- password = self.login_pwd.generate_password()
- attrs['userPassword'] = self.login_pwd.encrypt_password(password)
-
- #Account automatically validated (no mail request to admins)
- #Set to 0 to disable the account, -1 to enable it,
- attrs['shadowExpire'] = '-1'
-
- #Motivation field in Iotlab
- attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'
-
- attrs['ou'] = 'SFA' #Optional: organizational unit
- #No info about those here:
- attrs['l'] = 'To be defined'#Optional: Locality.
- attrs['st'] = 'To be defined' #Optional: state or province (country).
-
- return attrs
-
-
-
- def LdapAddUser(self, record) :
- """Add SFA user to LDAP if it is not in LDAP yet.
-
- :param record: dictionnary with the user's data.
- :returns: a dictionary with the status (Fail= False, Success= True)
- and the uid of the newly added user if successful, or the error
- message it is not. Dict has keys bool and message in case of
- failure, and bool uid in case of success.
- :rtype: dict
-
- .. seealso:: make_ldap_filters_from_record
-
- """
- logger.debug(" \r\n \t LDAP LdapAddUser \r\n\r\n ================\r\n ")
- user_ldap_attrs = self.make_ldap_attributes_from_record(record)
-
- #Check if user already in LDAP wih email, first name and last name
- filter_by = self.make_ldap_filters_from_record(user_ldap_attrs)
- user_exist = self.LdapSearch(filter_by)
- if user_exist:
- logger.warning(" \r\n \t LDAP LdapAddUser user %s %s \
- already exists" % (user_ldap_attrs['sn'],
- user_ldap_attrs['mail']))
- return {'bool': False}
-
- #Bind to the server
- result = self.conn.connect()
-
- if(result['bool']):
-
- # A dict to help build the "body" of the object
- logger.debug(" \r\n \t LDAP LdapAddUser attrs %s "
- % user_ldap_attrs)
-
- # The dn of our new entry/object
- dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN
-
- try:
- ldif = modlist.addModlist(user_ldap_attrs)
- logger.debug("LDAPapi.py add attrs %s \r\n ldif %s"
- % (user_ldap_attrs, ldif))
- self.conn.ldapserv.add_s(dn, ldif)
-
- logger.info("Adding user %s login %s in LDAP"
- % (user_ldap_attrs['cn'], user_ldap_attrs['uid']))
- except ldap.LDAPError, error:
- logger.log_exc("LDAP Add Error %s" % error)
- return {'bool': False, 'message': error}
-
- self.conn.close()
- return {'bool': True, 'uid': user_ldap_attrs['uid']}
- else:
- return result
-
- def LdapDelete(self, person_dn):
- """Deletes a person in LDAP. Uses the dn of the user.
-
- :param person_dn: user's ldap dn.
- :type person_dn: string
- :returns: dictionary with bool True if successful, bool False
- and the error if not.
- :rtype: dict
-
- """
- #Connect and bind
- result = self.conn.connect()
- if(result['bool']):
- try:
- self.conn.ldapserv.delete_s(person_dn)
- self.conn.close()
- return {'bool': True}
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP Delete Error %s" % error)
- return {'bool': False, 'message': error}
-
- def LdapDeleteUser(self, record_filter):
- """Deletes a SFA person in LDAP, based on the user's hrn.
-
- :param record_filter: Filter to find the user to be deleted. Must
- contain at least the user's email.
- :type record_filter: dict
- :returns: dict with bool True if successful, bool False and error
- message otherwise.
- :rtype: dict
-
- .. seealso:: LdapFindUser docstring for more info on record filter.
- .. seealso:: LdapDelete for user deletion
-
- """
- #Find uid of the person
- person = self.LdapFindUser(record_filter, [])
- logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s"
- % (record_filter, person))
-
- if person:
- dn = 'uid=' + person['uid'] + "," + self.baseDN
- else:
- return {'bool': False}
-
- result = self.LdapDelete(dn)
- return result
-
- def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
- """ Modifies a LDAP entry, replaces user's old attributes with
- the new ones given.
-
- :param dn: user's absolute name in the LDAP hierarchy.
- :param old_attributes_dict: old user's attributes. Keys must match
- the ones used in the LDAP model.
- :param new_attributes_dict: new user's attributes. Keys must match
- the ones used in the LDAP model.
- :type dn: string
- :type old_attributes_dict: dict
- :type new_attributes_dict: dict
- :returns: dict bool True if Successful, bool False if not.
- :rtype: dict
-
- """
-
- ldif = modlist.modifyModlist(old_attributes_dict, new_attributes_dict)
- # Connect and bind/authenticate
- result = self.conn.connect()
- if (result['bool']):
- try:
- self.conn.ldapserv.modify_s(dn, ldif)
- self.conn.close()
- return {'bool': True}
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapModify Error %s" % error)
- return {'bool': False}
-
-
- def LdapModifyUser(self, user_record, new_attributes_dict):
- """
-
- Gets the record from one user based on the user sfa recordand changes
- the attributes according to the specified new_attributes. Do not use
- this if we need to modify the uid. Use a ModRDN operation instead
- ( modify relative DN ).
-
- :param user_record: sfa user record.
- :param new_attributes_dict: new user attributes, keys must be the
- same as the LDAP model.
- :type user_record: dict
- :type new_attributes_dict: dict
- :returns: bool True if successful, bool False if not.
- :rtype: dict
-
- .. seealso:: make_ldap_filters_from_record for info on what is mandatory
- in the user_record.
- .. seealso:: make_ldap_attributes_from_record for the LDAP objectclass.
-
- """
- if user_record is None:
- logger.error("LDAP \t LdapModifyUser Need user record ")
- return {'bool': False}
-
- #Get all the attributes of the user_uid_login
- #person = self.LdapFindUser(record_filter,[])
- req_ldap = self.make_ldap_filters_from_record(user_record)
- person_list = self.LdapSearch(req_ldap, [])
- logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s"
- % (person_list))
-
- if person_list and len(person_list) > 1:
- logger.error("LDAP \t LdapModifyUser Too many users returned")
- return {'bool': False}
- if person_list is None:
- logger.error("LDAP \t LdapModifyUser User %s doesn't exist "
- % (user_record))
- return {'bool': False}
-
- # The dn of our existing entry/object
- #One result only from ldapSearch
- person = person_list[0][1]
- dn = 'uid=' + person['uid'][0] + "," + self.baseDN
-
- if new_attributes_dict:
- old = {}
- for k in new_attributes_dict:
- if k not in person:
- old[k] = ''
- else:
- old[k] = person[k]
- logger.debug(" LDAPapi.py \t LdapModifyUser new_attributes %s"
- % (new_attributes_dict))
- result = self.LdapModify(dn, old, new_attributes_dict)
- return result
- else:
- logger.error("LDAP \t LdapModifyUser No new attributes given. ")
- return {'bool': False}
-
-
- def LdapMarkUserAsDeleted(self, record):
- """
-
- Sets shadowExpire to 0, disabling the user in LDAP. Calls LdapModifyUser
- to change the shadowExpire of the user.
-
- :param record: the record of the user who has to be disabled.
- Should contain first_name,last_name, email or mail, and if the
- record is enabled or not. If the dict record does not have all of
- these, must at least contain the user's email.
- :type record: dict
- :returns: {bool: True} if successful or {bool: False} if not
- :rtype: dict
-
- .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
- """
-
- new_attrs = {}
- #Disable account
- new_attrs['shadowExpire'] = '0'
- logger.debug(" LDAPapi.py \t LdapMarkUserAsDeleted ")
- ret = self.LdapModifyUser(record, new_attrs)
- return ret
-
- def LdapResetPassword(self, record):
- """Resets password for the user whose record is the parameter and
- changes the corresponding entry in the LDAP.
-
- :param record: user's sfa record whose Ldap password must be reset.
- Should contain first_name,last_name,
- email or mail, and if the record is enabled or not. If the dict
- record does not have all of these, must at least contain the user's
- email.
- :type record: dict
- :returns: return value of LdapModifyUser. True if successful, False
- otherwise.
-
- .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
-
- """
- password = self.login_pwd.generate_password()
- attrs = {}
- attrs['userPassword'] = self.login_pwd.encrypt_password(password)
- logger.debug("LDAP LdapResetPassword encrypt_password %s"
- % (attrs['userPassword']))
- result = self.LdapModifyUser(record, attrs)
- return result
-
-
- def LdapSearch(self, req_ldap=None, expected_fields=None):
- """
- Used to search directly in LDAP, by using ldap filters and return
- fields. When req_ldap is None, returns all the entries in the LDAP.
-
- :param req_ldap: ldap style request, with appropriate filters,
- example: (cn=*).
- :param expected_fields: Fields in the user ldap entry that has to be
- returned. If None is provided, will return 'mail', 'givenName',
- 'sn', 'uid', 'sshPublicKey', 'shadowExpire'.
- :type req_ldap: string
- :type expected_fields: list
-
- .. seealso:: make_ldap_filters_from_record for req_ldap format.
-
- """
- result = self.conn.connect(bind=False)
- if (result['bool']):
-
- return_fields_list = []
- if expected_fields is None:
- return_fields_list = ['mail', 'givenName', 'sn', 'uid',
- 'sshPublicKey', 'shadowExpire']
- else:
- return_fields_list = expected_fields
- #No specifc request specified, get the whole LDAP
- if req_ldap is None:
- req_ldap = '(cn=*)'
-
- logger.debug("LDAP.PY \t LdapSearch req_ldap %s \
- return_fields_list %s" \
- %(req_ldap, return_fields_list))
-
- try:
- msg_id = self.conn.ldapserv.search(
- self.baseDN, ldap.SCOPE_SUBTREE,
- req_ldap, return_fields_list)
- #Get all the results matching the search from ldap in one
- #shot (1 value)
- result_type, result_data = \
- self.conn.ldapserv.result(msg_id, 1)
-
- self.conn.close()
-
- logger.debug("LDAP.PY \t LdapSearch result_data %s"
- % (result_data))
-
- return result_data
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapSearch Error %s" % error)
- return []
-
- else:
- logger.error("LDAP.PY \t Connection Failed")
- return
-
- def _process_ldap_info_for_all_users(self, result_data):
- """Process the data of all enabled users in LDAP.
-
- :param result_data: Contains information of all enabled users in LDAP
- and is coming from LdapSearch.
- :param result_data: list
-
- .. seealso:: LdapSearch
-
- """
- results = []
- logger.debug(" LDAP.py _process_ldap_info_for_all_users result_data %s "
- % (result_data))
- for ldapentry in result_data:
- logger.debug(" LDAP.py _process_ldap_info_for_all_users \
- ldapentry name : %s " % (ldapentry[1]['uid'][0]))
- tmpname = ldapentry[1]['uid'][0]
- hrn = self.authname + "." + tmpname
-
- tmpemail = ldapentry[1]['mail'][0]
- if ldapentry[1]['mail'][0] == "unknown":
- tmpemail = None
-
- try:
- results.append({
- 'type': 'user',
- 'pkey': ldapentry[1]['sshPublicKey'][0],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname ,
- 'email':tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry[1]['givenName'][0],
- 'last_name': ldapentry[1]['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': self.authname,
- 'peer_authority': '',
- 'pointer': -1,
- 'hrn': hrn,
- })
- except KeyError, error:
- logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s"
- % (error))
- return
-
- return results
-
- def _process_ldap_info_for_one_user(self, record, result_data):
- """
-
- Put the user's ldap data into shape. Only deals with one user
- record and one user data from ldap.
-
- :param record: user record
- :param result_data: Raw ldap data coming from LdapSearch
- :returns: user's data dict with 'type','pkey','uid', 'email',
- 'first_name' 'last_name''serial''authority''peer_authority'
- 'pointer''hrn'
- :type record: dict
- :type result_data: list
- :rtype :dict
-
- """
- #One entry only in the ldap data because we used a filter
- #to find one user only
- ldapentry = result_data[0][1]
- logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" % (ldapentry))
- tmpname = ldapentry['uid'][0]
-
- tmpemail = ldapentry['mail'][0]
- if ldapentry['mail'][0] == "unknown":
- tmpemail = None
-
- parent_hrn = None
- peer_authority = None
- if 'hrn' in record:
- hrn = record['hrn']
- parent_hrn = get_authority(hrn)
- if parent_hrn != self.authname:
- peer_authority = parent_hrn
- #In case the user was not imported from Iotlab LDAP
- #but from another federated site, has an account in
- #iotlab but currently using his hrn from federated site
- #then the login is different from the one found in its hrn
- if tmpname != hrn.split('.')[1]:
- hrn = None
- else:
- hrn = None
-
- results = {
- 'type': 'user',
- 'pkey': ldapentry['sshPublicKey'],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname,
- 'email': tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry['givenName'][0],
- 'last_name': ldapentry['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': parent_hrn,
- 'peer_authority': peer_authority,
- 'pointer': -1,
- 'hrn': hrn,
- }
- return results
-
- def LdapFindUser(self, record=None, is_user_enabled=None,
- expected_fields=None):
- """
-
- Search a SFA user with a hrn. User should be already registered
- in Iotlab LDAP.
-
- :param record: sfa user's record. Should contain first_name,last_name,
- email or mail. If no record is provided, returns all the users found
- in LDAP.
- :type record: dict
- :param is_user_enabled: is the user's iotlab account already valid.
- :type is_user_enabled: Boolean.
- :returns: LDAP entries from ldap matching the filter provided. Returns
- a single entry if one filter has been given and a list of
- entries otherwise.
- :rtype: dict or list
-
- """
- custom_record = {}
- if is_user_enabled:
- custom_record['enabled'] = is_user_enabled
- if record:
- custom_record.update(record)
-
- req_ldap = self.make_ldap_filters_from_record(custom_record)
- return_fields_list = []
- if expected_fields is None:
- return_fields_list = ['mail', 'givenName', 'sn', 'uid',
- 'sshPublicKey']
- else:
- return_fields_list = expected_fields
-
- result_data = self.LdapSearch(req_ldap, return_fields_list)
- logger.debug("LDAP.PY \t LdapFindUser result_data %s" % (result_data))
-
- if len(result_data) == 0:
- return None
- #Asked for a specific user
- if record is not None:
- results = self._process_ldap_info_for_one_user(record, result_data)
-
- else:
- #Asked for all users in ldap
- results = self._process_ldap_info_for_all_users(result_data)
- return results
\ No newline at end of file
+++ /dev/null
-"""
-File providing methods to generate valid RSpecs for the Iotlab testbed.
-Contains methods to get information on slice, slivers, nodes and leases,
-formatting them and turn it into a RSpec.
-"""
-from sfa.util.xrn import hrn_to_urn, urn_to_hrn
-from sfa.util.sfatime import utcparse, datetime_to_string
-
-from sfa.iotlab.iotlabxrn import IotlabXrn, xrn_object
-from sfa.rspecs.rspec import RSpec
-
-#from sfa.rspecs.elements.location import Location
-from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.node import NodeElement
-from sfa.rspecs.elements.login import Login
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.lease import Lease
-from sfa.rspecs.elements.granularity import Granularity
-from sfa.rspecs.version_manager import VersionManager
-from sfa.storage.model import SliverAllocation
-from sfa.rspecs.elements.versions.iotlabv1Node import IotlabPosition, \
- IotlabLocation
-
-from sfa.util.sfalogging import logger
-from sfa.util.xrn import Xrn
-
-import time
-
-class CortexlabAggregate:
- """Aggregate manager class for cortexlab. """
-
- sites = {}
- nodes = {}
- api = None
- interfaces = {}
- links = {}
- node_tags = {}
-
- prepared = False
-
- user_options = {}
-
- def __init__(self, driver):
- self.driver = driver
-
- def get_slice_and_slivers(self, slice_xrn, login=None):
- """
- Get the slices and the associated leases if any, from the cortexlab
- testbed. One slice can have mutliple leases.
- For each slice, get the nodes in the associated lease
- and create a sliver with the necessary info and insert it into the
- sliver dictionary, keyed on the node hostnames.
- Returns a dict of slivers based on the sliver's node_id.
- Called by get_rspec.
-
-
- :param slice_xrn: xrn of the slice
- :param login: user's login on cortexlab ldap
-
- :type slice_xrn: string
- :type login: string
- :returns: a list of slices dict and a list of Sliver object
- :rtype: (list, list)
-
- .. note: There is no real slivers in cortexlab, only leases. The goal
- is to be consistent with the SFA standard.
-
- """
-
-
- slivers = {}
- sfa_slice = None
- if slice_xrn is None:
- return (sfa_slice, slivers)
- slice_urn = hrn_to_urn(slice_xrn, 'slice')
- slice_hrn, _ = urn_to_hrn(slice_xrn)
-
- # GetSlices always returns a list, even if there is only one element
- slices = self.driver.GetSlices(slice_filter=str(slice_hrn),
- slice_filter_type='slice_hrn',
- login=login)
-
- logger.debug("CortexlabAggregate api \tget_slice_and_slivers \
- slice_hrn %s \r\n slices %s self.driver.hrn %s"
- % (slice_hrn, slices, self.driver.hrn))
- if slices == []:
- return (sfa_slice, slivers)
-
- # sort slivers by node id , if there is a job
- #and therefore, node allocated to this slice
- # for sfa_slice in slices:
- sfa_slice = slices[0]
- try:
- node_ids_list = sfa_slice['node_ids']
- except KeyError:
- logger.log_exc("CORTEXLABAGGREGATE \t \
- get_slice_and_slivers No nodes in the slice \
- - KeyError ")
- node_ids_list = []
- # continue
-
- for node in node_ids_list:
- sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
- sliver_xrn.set_authority(self.driver.hrn)
- sliver = Sliver({'sliver_id': sliver_xrn.urn,
- 'name': sfa_slice['hrn'],
- 'type': 'cortexlab-node',
- 'tags': []})
-
- slivers[node] = sliver
-
- #Add default sliver attribute :
- #connection information for cortexlab, assuming it is the same ssh
- # connection process
- # look in ldap:
- ldap_username = self.find_ldap_username_from_slice(sfa_slice)
-
- if ldap_username is not None:
- ssh_access = None
- slivers['default_sliver'] = {'ssh': ssh_access,
- 'login': ldap_username}
-
-
- logger.debug("CORTEXLABAGGREGATE api get_slice_and_slivers slivers %s "
- % (slivers))
- return (slices, slivers)
-
-
- def find_ldap_username_from_slice(self, sfa_slice):
- """
- Gets the ldap username of the user based on the information contained
- in ist sfa_slice record.
-
- :param sfa_slice: the user's slice record. Must contain the
- reg_researchers key.
- :type sfa_slice: dictionary
- :returns: ldap_username, the ldap user's login.
- :rtype: string
- """
-
- researchers = [sfa_slice['reg_researchers'][0].__dict__]
- # look in ldap:
- ldap_username = None
- ret = self.driver.testbed_shell.GetPersons(researchers)
- if len(ret) != 0:
- ldap_username = ret[0]['uid']
-
- return ldap_username
-
-
-
- def get_nodes(self, options=None):
- """Returns the nodes in the slice using the rspec format, with all the
- nodes' properties.
-
- Fetch the nodes ids in the slices dictionary and get all the nodes
- properties from OAR. Makes a rspec dicitonary out of this and returns
- it. If the slice does not have any job running or scheduled, that is
- it has no reserved nodes, then returns an empty list.
-
- :returns: An empty list if the slice has no reserved nodes, a rspec
- list with all the nodes and their properties (a dict per node)
- otherwise.
- :rtype: list
-
- .. seealso:: get_slice_and_slivers
-
- """
- filter_nodes = None
- if options:
- geni_available = options.get('geni_available')
- if geni_available == True:
- filter_nodes['boot_state'] = ['Alive']
-
- # slice_nodes_list = []
- # if slices is not None:
- # for one_slice in slices:
- # try:
- # slice_nodes_list = one_slice['node_ids']
- # # if we are dealing with a slice that has no node just
- # # return an empty list. In iotlab a slice can have multiple
- # # jobs scheduled, so it either has at least one lease or
- # # not at all.
- # except KeyError:
- # return []
-
- # get the granularity in second for the reservation system
- # grain = self.driver.testbed_shell.GetLeaseGranularity()
-
- nodes = self.driver.testbed_shell.GetNodes(node_filter_dict =
- filter_nodes)
-
- nodes_dict = {}
-
- #if slices, this means we got to list all the nodes given to this slice
- # Make a list of all the nodes in the slice before getting their
- #attributes
-
- for node in nodes:
- nodes_dict[node['node_id']] = node
-
- return nodes_dict
-
-
- def node_to_rspec_node(self, node):
- """ Creates a rspec node structure with the appropriate information
- based on the node information that can be found in the node dictionary.
-
- :param node: node data. this dict contains information about the node
- and must have the following keys : mobile, radio, archi, hostname,
- boot_state, site, x, y ,z (position).
- :type node: dictionary.
-
- :returns: node dictionary containing the following keys : mobile, archi,
- radio, component_id, component_name, component_manager_id,
- authority_id, boot_state, exclusive, hardware_types, location,
- position, granularity, tags.
- :rtype: dict
-
- """
-
- grain = self.driver.testbed_shell.GetLeaseGranularity()
- rspec_node = NodeElement()
-
- # xxx how to retrieve site['login_base']
- #site_id=node['site_id']
- #site=sites_dict[site_id]
-
- rspec_node['mobile'] = node['mobile']
- rspec_node['archi'] = node['archi']
- rspec_node['radio'] = node['radio']
- cortexlab_xrn = xrn_object(self.driver.testbed_shell.root_auth,
- node['hostname'])
-
- rspec_node['component_id'] = cortexlab_xrn.urn
- rspec_node['component_name'] = node['hostname']
- rspec_node['component_manager_id'] = \
- hrn_to_urn(self.driver.testbed_shell.root_auth,
- 'authority+sa')
-
- # Iotlab's nodes are federated : there is only one authority
- # for all Iotlab sites, registered in SFA.
- # Removing the part including the site
- # in authority_id SA 27/07/12
- rspec_node['authority_id'] = rspec_node['component_manager_id']
-
- # do not include boot state (<available> element)
- #in the manifest rspec
-
-
- rspec_node['boot_state'] = node['boot_state']
- # if node['hostname'] in reserved_nodes:
- # rspec_node['boot_state'] = "Reserved"
- rspec_node['exclusive'] = 'true'
- rspec_node['hardware_types'] = [HardwareType({'name': \
- 'iotlab-node'})]
-
- location = IotlabLocation({'country':'France', 'site': \
- node['site']})
- rspec_node['location'] = location
-
-
- position = IotlabPosition()
- for field in position :
- try:
- position[field] = node[field]
- except KeyError, error :
- logger.log_exc("Cortexlabaggregate\t node_to_rspec_node \
- position %s "% (error))
-
- rspec_node['position'] = position
-
-
- # Granularity
- granularity = Granularity({'grain': grain})
- rspec_node['granularity'] = granularity
- rspec_node['tags'] = []
- # if node['hostname'] in slivers:
- # # add sliver info
- # sliver = slivers[node['hostname']]
- # rspec_node['sliver_id'] = sliver['sliver_id']
- # rspec_node['client_id'] = node['hostname']
- # rspec_node['slivers'] = [sliver]
-
- # # slivers always provide the ssh service
- # login = Login({'authentication': 'ssh-keys', \
- # 'hostname': node['hostname'], 'port':'22', \
- # 'username': sliver['name']})
- # service = Services({'login': login})
- # rspec_node['services'] = [service]
-
- return rspec_node
-
-
- def rspec_node_to_geni_sliver(self, rspec_node, sliver_allocations=None):
- """Makes a geni sliver structure from all the nodes allocated
- to slivers in the sliver_allocations dictionary. Returns the states
- of the sliver.
-
- :param rspec_node: Node information contained in a rspec data structure
- fashion.
- :type rspec_node: dictionary
- :param sliver_allocations:
- :type sliver_allocations: dictionary
-
- :returns: Dictionary with the following keys: geni_sliver_urn,
- geni_expires, geni_allocation_status, geni_operational_status,
- geni_error.
-
- :rtype: dictionary
-
- .. seealso:: node_to_rspec_node
-
- """
- if sliver_allocations is None: sliver_allocations={}
-
- if rspec_node['sliver_id'] in sliver_allocations:
- # set sliver allocation and operational status
- sliver_allocation = sliver_allocations[rspec_node['sliver_id']]
- if sliver_allocation:
- allocation_status = sliver_allocation.allocation_state
- if allocation_status == 'geni_allocated':
- op_status = 'geni_pending_allocation'
- elif allocation_status == 'geni_provisioned':
- op_status = 'geni_ready'
- else:
- op_status = 'geni_unknown'
- else:
- allocation_status = 'geni_unallocated'
- else:
- allocation_status = 'geni_unallocated'
- op_status = 'geni_failed'
- # required fields
- geni_sliver = {'geni_sliver_urn': rspec_node['sliver_id'],
- 'geni_expires': rspec_node['expires'],
- 'geni_allocation_status' : allocation_status,
- 'geni_operational_status': op_status,
- 'geni_error': '',
- }
- return geni_sliver
-
- def sliver_to_rspec_node(self, sliver, sliver_allocations):
- """Used by describe to format node information into a rspec compliant
- structure.
-
- Creates a node rspec compliant structure by calling node_to_rspec_node.
- Adds slivers, if any, to rspec node structure. Returns the updated
- rspec node struct.
-
- :param sliver: sliver dictionary. Contains keys: urn, slice_id, hostname
- and slice_name.
- :type sliver: dictionary
- :param sliver_allocations: dictionary of slivers
- :type sliver_allocations: dict
-
- :returns: Node dictionary with all necessary data.
-
- .. seealso:: node_to_rspec_node
- """
- rspec_node = self.node_to_rspec_node(sliver)
- rspec_node['expires'] = datetime_to_string(utcparse(sliver['expires']))
- # add sliver info
- logger.debug("CORTEXLABAGGREGATE api \t sliver_to_rspec_node sliver \
- %s \r\nsliver_allocations %s" % (sliver,
- sliver_allocations))
- rspec_sliver = Sliver({'sliver_id': sliver['urn'],
- 'name': sliver['slice_id'],
- 'type': 'iotlab-exclusive',
- 'tags': []})
- rspec_node['sliver_id'] = rspec_sliver['sliver_id']
-
- if sliver['urn'] in sliver_allocations:
- rspec_node['client_id'] = sliver_allocations[
- sliver['urn']].client_id
- if sliver_allocations[sliver['urn']].component_id:
- rspec_node['component_id'] = sliver_allocations[
- sliver['urn']].component_id
- rspec_node['slivers'] = [rspec_sliver]
-
- # slivers always provide the ssh service
- login = Login({'authentication': 'ssh-keys',
- 'hostname': sliver['hostname'],
- 'port':'22',
- 'username': sliver['slice_name'],
- 'login': sliver['slice_name']
- })
- return rspec_node
-
-
- def get_all_leases(self, ldap_username):
- """
-
- Get list of lease dictionaries which all have the mandatory keys
- ('lease_id', 'hostname', 'site_id', 'name', 'start_time', 'duration').
- All the leases running or scheduled are returned.
-
- :param ldap_username: if ldap uid is not None, looks for the leases
- belonging to this user.
- :type ldap_username: string
- :returns: rspec lease dictionary with keys lease_id, component_id,
- slice_id, start_time, duration where the lease_id is the oar job id,
- component_id is the node's urn, slice_id is the slice urn,
- start_time is the timestamp starting time and duration is expressed
- in terms of the testbed's granularity.
- :rtype: dict
-
- .. note::There is no filtering of leases within a given time frame.
- All the running or scheduled leases are returned. options
- removed SA 15/05/2013
-
-
- """
-
- logger.debug("CortexlabAggregate get_all_leases ldap_username %s "
- % (ldap_username))
- leases = self.driver.driver.GetLeases(login=ldap_username)
- grain = self.driver.testbed_shell.GetLeaseGranularity()
- # site_ids = []
- rspec_leases = []
- for lease in leases:
- #as many leases as there are nodes in the job
- for node in lease['reserved_nodes']:
- rspec_lease = Lease()
- rspec_lease['lease_id'] = lease['lease_id']
-
- cortexlab_xrn = xrn_object(
- self.driver.testbed_shell.root_auth, node)
- rspec_lease['component_id'] = cortexlab_xrn.urn
- #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn,\
- #site, node['hostname'])
- try:
- rspec_lease['slice_id'] = lease['slice_id']
- except KeyError:
- #No info on the slice used in cortexlab_xp table
- pass
- rspec_lease['start_time'] = lease['t_from']
- rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
- / grain
- rspec_leases.append(rspec_lease)
- return rspec_leases
-
- def get_rspec(self, slice_xrn=None, login=None, version=None,
- options=None):
- """
- Returns xml rspec:
- - a full advertisement rspec with the testbed resources if slice_xrn is
- not specified.If a lease option is given, also returns the leases
- scheduled on the testbed.
- - a manifest Rspec with the leases and nodes in slice's leases if
- slice_xrn is not None.
-
- :param slice_xrn: srn of the slice
- :type slice_xrn: string
- :param login: user'uid (ldap login) on cortexlab
- :type login: string
- :param version: can be set to sfa or cortexlab
- :type version: RSpecVersion
- :param options: used to specify if the leases should also be included in
- the returned rspec.
- :type options: dict
-
- :returns: Xml Rspec.
- :rtype: XML
-
-
- """
-
- ldap_username = None
- rspec = None
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- logger.debug("CortexlabAggregate \t get_rspec ***version %s \
- version.type %s version.version %s options %s \r\n"
- % (version, version.type, version.version, options))
-
- if slice_xrn is None:
- rspec_version = version_manager._get_version(version.type,
- version.version, 'ad')
-
- else:
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
-
- slices, slivers = self.get_slice_and_slivers(slice_xrn, login)
- if slice_xrn and slices is not None:
- #Get user associated with this slice
- #for one_slice in slices :
- ldap_username = self.find_ldap_username_from_slice(slices[0])
- # ldap_username = slices[0]['reg_researchers'][0].__dict__['hrn']
- # # ldap_username = slices[0]['user']
- # tmp = ldap_username.split('.')
- # ldap_username = tmp[1]
- logger.debug("CortexlabAggregate \tget_rspec **** \
- LDAP USERNAME %s \r\n" \
- % (ldap_username))
- #at this point sliver may be empty if no cortexlab job
- #is running for this user/slice.
- rspec = RSpec(version=rspec_version, user_options=options)
-
- logger.debug("\r\n \r\n CortexlabAggregate \tget_rspec *** \
- slice_xrn %s slices %s\r\n \r\n"
- % (slice_xrn, slices))
-
- if options is not None :
- lease_option = options['list_leases']
- else:
- #If no options are specified, at least print the resources
- lease_option = 'all'
- #if slice_xrn :
- #lease_option = 'all'
-
- if lease_option in ['all', 'resources']:
- #if not options.get('list_leases') or options.get('list_leases')
- #and options['list_leases'] != 'leases':
- nodes = self.get_nodes()
- logger.debug("\r\n")
- logger.debug("CortexlabAggregate \t lease_option %s \
- get rspec ******* nodes %s"
- % (lease_option, nodes))
-
- sites_set = set([node['location']['site'] for node in nodes])
-
- #In case creating a job, slice_xrn is not set to None
- rspec.version.add_nodes(nodes)
- if slice_xrn and slices is not None:
- # #Get user associated with this slice
- # #for one_slice in slices :
- # ldap_username = slices[0]['reg_researchers']
- # # ldap_username = slices[0]['user']
- # tmp = ldap_username.split('.')
- # ldap_username = tmp[1]
- # # ldap_username = tmp[1].split('_')[0]
-
- logger.debug("CortexlabAggregate \tget_rspec **** \
- version type %s ldap_ user %s \r\n" \
- % (version.type, ldap_username))
- #TODO : Change the version of Rspec here in case of pbm -SA 09/01/14
- if version.type in ["Cortexlab", "Iotlab"]:
- rspec.version.add_connection_information(
- ldap_username, sites_set)
-
- default_sliver = slivers.get('default_sliver', [])
- if default_sliver and len(nodes) is not 0:
- #default_sliver_attribs = default_sliver.get('tags', [])
- logger.debug("CortexlabAggregate \tget_rspec **** \
- default_sliver%s \r\n" % (default_sliver))
- for attrib in default_sliver:
- rspec.version.add_default_sliver_attribute(
- attrib, default_sliver[attrib])
-
- if lease_option in ['all','leases']:
- leases = self.get_all_leases(ldap_username)
- rspec.version.add_leases(leases)
- logger.debug("CortexlabAggregate \tget_rspec **** \
- FINAL RSPEC %s \r\n" % (rspec.toxml()))
- return rspec.toxml()
-
-
-
- def get_slivers(self, urns, options=None):
- """Get slivers of the given slice urns. Slivers contains slice, node and
- user information.
-
- For Iotlab, returns the leases with sliver ids and their allocation
- status.
-
- :param urns: list of slice urns.
- :type urns: list of strings
- :param options: unused
- :type options: unused
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
- """
- if options is None: options={}
-
- slice_ids = set()
- node_ids = []
- for urn in urns:
- xrn = IotlabXrn(xrn=urn)
- if xrn.type == 'sliver':
- # id: slice_id-node_id
- try:
- sliver_id_parts = xrn.get_sliver_id_parts()
- slice_id = int(sliver_id_parts[0])
- node_id = int(sliver_id_parts[1])
- slice_ids.add(slice_id)
- node_ids.append(node_id)
- except ValueError:
- pass
- else:
- slice_names = set()
- slice_names.add(xrn.hrn)
-
-
- logger.debug("CortexlabAggregate \t get_slivers urns %s slice_ids %s \
- node_ids %s\r\n" % (urns, slice_ids, node_ids))
- logger.debug("CortexlabAggregate \t get_slivers xrn %s slice_names %s \
- \r\n" % (xrn, slice_names))
- filter_sliver = {}
- if slice_names:
- filter_sliver['slice_hrn'] = list(slice_names)
- slice_hrn = filter_sliver['slice_hrn'][0]
-
- slice_filter_type = 'slice_hrn'
-
- # if slice_ids:
- # filter['slice_id'] = list(slice_ids)
- # # get slices
- if slice_hrn:
- slices = self.driver.GetSlices(slice_hrn,
- slice_filter_type)
- leases = self.driver.GetLeases({'slice_hrn':slice_hrn})
- logger.debug("CortexlabAggregate \t get_slivers \
- slices %s leases %s\r\n" % (slices, leases ))
- if not slices:
- return []
-
- single_slice = slices[0]
- # get sliver users
- user = single_slice['reg_researchers'][0].__dict__
- logger.debug("CortexlabAggregate \t get_slivers user %s \
- \r\n" % (user))
-
- # construct user key info
- person = self.driver.testbed_shell.ldap.LdapFindUser(record=user)
- logger.debug("CortexlabAggregate \t get_slivers person %s \
- \r\n" % (person))
- # name = person['last_name']
- user['login'] = person['uid']
- user['user_urn'] = hrn_to_urn(user['hrn'], 'user')
- user['keys'] = person['pkey']
-
-
- try:
- node_ids = single_slice['node_ids']
- node_list = self.driver.testbed_shell.GetNodes(
- {'hostname':single_slice['node_ids']})
- node_by_hostname = dict([(node['hostname'], node)
- for node in node_list])
- except KeyError:
- logger.warning("\t get_slivers No slivers in slice")
- # slice['node_ids'] = node_ids
- # nodes_dict = self.get_slice_nodes(slice, options)
-
- slivers = []
- for current_lease in leases:
- for hostname in current_lease['reserved_nodes']:
- node = {}
- node['slice_id'] = current_lease['slice_id']
- node['slice_hrn'] = current_lease['slice_hrn']
- slice_name = current_lease['slice_hrn'].split(".")[1]
- node['slice_name'] = slice_name
- index = current_lease['reserved_nodes'].index(hostname)
- node_id = current_lease['resource_ids'][index]
- # node['slice_name'] = user['login']
- # node.update(single_slice)
- more_info = node_by_hostname[hostname]
- node.update(more_info)
- # oar_job_id is the slice_id (lease_id)
- sliver_hrn = '%s.%s-%s' % (self.driver.hrn,
- current_lease['lease_id'], node_id)
- node['node_id'] = node_id
- node['expires'] = current_lease['t_until']
- node['sliver_id'] = Xrn(sliver_hrn, type='sliver').urn
- node['urn'] = node['sliver_id']
- node['services_user'] = [user]
-
- slivers.append(node)
- return slivers
-
-
- def list_resources(self, version = None, options=None):
- """
- Returns an advertisement Rspec of available resources at this
- aggregate. This Rspec contains a resource listing along with their
- description, providing sufficient information for clients to be able to
- select among available resources.
-
- :param options: various options. The valid options are: {boolean
- geni_compressed <optional>; struct geni_rspec_version { string type;
- #case insensitive , string version; # case insensitive}} . The only
- mandatory options if options is specified is geni_rspec_version.
- :type options: dictionary
-
- :returns: On success, the value field of the return struct will contain
- a geni.rspec advertisment RSpec
- :rtype: Rspec advertisement in xml.
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#RSpecdatatype
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#ListResources
- """
-
- if options is None: options={}
-
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(version.type,
- version.version, 'ad')
- rspec = RSpec(version=rspec_version, user_options=options)
- # variable ldap_username to be compliant with get_all_leases
- # prototype. Now unused in geni-v3 since we are getting all the leases
- # here
- ldap_username = None
- if not options.get('list_leases') or options['list_leases'] != 'leases':
- # get nodes
- nodes_dict = self.get_nodes(options)
-
- # no interfaces on iotlab nodes
- # convert nodes to rspec nodes
- rspec_nodes = []
- for node_id in nodes_dict:
- node = nodes_dict[node_id]
- rspec_node = self.node_to_rspec_node(node)
- rspec_nodes.append(rspec_node)
- rspec.version.add_nodes(rspec_nodes)
-
- # add links
- # links = self.get_links(sites, nodes_dict, interfaces)
- # rspec.version.add_links(links)
-
- if not options.get('list_leases') or options.get('list_leases') \
- and options['list_leases'] != 'resources':
- leases = self.get_all_leases(ldap_username)
- rspec.version.add_leases(leases)
-
- return rspec.toxml()
-
-
- def describe(self, urns, version=None, options=None):
- """
- Retrieve a manifest RSpec describing the resources contained by the
- named entities, e.g. a single slice or a set of the slivers in a slice.
- This listing and description should be sufficiently descriptive to allow
- experimenters to use the resources.
-
- :param urns: If a slice urn is supplied and there are no slivers in the
- given slice at this aggregate, then geni_rspec shall be a valid
- manifest RSpec, containing no node elements - no resources.
- :type urns: list or strings
- :param options: various options. the valid options are: {boolean
- geni_compressed <optional>; struct geni_rspec_version { string type;
- #case insensitive , string version; # case insensitive}}
- :type options: dictionary
-
- :returns: On success returns the following dictionary {geni_rspec:
- <geni.rspec, a Manifest RSpec>, geni_urn: <string slice urn of the
- containing slice>, geni_slivers:{ geni_sliver_urn:
- <string sliver urn>, geni_expires: <dateTime.rfc3339 allocation
- expiration string, as in geni_expires from SliversStatus>,
- geni_allocation_status: <string sliver state - e.g. geni_allocated
- or geni_provisioned >, geni_operational_status:
- <string sliver operational state>, geni_error: <optional string.
- The field may be omitted entirely but may not be null/None,
- explaining any failure for a sliver.>}
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Describe
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
- """
- if options is None: options={}
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
- rspec = RSpec(version=rspec_version, user_options=options)
-
- # get slivers
- geni_slivers = []
- slivers = self.get_slivers(urns, options)
- if slivers:
- rspec_expires = datetime_to_string(utcparse(slivers[0]['expires']))
- else:
- rspec_expires = datetime_to_string(utcparse(time.time()))
- rspec.xml.set('expires', rspec_expires)
-
- # lookup the sliver allocations
- geni_urn = urns[0]
- sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- logger.debug(" Cortexlabaggregate.PY \tDescribe sliver_ids %s "
- % (sliver_ids))
- constraint = SliverAllocation.sliver_id.in_(sliver_ids)
- query = self.driver.api.dbsession().query(SliverAllocation)
- sliver_allocations = query.filter((constraint)).all()
- logger.debug(" Cortexlabaggregate.PY \tDescribe sliver_allocations %s "
- % (sliver_allocations))
- sliver_allocation_dict = {}
- for sliver_allocation in sliver_allocations:
- geni_urn = sliver_allocation.slice_urn
- sliver_allocation_dict[sliver_allocation.sliver_id] = \
- sliver_allocation
-
- # add slivers
- nodes_dict = {}
- for sliver in slivers:
- nodes_dict[sliver['node_id']] = sliver
- rspec_nodes = []
- for sliver in slivers:
- rspec_node = self.sliver_to_rspec_node(sliver,
- sliver_allocation_dict)
- rspec_nodes.append(rspec_node)
- logger.debug(" Cortexlabaggregate.PY \tDescribe sliver_allocation_dict %s "
- % (sliver_allocation_dict))
- geni_sliver = self.rspec_node_to_geni_sliver(rspec_node,
- sliver_allocation_dict)
- geni_slivers.append(geni_sliver)
-
- logger.debug(" Cortexlabaggregate.PY \tDescribe rspec_nodes %s\
- rspec %s "
- % (rspec_nodes, rspec))
- rspec.version.add_nodes(rspec_nodes)
-
- return {'geni_urn': geni_urn,
- 'geni_rspec': rspec.toxml(),
- 'geni_slivers': geni_slivers}
\ No newline at end of file
+++ /dev/null
-"""
-Implements what a driver should provide for SFA to work.
-"""
-from datetime import datetime
-from sfa.util.sfatime import utcparse, datetime_to_string
-
-from sfa.util.faults import SliverDoesNotExist, Forbidden
-from sfa.util.sfalogging import logger
-
-from sfa.trust.hierarchy import Hierarchy
-from sfa.trust.gid import create_uuid
-
-from sfa.managers.driver import Driver
-from sfa.rspecs.version_manager import VersionManager
-from sfa.rspecs.rspec import RSpec
-
-from sfa.cortexlab.cortexlabaggregate import CortexlabAggregate
-
-from sfa.cortexlab.cortexlabslices import CortexlabSlices
-from sfa.cortexlab.cortexlabshell import CortexlabShell
-
-from sfa.iotlab.iotlabxrn import IotlabXrn, xrn_object
-from sfa.util.xrn import Xrn, hrn_to_urn, get_authority, urn_to_hrn
-
-from sfa.trust.certificate import Keypair, convert_public_key
-from sfa.trust.credential import Credential
-from sfa.storage.model import SliverAllocation
-
-from sfa.storage.model import RegRecord, RegUser, RegSlice, RegKey
-from sfa.iotlab.iotlabpostgres import LeaseTableXP
-from sqlalchemy.orm import joinedload
-
-class CortexlabDriver(Driver):
- """ Cortexlab Driver class inherited from Driver generic class.
-
- Contains methods compliant with the SFA standard and the testbed
- infrastructure (calls to LDAP and scheduler to book the nodes).
-
- .. seealso::: Driver class
-
- """
- def __init__(self, api):
- """
-
- Sets the Cortexlab SFA config parameters,
- instanciates the testbed api.
-
- :param api: SfaApi configuration object. Holds reference to the
- database.
- :type api: SfaApi object
- """
-
- Driver.__init__(self, api)
- self.api = api
- config = api.config
- self.testbed_shell = CortexlabShell(config)
- self.cache = None
-
- def GetPeers(self, peer_filter=None ):
- """ Gathers registered authorities in SFA DB and looks for specific peer
- if peer_filter is specified.
- :param peer_filter: name of the site authority looked for.
- :type peer_filter: string
- :returns: list of records.
-
- """
-
- existing_records = {}
- existing_hrns_by_types = {}
- logger.debug("CORTEXLAB_API \tGetPeers peer_filter %s " % (peer_filter))
- query = self.api.dbsession().query(RegRecord)
- all_records = query.filter(RegRecord.type.like('%authority%')).all()
-
- for record in all_records:
- existing_records[(record.hrn, record.type)] = record
- if record.type not in existing_hrns_by_types:
- existing_hrns_by_types[record.type] = [record.hrn]
- else:
- existing_hrns_by_types[record.type].append(record.hrn)
-
- logger.debug("CORTEXLAB_API \tGetPeer\texisting_hrns_by_types %s "
- % (existing_hrns_by_types))
- records_list = []
-
- try:
- if peer_filter:
- records_list.append(existing_records[(peer_filter,
- 'authority')])
- else:
- for hrn in existing_hrns_by_types['authority']:
- records_list.append(existing_records[(hrn, 'authority')])
-
- logger.debug("CORTEXLAB_API \tGetPeer \trecords_list %s "
- % (records_list))
-
- except KeyError:
- pass
-
- return_records = records_list
- logger.debug("CORTEXLAB_API \tGetPeer return_records %s "
- % (return_records))
- return return_records
-
- def GetKeys(self, key_filter=None):
- """Returns a dict of dict based on the key string. Each dict entry
- contains the key id, the ssh key, the user's email and the
- user's hrn.
- If key_filter is specified and is an array of key identifiers,
- only keys matching the filter will be returned.
-
- Admin may query all keys. Non-admins may only query their own keys.
- FROM PLC API DOC
-
- :returns: dict with ssh key as key and dicts as value.
- :rtype: dict
- """
- query = self.api.dbsession().query(RegKey)
- if key_filter is None:
- keys = query.options(joinedload('reg_user')).all()
- else:
- constraint = RegKey.key.in_(key_filter)
- keys = query.options(joinedload('reg_user')).filter(constraint).all()
-
- key_dict = {}
- for key in keys:
- key_dict[key.key] = {'key_id': key.key_id, 'key': key.key,
- 'email': key.reg_user.email,
- 'hrn': key.reg_user.hrn}
-
- #ldap_rslt = self.ldap.LdapSearch({'enabled']=True})
- #user_by_email = dict((user[1]['mail'][0], user[1]['sshPublicKey']) \
- #for user in ldap_rslt)
-
- logger.debug("CORTEXLAB_API GetKeys -key_dict %s \r\n " % (key_dict))
- return key_dict
-
- def AddPerson(self, record):
- """
-
- Adds a new account. Any fields specified in records are used,
- otherwise defaults are used. Creates an appropriate login by calling
- LdapAddUser.
-
- :param record: dictionary with the sfa user's properties.
- :returns: a dicitonary with the status. If successful, the dictionary
- boolean is set to True and there is a 'uid' key with the new login
- added to LDAP, otherwise the bool is set to False and a key
- 'message' is in the dictionary, with the error message.
- :rtype: dict
-
- """
- ret = self.testbed_shell.ldap.LdapAddUser(record)
-
- if ret['bool'] is True:
- record['hrn'] = self.testbed_shell.root_auth + '.' + ret['uid']
- logger.debug("Cortexlab api AddPerson return code %s record %s "
- % (ret, record))
- self.__add_person_to_db(record)
- return ret
-
- def __add_person_to_db(self, user_dict):
- """
- Add a federated user straight to db when the user issues a lease
- request with nodes and that he has not registered with cortexlab
- yet (that is he does not have a LDAP entry yet).
- Uses parts of the routines in CortexlabImport when importing user
- from LDAP.
- Called by AddPerson, right after LdapAddUser.
- :param user_dict: Must contain email, hrn and pkey to get a GID
- and be added to the SFA db.
- :type user_dict: dict
-
- """
- request = self.api.dbsession().query(RegUser)
- check_if_exists = \
- request.filter_by(email = user_dict['email']).first()
- #user doesn't exists
- if not check_if_exists:
- logger.debug("__add_person_to_db \t Adding %s \r\n \r\n \
- " %(user_dict))
- hrn = user_dict['hrn']
- person_urn = hrn_to_urn(hrn, 'user')
- pubkey = user_dict['pkey']
- try:
- pkey = convert_public_key(pubkey)
- except TypeError:
- #key not good. create another pkey
- logger.warn('__add_person_to_db: unable to convert public \
- key for %s' %(hrn ))
- pkey = Keypair(create=True)
-
-
- if pubkey is not None and pkey is not None :
- hierarchy = Hierarchy()
- person_gid = hierarchy.create_gid(person_urn, create_uuid(), \
- pkey)
- if user_dict['email']:
- logger.debug("__add_person_to_db \r\n \r\n \
- IOTLAB IMPORTER PERSON EMAIL OK email %s "\
- %(user_dict['email']))
- person_gid.set_email(user_dict['email'])
-
- user_record = RegUser(hrn=hrn , pointer= '-1', \
- authority=get_authority(hrn), \
- email=user_dict['email'], gid = person_gid)
- user_record.reg_keys = [RegKey(user_dict['pkey'])]
- user_record.just_created()
- self.api.dbsession().add (user_record)
- self.api.dbsession().commit()
- return
-
-
- def _sql_get_slice_info(self, slice_filter):
- """
- Get the slice record based on the slice hrn. Fetch the record of the
- user associated with the slice by using joinedload based on the
- reg_researcher relationship.
-
- :param slice_filter: the slice hrn we are looking for
- :type slice_filter: string
- :returns: the slice record enhanced with the user's information if the
- slice was found, None it wasn't.
-
- :rtype: dict or None.
- """
- #DO NOT USE RegSlice - reg_researchers to get the hrn
- #of the user otherwise will mess up the RegRecord in
- #Resolve, don't know why - SA 08/08/2012
-
- #Only one entry for one user = one slice in testbed_xp table
- #slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
- request = self.api.dbsession().query(RegSlice)
- raw_slicerec = request.options(joinedload('reg_researchers')).filter_by(hrn=slice_filter).first()
- #raw_slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
- if raw_slicerec:
- #load_reg_researcher
- #raw_slicerec.reg_researchers
- raw_slicerec = raw_slicerec.__dict__
- logger.debug(" CORTEXLAB_API \t _sql_get_slice_info slice_filter %s \
- raw_slicerec %s" % (slice_filter, raw_slicerec))
- slicerec = raw_slicerec
- #only one researcher per slice so take the first one
- #slicerec['reg_researchers'] = raw_slicerec['reg_researchers']
- #del slicerec['reg_researchers']['_sa_instance_state']
- return slicerec
-
- else:
- return None
-
- def _sql_get_slice_info_from_user(self, slice_filter):
- """
- Get the slice record based on the user recordid by using a joinedload
- on the relationship reg_slices_as_researcher. Format the sql record
- into a dict with the mandatory fields for user and slice.
- :returns: dict with slice record and user record if the record was found
- based on the user's id, None if not..
- :rtype:dict or None..
- """
- #slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
- request = self.api.dbsession().query(RegUser)
- raw_slicerec = request.options(joinedload('reg_slices_as_researcher')).filter_by(record_id=slice_filter).first()
- #raw_slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
- #Put it in correct order
- user_needed_fields = ['peer_authority', 'hrn', 'last_updated',
- 'classtype', 'authority', 'gid', 'record_id',
- 'date_created', 'type', 'email', 'pointer']
- slice_needed_fields = ['peer_authority', 'hrn', 'last_updated',
- 'classtype', 'authority', 'gid', 'record_id',
- 'date_created', 'type', 'pointer']
- if raw_slicerec:
- #raw_slicerec.reg_slices_as_researcher
- raw_slicerec = raw_slicerec.__dict__
- slicerec = {}
- slicerec = \
- dict([(k, raw_slicerec[
- 'reg_slices_as_researcher'][0].__dict__[k])
- for k in slice_needed_fields])
- slicerec['reg_researchers'] = dict([(k, raw_slicerec[k])
- for k in user_needed_fields])
- #TODO Handle multiple slices for one user SA 10/12/12
- #for now only take the first slice record associated to the rec user
- ##slicerec = raw_slicerec['reg_slices_as_researcher'][0].__dict__
- #del raw_slicerec['reg_slices_as_researcher']
- #slicerec['reg_researchers'] = raw_slicerec
- ##del slicerec['_sa_instance_state']
-
- return slicerec
-
- else:
- return None
-
-
- def _get_slice_records(self, slice_filter=None,
- slice_filter_type=None):
- """
- Get the slice record depending on the slice filter and its type.
- :param slice_filter: Can be either the slice hrn or the user's record
- id.
- :type slice_filter: string
- :param slice_filter_type: describes the slice filter type used, can be
- slice_hrn or record_id_user
- :type: string
- :returns: the slice record
- :rtype:dict
- .. seealso::_sql_get_slice_info_from_user
- .. seealso:: _sql_get_slice_info
- """
-
- #Get list of slices based on the slice hrn
- if slice_filter_type == 'slice_hrn':
-
- #if get_authority(slice_filter) == self.root_auth:
- #login = slice_filter.split(".")[1].split("_")[0]
-
- slicerec = self._sql_get_slice_info(slice_filter)
-
- if slicerec is None:
- return None
- #return login, None
-
- #Get slice based on user id
- if slice_filter_type == 'record_id_user':
-
- slicerec = self._sql_get_slice_info_from_user(slice_filter)
-
- if slicerec:
- fixed_slicerec_dict = slicerec
- #At this point if there is no login it means
- #record_id_user filter has been used for filtering
- #if login is None :
- ##If theslice record is from iotlab
- #if fixed_slicerec_dict['peer_authority'] is None:
- #login = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
- #return login, fixed_slicerec_dict
- return fixed_slicerec_dict
- else:
- return None
-
-
-
- def GetSlices(self, slice_filter=None, slice_filter_type=None,
- login=None):
- """Get the slice records from the sfa db and add lease information
- if any.
-
- :param slice_filter: can be the slice hrn or slice record id in the db
- depending on the slice_filter_type.
- :param slice_filter_type: defines the type of the filtering used, Can be
- either 'slice_hrn' or 'record_id'.
- :type slice_filter: string
- :type slice_filter_type: string
- :returns: a slice dict if slice_filter and slice_filter_type
- are specified and a matching entry is found in the db. The result
- is put into a list.Or a list of slice dictionnaries if no filters
- arespecified.
-
- :rtype: list
-
- """
- #login = None
- authorized_filter_types_list = ['slice_hrn', 'record_id_user']
- return_slicerec_dictlist = []
-
- #First try to get information on the slice based on the filter provided
- if slice_filter_type in authorized_filter_types_list:
- fixed_slicerec_dict = self._get_slice_records(slice_filter,
- slice_filter_type)
- # if the slice was not found in the sfa db
- if fixed_slicerec_dict is None:
- return return_slicerec_dictlist
-
- slice_hrn = fixed_slicerec_dict['hrn']
-
- logger.debug(" CORTEXLAB_API \tGetSlices login %s \
- slice record %s slice_filter %s \
- slice_filter_type %s " % (login,
- fixed_slicerec_dict, slice_filter,
- slice_filter_type))
-
-
- #Now we have the slice record fixed_slicerec_dict, get the
- #jobs associated to this slice
- leases_list = []
-
- leases_list = self.GetLeases(login=login)
- #If no job is running or no job scheduled
- #return only the slice record
- if leases_list == [] and fixed_slicerec_dict:
- return_slicerec_dictlist.append(fixed_slicerec_dict)
-
- # if the jobs running don't belong to the user/slice we are looking
- # for
- leases_hrn = [lease['slice_hrn'] for lease in leases_list]
- if slice_hrn not in leases_hrn:
- return_slicerec_dictlist.append(fixed_slicerec_dict)
- #If several experiments for one slice , put the slice record into
- # each lease information dict
- for lease in leases_list:
- slicerec_dict = {}
- logger.debug("CORTEXLAB_API.PY \tGetSlices slice_filter %s \
- \t lease['slice_hrn'] %s"
- % (slice_filter, lease['slice_hrn']))
- if lease['slice_hrn'] == slice_hrn:
- slicerec_dict['experiment_id'] = lease['lease_id']
- #Update lease dict with the slice record
- if fixed_slicerec_dict:
- fixed_slicerec_dict['experiment_id'] = []
- fixed_slicerec_dict['experiment_id'].append(
- slicerec_dict['experiment_id'])
- slicerec_dict.update(fixed_slicerec_dict)
-
- slicerec_dict['slice_hrn'] = lease['slice_hrn']
- slicerec_dict['hrn'] = lease['slice_hrn']
- slicerec_dict['user'] = lease['user']
- slicerec_dict.update(
- {'list_node_ids':
- {'hostname': lease['reserved_nodes']}})
- slicerec_dict.update({'node_ids': lease['reserved_nodes']})
-
-
- return_slicerec_dictlist.append(slicerec_dict)
-
-
- logger.debug("CORTEXLAB_API.PY \tGetSlices \
- slicerec_dict %s return_slicerec_dictlist %s \
- lease['reserved_nodes'] \
- %s" % (slicerec_dict, return_slicerec_dictlist,
- lease['reserved_nodes']))
-
- logger.debug("CORTEXLAB_API.PY \tGetSlices RETURN \
- return_slicerec_dictlist %s"
- % (return_slicerec_dictlist))
-
- return return_slicerec_dictlist
-
-
- else:
- #Get all slices from the cortexlab sfa database , get the user info
- # as well at the same time put them in dict format
- request = self.api.dbsession().query(RegSlice)
- query_slice_list = \
- request.options(joinedload('reg_researchers')).all()
-
- for record in query_slice_list:
- tmp = record.__dict__
- tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
- return_slicerec_dictlist.append(tmp)
-
-
- #Get all the experiments reserved nodes
- leases_list = self.testbed_shell.GetReservedNodes()
-
- for fixed_slicerec_dict in return_slicerec_dictlist:
- slicerec_dict = {}
- #Check if the slice belongs to a cortexlab user
- if fixed_slicerec_dict['peer_authority'] is None:
- owner = fixed_slicerec_dict['hrn'].split(
- ".")[1].split("_")[0]
- else:
- owner = None
- for lease in leases_list:
- if owner == lease['user']:
- slicerec_dict['experiment_id'] = lease['lease_id']
-
- #for reserved_node in lease['reserved_nodes']:
- logger.debug("CORTEXLAB_API.PY \tGetSlices lease %s "
- % (lease))
- slicerec_dict.update(fixed_slicerec_dict)
- slicerec_dict.update({'node_ids':
- lease['reserved_nodes']})
- slicerec_dict.update({'list_node_ids':
- {'hostname':
- lease['reserved_nodes']}})
-
-
- fixed_slicerec_dict.update(slicerec_dict)
-
- logger.debug("CORTEXLAB_API.PY \tGetSlices RETURN \
- return_slicerec_dictlist %s \t slice_filter %s " \
- %(return_slicerec_dictlist, slice_filter))
-
- return return_slicerec_dictlist
-
- def AddLeases(self, hostname_list, slice_record,
- lease_start_time, lease_duration):
-
- """Creates an experiment on the testbed corresponding to the information
- provided as parameters. Adds the experiment id and the slice hrn in the
- lease table on the additional sfa database so that we are able to know
- which slice has which nodes.
-
- :param hostname_list: list of nodes' OAR hostnames.
- :param slice_record: sfa slice record, must contain login and hrn.
- :param lease_start_time: starting time , unix timestamp format
- :param lease_duration: duration in minutes
-
- :type hostname_list: list
- :type slice_record: dict
- :type lease_start_time: integer
- :type lease_duration: integer
- :returns: experiment_id, can be None if the job request failed.
-
- """
- logger.debug("CORTEXLAB_API \r\n \r\n \t AddLeases hostname_list %s \
- slice_record %s lease_start_time %s lease_duration %s "\
- %( hostname_list, slice_record , lease_start_time, \
- lease_duration))
-
- username = slice_record['login']
-
- experiment_id = self.testbed_shell.LaunchExperimentOnTestbed(
- hostname_list,
- slice_record['hrn'],
- lease_start_time, lease_duration,
- username)
- if experiment_id is not None:
- start_time = \
- datetime.fromtimestamp(int(lease_start_time)).\
- strftime(self.testbed_shell.time_format)
- end_time = lease_start_time + lease_duration
-
-
- logger.debug("CORTEXLAB_API \t AddLeases TURN ON LOGGING SQL \
- %s %s %s "%(slice_record['hrn'], experiment_id, end_time))
-
-
- logger.debug("CORTEXLAB_API \r\n \r\n \t AddLeases %s %s %s " \
- %(type(slice_record['hrn']), type(experiment_id),
- type(end_time)))
-
- testbed_xp_row = LeaseTableXP(slice_hrn=slice_record['hrn'],
- experiment_id=experiment_id,
- end_time=end_time)
-
- logger.debug("CORTEXLAB_API \t AddLeases testbed_xp_row %s" \
- %(testbed_xp_row))
- self.api.dbsession().add(testbed_xp_row)
- self.api.dbsession().commit()
-
- logger.debug("CORTEXLAB_API \t AddLeases hostname_list \
- start_time %s " %(start_time))
-
- return experiment_id
-
-
- def GetLeases(self, lease_filter_dict=None, login=None):
- """
- Get the list of leases from testbed with complete information
- about which slice owns which jobs and nodes.
- Two purposes:
- -Fetch all the experiment from the testbed (running, waiting..)
- complete the reservation information with slice hrn
- found in lease_table . If not available in the table,
- assume it is a iotlab slice.
- -Updates the iotlab table, deleting jobs when necessary.
-
- :returns: reservation_list, list of dictionaries with 'lease_id',
- 'reserved_nodes','slice_id', 'state', 'user', 'component_id_list',
- 'slice_hrn', 'resource_ids', 't_from', 't_until'
- :rtype: list
-
- """
-
- unfiltered_reservation_list = self.testbed_shell.GetReservedNodes(login)
-
- reservation_list = []
- #Find the slice associated with this user iotlab ldap uid
- logger.debug(" CORTEXLAB \tGetLeases login %s\
- unfiltered_reservation_list %s "
- % (login, unfiltered_reservation_list))
- #Create user dict first to avoid looking several times for
- #the same user in LDAP SA 27/07/12
- experiment_id_list = []
- jobs_psql_query = self.api.dbsession().query(LeaseTableXP).all()
- jobs_psql_dict = dict([(row.experiment_id, row.__dict__)
- for row in jobs_psql_query])
- #jobs_psql_dict = jobs_psql_dict)
- logger.debug("CORTEXLAB \tGetLeases jobs_psql_dict %s"
- % (jobs_psql_dict))
- jobs_psql_id_list = [row.experiment_id for row in jobs_psql_query]
-
- for resa in unfiltered_reservation_list:
- logger.debug("CORTEXLAB \tGetLeases USER %s"
- % (resa['user']))
- #Construct list of jobs (runing, waiting..) from scheduler
- experiment_id_list.append(resa['lease_id'])
- #If there is information on the job in IOTLAB DB ]
- #(slice used and job id)
- if resa['lease_id'] in jobs_psql_dict:
- job_info = jobs_psql_dict[resa['lease_id']]
- logger.debug("CORTEXLAB \tGetLeases job_info %s"
- % (job_info))
- resa['slice_hrn'] = job_info['slice_hrn']
- resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
-
- #otherwise, assume it is a iotlab slice:
- else:
- resa['slice_id'] = hrn_to_urn(self.testbed_shell.root_auth \
- + '.' + resa['user'] + "_slice",
- 'slice')
- resa['slice_hrn'] = Xrn(resa['slice_id']).get_hrn()
-
- resa['component_id_list'] = []
- #Transform the hostnames into urns (component ids)
- for node in resa['reserved_nodes']:
-
- iotlab_xrn = xrn_object(self.testbed_shell.root_auth, node)
- resa['component_id_list'].append(iotlab_xrn.urn)
-
- if lease_filter_dict:
- logger.debug("CORTEXLAB \tGetLeases \
- \r\n leasefilter %s" % ( lease_filter_dict))
-
- # filter_dict_functions = {
- # 'slice_hrn' : IotlabShell.filter_lease_name,
- # 't_from' : IotlabShell.filter_lease_start_time
- # }
- reservation_list = list(unfiltered_reservation_list)
- for filter_type in lease_filter_dict:
- logger.debug("CORTEXLAB \tGetLeases reservation_list %s" \
- % (reservation_list))
- reservation_list = self.testbed_shell.filter_lease(
- reservation_list,filter_type,
- lease_filter_dict[filter_type] )
-
- # Filter the reservation list with a maximum timespan so that the
- # leases and jobs running after this timestamp do not appear
- # in the result leases.
- # if 'start_time' in :
- # if resa['start_time'] < lease_filter_dict['start_time']:
- # reservation_list.append(resa)
-
-
- # if 'name' in lease_filter_dict and \
- # lease_filter_dict['name'] == resa['slice_hrn']:
- # reservation_list.append(resa)
-
-
- if lease_filter_dict is None:
- reservation_list = unfiltered_reservation_list
-
- self.update_experiments_in_lease_table(experiment_id_list,
- jobs_psql_id_list)
-
- logger.debug(" CORTEXLAB.PY \tGetLeases reservation_list %s"
- % (reservation_list))
- return reservation_list
-
- def update_experiments_in_lease_table(self,
- experiment_list_from_testbed, experiment_list_in_db):
- """Cleans the lease_table by deleting expired and cancelled jobs.
-
- Compares the list of experiment ids given by the testbed with the
- experiment ids that are already in the database, deletes the
- experiments that are no longer in the testbed experiment id list.
-
- :param experiment_list_from_testbed: list of experiment ids coming
- from testbed
- :type experiment_list_from_testbed: list
- :param experiment_list_in_db: list of experiment ids from the sfa
- additionnal database.
- :type experiment_list_in_db: list
-
- :returns: None
- """
- #Turn the list into a set
- set_experiment_list_in_db = set(experiment_list_in_db)
-
- kept_experiments = set(experiment_list_from_testbed).intersection(set_experiment_list_in_db)
- logger.debug("\r\n \t update_experiments_in_lease_table \
- experiment_list_in_db %s \r\n \
- experiment_list_from_testbed %s \
- kept_experiments %s "
- % (set_experiment_list_in_db,
- experiment_list_from_testbed, kept_experiments))
- deleted_experiments = set_experiment_list_in_db.difference(
- kept_experiments)
- deleted_experiments = list(deleted_experiments)
- if len(deleted_experiments) > 0:
- request = self.api.dbsession().query(LeaseTableXP)
- request.filter(LeaseTableXP.experiment_id.in_(deleted_experiments)).delete(synchronize_session='fetch')
- self.api.dbsession().commit()
- return
-
-
- def AddSlice(self, slice_record, user_record):
- """
-
- Add slice to the local cortexlab sfa tables if the slice comes
- from a federated site and is not yet in the cortexlab sfa DB,
- although the user has already a LDAP login.
- Called by verify_slice during lease/sliver creation.
-
- :param slice_record: record of slice, must contain hrn, gid, slice_id
- and authority of the slice.
- :type slice_record: dictionary
- :param user_record: record of the user
- :type user_record: RegUser
-
- """
-
- sfa_record = RegSlice(hrn=slice_record['hrn'],
- gid=slice_record['gid'],
- pointer=slice_record['slice_id'],
- authority=slice_record['authority'])
- logger.debug("CORTEXLAB_API.PY AddSlice sfa_record %s user_record %s"
- % (sfa_record, user_record))
- sfa_record.just_created()
- self.api.dbsession().add(sfa_record)
- self.api.dbsession().commit()
- #Update the reg-researcher dependance table
- sfa_record.reg_researchers = [user_record]
- self.api.dbsession().commit()
-
- return
-
- def augment_records_with_testbed_info(self, record_list):
- """
-
- Adds specific testbed info to the records.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :returns: list of records with extended information in each record
- :rtype: list
-
- """
- return self.fill_record_info(record_list)
-
- def fill_record_info(self, record_list):
- """
-
- For each SFA record, fill in the iotlab specific and SFA specific
- fields in the record.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :returns: list of records with extended information in each record
- :rtype: list
-
- .. warning:: Should not be modifying record_list directly because modi
- fication are kept outside the method's scope. Howerver, there is no
- other way to do it given the way it's called in registry manager.
-
- """
-
- logger.debug("CORTEXLABDRIVER \tfill_record_info records %s "
- % (record_list))
- if not isinstance(record_list, list):
- record_list = [record_list]
-
- try:
- for record in record_list:
-
- if str(record['type']) == 'node':
- # look for node info using GetNodes
- # the record is about one node only
- filter_dict = {'hrn': [record['hrn']]}
- node_info = self.testbed_shell.GetNodes(filter_dict)
- # the node_info is about one node only, but it is formatted
- # as a list
- record.update(node_info[0])
- logger.debug("CORTEXLABDRIVER.PY \t \
- fill_record_info NODE" % (record))
-
- #If the record is a SFA slice record, then add information
- #about the user of this slice. This kind of
- #information is in the Iotlab's DB.
- if str(record['type']) == 'slice':
- if 'reg_researchers' in record and isinstance(record
- ['reg_researchers'],
- list):
- record['reg_researchers'] = \
- record['reg_researchers'][0].__dict__
- record.update(
- {'PI': [record['reg_researchers']['hrn']],
- 'researcher': [record['reg_researchers']['hrn']],
- 'name': record['hrn'],
- 'experiment_id': [],
- 'node_ids': [],
- 'person_ids': [record['reg_researchers']
- ['record_id']],
- # For client_helper.py compatibility
- 'geni_urn': '',
- # For client_helper.py compatibility
- 'keys': '',
- # For client_helper.py compatibility
- 'key_ids': ''})
-
- #Get slice record and job id if any.
- recslice_list = self.GetSlices(
- slice_filter=str(record['hrn']),
- slice_filter_type='slice_hrn')
-
- logger.debug("CORTEXLABDRIVER \tfill_record_info \
- TYPE SLICE RECUSER record['hrn'] %s record['experiment_id']\
- %s " % (record['hrn'], record['experiment_id']))
- del record['reg_researchers']
- try:
- for rec in recslice_list:
- logger.debug("CORTEXLABDRIVER\r\n \t \
- fill_record_info experiment_id %s "
- % (rec['experiment_id']))
-
- record['node_ids'] = [self.testbed_shell.root_auth +
- '.' + hostname for hostname
- in rec['node_ids']]
- except KeyError:
- pass
-
- logger.debug("CORTEXLABDRIVER.PY \t fill_record_info SLICE \
- recslice_list %s \r\n \t RECORD %s \r\n \
- \r\n" % (recslice_list, record))
-
- if str(record['type']) == 'user':
- #The record is a SFA user record.
- #Get the information about his slice from Iotlab's DB
- #and add it to the user record.
- recslice_list = self.GetSlices(
- slice_filter=record['record_id'],
- slice_filter_type='record_id_user')
-
- logger.debug("CORTEXLABDRIVER.PY \t fill_record_info \
- TYPE USER recslice_list %s \r\n \t RECORD %s \r\n"
- % (recslice_list, record))
- #Append slice record in records list,
- #therefore fetches user and slice info again(one more loop)
- #Will update PIs and researcher for the slice
-
- recuser = recslice_list[0]['reg_researchers']
- logger.debug("CORTEXLABDRIVER.PY \t fill_record_info USER \
- recuser %s \r\n \r\n" % (recuser))
- recslice = {}
- recslice = recslice_list[0]
- recslice.update(
- {'PI': [recuser['hrn']],
- 'researcher': [recuser['hrn']],
- 'name': recuser['hrn'],
- 'node_ids': [],
- 'experiment_id': [],
- 'person_ids': [recuser['record_id']]})
- try:
- for rec in recslice_list:
- recslice['experiment_id'].append(rec['experiment_id'])
- except KeyError:
- pass
-
- recslice.update({'type': 'slice',
- 'hrn': recslice_list[0]['hrn']})
-
- #GetPersons takes [] as filters
- user_cortexlab = self.testbed_shell.GetPersons([record])
-
- record.update(user_cortexlab[0])
- #For client_helper.py compatibility
- record.update(
- {'geni_urn': '',
- 'keys': '',
- 'key_ids': ''})
- record_list.append(recslice)
-
- logger.debug("CORTEXLABDRIVER.PY \t \
- fill_record_info ADDING SLICE\
- INFO TO USER records %s" % (record_list))
-
- except TypeError, error:
- logger.log_exc("CORTEXLABDRIVER \t fill_record_info EXCEPTION %s"
- % (error))
-
- return record_list
-
- def sliver_status(self, slice_urn, slice_hrn):
- """
- Receive a status request for slice named urn/hrn
- urn:publicid:IDN+iotlab+nturro_slice hrn iotlab.nturro_slice
- shall return a structure as described in
- http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
- NT : not sure if we should implement this or not, but used by sface.
-
- :param slice_urn: slice urn
- :type slice_urn: string
- :param slice_hrn: slice hrn
- :type slice_hrn: string
-
- """
-
- #First get the slice with the slice hrn
- slice_list = self.GetSlices(slice_filter=slice_hrn,
- slice_filter_type='slice_hrn')
-
- if len(slice_list) == 0:
- raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
-
- #Used for fetching the user info witch comes along the slice info
- one_slice = slice_list[0]
-
- #Make a list of all the nodes hostnames in use for this slice
- slice_nodes_list = []
- slice_nodes_list = one_slice['node_ids']
- #Get all the corresponding nodes details
- nodes_all = self.testbed_shell.GetNodes(
- {'hostname': slice_nodes_list},
- ['node_id', 'hostname', 'site', 'boot_state'])
- nodeall_byhostname = dict([(one_node['hostname'], one_node)
- for one_node in nodes_all])
-
- for single_slice in slice_list:
- #For compatibility
- top_level_status = 'empty'
- result = {}
- result.fromkeys(
- ['geni_urn', 'geni_error', 'cortexlab_login', 'geni_status',
- 'geni_resources'], None)
- # result.fromkeys(\
- # ['geni_urn','geni_error', 'pl_login','geni_status',
- # 'geni_resources'], None)
- # result['pl_login'] = one_slice['reg_researchers'][0].hrn
- result['cortexlab_login'] = one_slice['user']
- logger.debug("Slabdriver - sliver_status Sliver status \
- urn %s hrn %s single_slice %s \r\n "
- % (slice_urn, slice_hrn, single_slice))
-
- if 'node_ids' not in single_slice:
- #No job in the slice
- result['geni_status'] = top_level_status
- result['geni_resources'] = []
- return result
-
- top_level_status = 'ready'
-
- #A job is running on Iotlab for this slice
- # report about the local nodes that are in the slice only
-
- result['geni_urn'] = slice_urn
-
- resources = []
- for node_hostname in single_slice['node_ids']:
- res = {}
- res['cortexlab_hostname'] = node_hostname
- res['cortexlab_boot_state'] = \
- nodeall_byhostname[node_hostname]['boot_state']
-
- sliver_id = Xrn(
- slice_urn, type='slice',
- id=nodeall_byhostname[node_hostname]['node_id']).urn
-
- res['geni_urn'] = sliver_id
- #node_name = node['hostname']
- if nodeall_byhostname[node_hostname]['boot_state'] == 'Alive':
-
- res['geni_status'] = 'ready'
- else:
- res['geni_status'] = 'failed'
- top_level_status = 'failed'
-
- res['geni_error'] = ''
-
- resources.append(res)
-
- result['geni_status'] = top_level_status
- result['geni_resources'] = resources
- logger.debug("CORTEXLABDRIVER \tsliver_statusresources %s res %s "
- % (resources, res))
- return result
-
- def get_user_record(self, hrn):
- """
-
- Returns the user record based on the hrn from the SFA DB .
-
- :param hrn: user's hrn
- :type hrn: string
- :returns: user record from SFA database
- :rtype: RegUser
-
- """
- return self.api.dbsession().query(RegRecord).filter_by(hrn=hrn).first()
-
- def testbed_name(self):
- """
-
- Returns testbed's name.
- :returns: testbed authority name.
- :rtype: string
-
- """
- return self.hrn
-
-
- def _get_requested_leases_list(self, rspec):
- """
- Process leases in rspec depending on the rspec version (format)
- type. Find the lease requests in the rspec and creates
- a lease request list with the mandatory information ( nodes,
- start time and duration) of the valid leases (duration above or
- equal to the iotlab experiment minimum duration).
-
- :param rspec: rspec request received.
- :type rspec: RSpec
- :returns: list of lease requests found in the rspec
- :rtype: list
- """
- requested_lease_list = []
- for lease in rspec.version.get_leases():
- single_requested_lease = {}
- logger.debug("CORTEXLABDRIVER.PY \t \
- _get_requested_leases_list lease %s " % (lease))
-
- if not lease.get('lease_id'):
- if get_authority(lease['component_id']) == \
- self.testbed_shell.root_auth:
- single_requested_lease['hostname'] = \
- xrn_to_hostname(\
- lease.get('component_id').strip())
- single_requested_lease['start_time'] = \
- lease.get('start_time')
- single_requested_lease['duration'] = lease.get('duration')
- #Check the experiment's duration is valid before adding
- #the lease to the requested leases list
- duration_in_seconds = \
- int(single_requested_lease['duration'])
- if duration_in_seconds >= self.testbed_shell.GetMinExperimentDurationInGranularity():
- requested_lease_list.append(single_requested_lease)
-
- return requested_lease_list
-
- @staticmethod
- def _group_leases_by_start_time(requested_lease_list):
- """
- Create dict of leases by start_time, regrouping nodes reserved
- at the same time, for the same amount of time so as to
- define one job on OAR.
-
- :param requested_lease_list: list of leases
- :type requested_lease_list: list
- :returns: Dictionary with key = start time, value = list of leases
- with the same start time.
- :rtype: dictionary
-
- """
-
- requested_xp_dict = {}
- for lease in requested_lease_list:
-
- #In case it is an asap experiment start_time is empty
- if lease['start_time'] == '':
- lease['start_time'] = '0'
-
- if lease['start_time'] not in requested_xp_dict:
- if isinstance(lease['hostname'], str):
- lease['hostname'] = [lease['hostname']]
-
- requested_xp_dict[lease['start_time']] = lease
-
- else:
- job_lease = requested_xp_dict[lease['start_time']]
- if lease['duration'] == job_lease['duration']:
- job_lease['hostname'].append(lease['hostname'])
-
- return requested_xp_dict
-
-
- def _process_requested_xp_dict(self, rspec):
- """
- Turns the requested leases and information into a dictionary
- of requested jobs, grouped by starting time.
-
- :param rspec: RSpec received
- :type rspec : RSpec
- :rtype: dictionary
-
- """
- requested_lease_list = self._get_requested_leases_list(rspec)
- logger.debug("CORTEXLABDRIVER _process_requested_xp_dict \
- requested_lease_list %s" % (requested_lease_list))
- xp_dict = self._group_leases_by_start_time(requested_lease_list)
- logger.debug("CORTEXLABDRIVER _process_requested_xp_dict xp_dict\
- %s" % (xp_dict))
-
- return xp_dict
-
-
-
- def delete(self, slice_urns, options=None):
- """
- Deletes the lease associated with the slice hrn and the credentials
- if the slice belongs to iotlab. Answer to DeleteSliver.
-
- :param slice_urn: urn of the slice
- :type slice_urn: string
-
-
- :returns: 1 if the slice to delete was not found on iotlab,
- True if the deletion was successful, False otherwise otherwise.
-
- .. note:: Should really be named delete_leases because iotlab does
- not have any slivers, but only deals with leases. However,
- SFA api only have delete_sliver define so far. SA 13/05/2013
- .. note:: creds are unused, and are not used either in the dummy driver
- delete_sliver .
- """
- if options is None: options={}
- # collect sliver ids so we can update sliver allocation states after
- # we remove the slivers.
- aggregate = CortexlabAggregate(self)
- slivers = aggregate.get_slivers(slice_urns)
- if slivers:
- # slice_id = slivers[0]['slice_id']
- node_ids = []
- sliver_ids = []
- sliver_jobs_dict = {}
- for sliver in slivers:
- node_ids.append(sliver['node_id'])
- sliver_ids.append(sliver['sliver_id'])
- job_id = sliver['sliver_id'].split('+')[-1].split('-')[0]
- sliver_jobs_dict[job_id] = sliver['sliver_id']
- logger.debug("CORTEXLABDRIVER.PY delete_sliver slivers %s slice_urns %s"
- % (slivers, slice_urns))
- slice_hrn = urn_to_hrn(slice_urns[0])[0]
-
- sfa_slice_list = self.GetSlices(slice_filter=slice_hrn,
- slice_filter_type='slice_hrn')
-
- if not sfa_slice_list:
- return 1
-
- #Delete all leases in the slice
- for sfa_slice in sfa_slice_list:
- logger.debug("CORTEXLABDRIVER.PY delete_sliver slice %s" \
- % (sfa_slice))
- slices = CortexlabSlices(self)
- # determine if this is a peer slice
-
- peer = slices.get_peer(slice_hrn)
-
- logger.debug("CORTEXLABDRIVER.PY delete_sliver peer %s \
- \r\n \t sfa_slice %s " % (peer, sfa_slice))
- testbed_bool_ans = self.testbed_shell.DeleteSliceFromNodes(sfa_slice)
- for job_id in testbed_bool_ans:
- # if the job has not been successfully deleted
- # don't delete the associated sliver
- # remove it from the sliver list
- if testbed_bool_ans[job_id] is False:
- sliver = sliver_jobs_dict[job_id]
- sliver_ids.remove(sliver)
- try:
-
- dbsession = self.api.dbsession()
- SliverAllocation.delete_allocations(sliver_ids, dbsession)
- except :
- logger.log_exc("CORTEXLABDRIVER.PY delete error ")
-
- # prepare return struct
- geni_slivers = []
- for sliver in slivers:
- geni_slivers.append(
- {'geni_sliver_urn': sliver['sliver_id'],
- 'geni_allocation_status': 'geni_unallocated',
- 'geni_expires': datetime_to_string(utcparse(sliver['expires']))})
- return geni_slivers
-
- def list_slices(self, creds, options):
- """Answer to ListSlices.
-
- List slices belonging to iotlab, returns slice urns list.
- No caching used. Options unused but are defined in the SFA method
- api prototype.
-
- :returns: slice urns list
- :rtype: list
-
- .. note:: creds are unused- SA 12/12/13
- """
- # look in cache first
- #if self.cache:
- #slices = self.cache.get('slices')
- #if slices:
- #logger.debug("PlDriver.list_slices returns from cache")
- #return slices
-
- # get data from db
-
- slices = self.GetSlices()
- logger.debug("CORTEXLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n"
- % (slices))
- slice_hrns = [iotlab_slice['hrn'] for iotlab_slice in slices]
-
- slice_urns = [hrn_to_urn(slice_hrn, 'slice')
- for slice_hrn in slice_hrns]
-
- # cache the result
- #if self.cache:
- #logger.debug ("IotlabDriver.list_slices stores value in cache")
- #self.cache.add('slices', slice_urns)
-
- return slice_urns
-
-
- def register(self, sfa_record, hrn, pub_key):
- """
- Adding new user, slice, node or site should not be handled
- by SFA.
-
- ..warnings:: should not be used. Different components are in charge of
- doing this task. Adding nodes = OAR
- Adding users = LDAP Iotlab
- Adding slice = Import from LDAP users
- Adding site = OAR
-
- :param sfa_record: record provided by the client of the
- Register API call.
- :type sfa_record: dict
- :param pub_key: public key of the user
- :type pub_key: string
-
- .. note:: DOES NOTHING. Returns -1.
-
- """
- return -1
-
- def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
- """
- No site or node record update allowed in Iotlab. The only modifications
- authorized here are key deletion/addition on an existing user and
- password change. On an existing user, CAN NOT BE MODIFIED: 'first_name',
- 'last_name', 'email'. DOES NOT EXIST IN SENSLAB: 'phone', 'url', 'bio',
- 'title', 'accepted_aup'. A slice is bound to its user, so modifying the
-        user's ssh key should modify the slice's GID after an import procedure.
-
- :param old_sfa_record: what is in the db for this hrn
- :param new_sfa_record: what was passed to the update call
- :param new_key: the new user's public key
- :param hrn: the user's sfa hrn
- :type old_sfa_record: dict
- :type new_sfa_record: dict
- :type new_key: string
- :type hrn: string
-
- TODO: needs review
- .. warning:: SA 12/12/13 - Removed. should be done in iotlabimporter
- since users, keys and slice are managed by the LDAP.
-
- """
- # pointer = old_sfa_record['pointer']
- # old_sfa_record_type = old_sfa_record['type']
-
- # # new_key implemented for users only
- # if new_key and old_sfa_record_type not in ['user']:
- # raise UnknownSfaType(old_sfa_record_type)
-
- # if old_sfa_record_type == "user":
- # update_fields = {}
- # all_fields = new_sfa_record
- # for key in all_fields.keys():
- # if key in ['key', 'password']:
- # update_fields[key] = all_fields[key]
-
- # if new_key:
- # # must check this key against the previous one if it exists
- # persons = self.testbed_shell.GetPersons([old_sfa_record])
- # person = persons[0]
- # keys = [person['pkey']]
- # #Get all the person's keys
- # keys_dict = self.GetKeys(keys)
-
- # # Delete all stale keys, meaning the user has only one key
- # #at a time
- # #TODO: do we really want to delete all the other keys?
- # #Is this a problem with the GID generation to have multiple
- # #keys? SA 30/05/13
- # key_exists = False
- # if key in keys_dict:
- # key_exists = True
- # else:
- # #remove all the other keys
- # for key in keys_dict:
- # self.testbed_shell.DeleteKey(person, key)
- # self.testbed_shell.AddPersonKey(
- # person, {'sshPublicKey': person['pkey']},
- # {'sshPublicKey': new_key})
- logger.warning ("UNDEFINED - Update should be done by the \
- iotlabimporter")
- return True
-
-
- def remove(self, sfa_record):
- """
-
- Removes users only. Mark the user as disabled in LDAP. The user and his
- slice are then deleted from the db by running an import on the registry.
-
- :param sfa_record: record is the existing sfa record in the db
- :type sfa_record: dict
-
-        .. warning:: As far as the slice is concerned, here only the leases are
-            removed from the slice. The slice record itself is not removed
-            from the db.
-
- TODO: needs review
-
- TODO : REMOVE SLICE FROM THE DB AS WELL? SA 14/05/2013,
-
- TODO: return boolean for the slice part
- """
- sfa_record_type = sfa_record['type']
- hrn = sfa_record['hrn']
- if sfa_record_type == 'user':
-
- #get user from iotlab ldap
- person = self.testbed_shell.GetPersons(sfa_record)
- #No registering at a given site in Iotlab.
- #Once registered to the LDAP, all iotlab sites are
-            #accessible.
- if person:
- #Mark account as disabled in ldap
- return self.testbed_shell.DeletePerson(sfa_record)
-
- elif sfa_record_type == 'slice':
- if self.GetSlices(slice_filter=hrn,
- slice_filter_type='slice_hrn'):
- ret = self.testbed_shell.DeleteSlice(sfa_record)
- return True
-
- def check_sliver_credentials(self, creds, urns):
- """Check that the sliver urns belongs to the slice specified in the
- credentials.
-
- :param urns: list of sliver urns.
- :type urns: list.
- :param creds: slice credentials.
- :type creds: Credential object.
-
-
- """
- # build list of cred object hrns
- slice_cred_names = []
- for cred in creds:
- slice_cred_hrn = Credential(cred=cred).get_gid_object().get_hrn()
- slicename = IotlabXrn(xrn=slice_cred_hrn).iotlab_slicename()
- slice_cred_names.append(slicename)
-
- # look up slice name of slivers listed in urns arg
-
- slice_ids = []
- for urn in urns:
- sliver_id_parts = Xrn(xrn=urn).get_sliver_id_parts()
- try:
- slice_ids.append(int(sliver_id_parts[0]))
- except ValueError:
- pass
-
- if not slice_ids:
- raise Forbidden("sliver urn not provided")
-
- slices = self.GetSlices(slice_ids)
- sliver_names = [single_slice['name'] for single_slice in slices]
-
- # make sure we have a credential for every specified sliver
- for sliver_name in sliver_names:
- if sliver_name not in slice_cred_names:
- msg = "Valid credential not found for target: %s" % sliver_name
- raise Forbidden(msg)
-
- ########################################
- ########## aggregate oriented
- ########################################
-
- # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
- def aggregate_version(self):
- """
-
- Returns the testbed's supported rspec advertisement and request
- versions.
-        :returns: rspec versions supported as a dictionary.
- :rtype: dict
-
- """
- version_manager = VersionManager()
- ad_rspec_versions = []
- request_rspec_versions = []
- for rspec_version in version_manager.versions:
- if rspec_version.content_type in ['*', 'ad']:
- ad_rspec_versions.append(rspec_version.to_dict())
- if rspec_version.content_type in ['*', 'request']:
- request_rspec_versions.append(rspec_version.to_dict())
- return {
- 'testbed': self.testbed_name(),
- 'geni_request_rspec_versions': request_rspec_versions,
- 'geni_ad_rspec_versions': ad_rspec_versions}
-
-
-
- # first 2 args are None in case of resource discovery
- def list_resources (self, version=None, options=None):
- if options is None: options={}
- aggregate = CortexlabAggregate(self)
- rspec = aggregate.list_resources(version=version, options=options)
- return rspec
-
-
- def describe(self, urns, version, options=None):
- if options is None: options={}
- aggregate = CortexlabAggregate(self)
- return aggregate.describe(urns, version=version, options=options)
-
- def status (self, urns, options=None):
- if options is None: options={}
- aggregate = CortexlabAggregate(self)
- desc = aggregate.describe(urns, version='GENI 3')
- status = {'geni_urn': desc['geni_urn'],
- 'geni_slivers': desc['geni_slivers']}
- return status
-
-
- def allocate (self, urn, rspec_string, expiration, options=None):
- if options is None: options={}
- xrn = Xrn(urn)
- aggregate = CortexlabAggregate(self)
-
- slices = CortexlabSlices(self)
- peer = slices.get_peer(xrn.get_hrn())
- sfa_peer = slices.get_sfa_peer(xrn.get_hrn())
-
- slice_record = None
- users = options.get('geni_users', [])
-
- sfa_users = options.get('sfa_users', [])
- if sfa_users:
- slice_record = sfa_users[0].get('slice_record', [])
-
- # parse rspec
- rspec = RSpec(rspec_string)
- # requested_attributes = rspec.version.get_slice_attributes()
-
- # ensure site record exists
-
- # ensure slice record exists
-
- current_slice = slices.verify_slice(xrn.hrn, slice_record, sfa_peer)
- logger.debug("IOTLABDRIVER.PY \t ===============allocate \t\
- \r\n \r\n current_slice %s" % (current_slice))
- # ensure person records exists
-
-        # ugly: slice_record ends up modified inside the method with the
-        # user's info; that info is then propagated in verify_slice_leases
-        # (translated from the original French comment)
- persons = slices.verify_persons(xrn.hrn, slice_record, users,
- options=options)
- # ensure slice attributes exists
- # slices.verify_slice_attributes(slice, requested_attributes,
- # options=options)
-
- # add/remove slice from nodes
- requested_xp_dict = self._process_requested_xp_dict(rspec)
-
- logger.debug("IOTLABDRIVER.PY \tallocate requested_xp_dict %s "
- % (requested_xp_dict))
- request_nodes = rspec.version.get_nodes_with_slivers()
- nodes_list = []
- for start_time in requested_xp_dict:
- lease = requested_xp_dict[start_time]
- for hostname in lease['hostname']:
- nodes_list.append(hostname)
-
- # nodes = slices.verify_slice_nodes(slice_record,request_nodes, peer)
- logger.debug("IOTLABDRIVER.PY \tallocate nodes_list %s slice_record %s"
- % (nodes_list, slice_record))
-
- # add/remove leases
- rspec_requested_leases = rspec.version.get_leases()
- leases = slices.verify_slice_leases(slice_record,
- requested_xp_dict, peer)
- logger.debug("IOTLABDRIVER.PY \tallocate leases %s \
- rspec_requested_leases %s" % (leases,
- rspec_requested_leases))
- # update sliver allocations
- for hostname in nodes_list:
- client_id = hostname
- node_urn = xrn_object(self.testbed_shell.root_auth, hostname).urn
- component_id = node_urn
- slice_urn = current_slice['reg-urn']
- for lease in leases:
- if hostname in lease['reserved_nodes']:
- index = lease['reserved_nodes'].index(hostname)
- sliver_hrn = '%s.%s-%s' % (self.hrn, lease['lease_id'],
- lease['resource_ids'][index] )
- sliver_id = Xrn(sliver_hrn, type='sliver').urn
- record = SliverAllocation(sliver_id=sliver_id, client_id=client_id,
- component_id=component_id,
- slice_urn = slice_urn,
- allocation_state='geni_allocated')
- record.sync(self.api.dbsession())
-
- return aggregate.describe([xrn.get_urn()], version=rspec.version)
-
- def provision(self, urns, options=None):
- if options is None: options={}
- # update users
- slices = CortexlabSlices(self)
- aggregate = CortexlabAggregate(self)
- slivers = aggregate.get_slivers(urns)
- current_slice = slivers[0]
- peer = slices.get_peer(current_slice['hrn'])
- sfa_peer = slices.get_sfa_peer(current_slice['hrn'])
- users = options.get('geni_users', [])
- # persons = slices.verify_persons(current_slice['hrn'],
- # current_slice, users, peer, sfa_peer, options=options)
- # slices.handle_peer(None, None, persons, peer)
- # update sliver allocation states and set them to geni_provisioned
- sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- dbsession = self.api.dbsession()
- SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned',
- dbsession)
- version_manager = VersionManager()
- rspec_version = version_manager.get_version(options[
- 'geni_rspec_version'])
- return self.describe(urns, rspec_version, options=options)
-
-
-
+++ /dev/null
-"""
-File used to handle all the nodes querying:
-* get nodes list along with their properties with get_all_nodes
-
-* get sites and their properties with get_sites.
-
-* get nodes involved in leases sorted by lease id, with get_reserved_nodes.
-
-* create a lease (schedule an experiment) with schedule_experiment.
-
-* delete a lease with delete_experiment.
-
-"""
-
-class CortexlabQueryNodes:
- def __init__(self):
-
- pass
-
- def get_all_nodes(self, node_filter_dict=None, return_fields_list=None):
- """
- Get all the nodes and their properties. Called by GetNodes.
- Filtering on nodes properties can be done here or in GetNodes.
- Search for specific nodes if some filters are specified. Returns all
- the nodes properties if return_fields_list is None.
-
-
- :param node_filter_dict: dictionary of lists with node properties. For
- instance, if you want to look for a specific node with its hrn,
- the node_filter_dict should be {'hrn': [hrn_of_the_node]}
- :type node_filter_dict: dict
- :param return_fields_list: list of specific fields the user wants to be
- returned.
- :type return_fields_list: list
- :returns: list of dictionaries with node properties
- :rtype: list
-
- TODO: Define which properties have to be listed here. Useful ones:
- node architecture, radio type, position (x,y,z)
- """
- node_dict_list = None
- # Get the nodes here, eventually filter here
- # See iotlabapi.py GetNodes to get the filtering (node_filter_dict and
- # return_fields_list ) part, if necessary
- # Format used in iotlab
- node_dict_list = [
- {'hrn': 'iotlab.wsn430-11.devlille.iot-lab.info',
- 'archi': 'wsn430', 'mobile': 'True',
- 'hostname': 'wsn430-11.devlille.iot-lab.info',
- 'site': 'devlille', 'mobility_type': 'None',
- 'boot_state': 'Suspected',
- 'node_id': 'wsn430-11.devlille.iot-lab.info',
- 'radio': 'cc2420', 'posx': '2.3', 'posy': '2.3',
- 'node_number': 11, 'posz': '1'},
- {'hrn': 'iotlab.wsn430-10.devlille.iot-lab.info',
- 'archi': 'wsn430', 'mobile': 'True',
- 'hostname': 'wsn430-10.devlille.iot-lab.info',
- 'site': 'devlille', 'mobility_type': 'None',
- 'boot_state': 'Alive', 'node_id': 'wsn430-10.devlille.iot-lab.info',
- 'radio': 'cc2420', 'posx': '1.3', 'posy': '2.3', 'node_number': 10,
- 'posz': '1'},
- {'hrn': 'iotlab.wsn430-1.devlille.iot-lab.info',
- 'archi': 'wsn430', 'mobile': 'False',
- 'hostname': 'wsn430-1.devlille.iot-lab.info',
- 'site': 'devlille', 'mobility_type': 'None',
- 'boot_state': 'Alive', 'node_id': 'wsn430-1.devlille.iot-lab.info',
- 'radio': 'cc2420', 'posx': '0.3', 'posy': '0.3', 'node_number': 1,
- 'posz': '1'} ]
- return node_dict_list
-
-
-
-
- def get_sites(self, site_filter_name_list=None, return_fields_list=None):
-
- """Get the different cortexlab sites and for each sites, the nodes
- hostnames on this site.
-
- :param site_filter_name_list: used to specify specific sites
-        :param return_fields_list: fields that have to be returned
- :type site_filter_name_list: list
- :type return_fields_list: list
- :rtype: list of dictionaries
- """
- site_dict_list = None
- site_dict_list = [
- {'address_ids': [], 'slice_ids': [], 'name': 'iotlab',
- 'node_ids': [u'wsn430-11.devlille.iot-lab.info',
- u'wsn430-10.devlille.iot-lab.info', u'wsn430-1.devlille.iot-lab.info'],
- 'url': 'https://portal.senslab.info', 'person_ids': [],
- 'site_tag_ids': [], 'enabled': True, 'site': 'devlille',
- 'longitude': '- 2.10336', 'pcu_ids': [], 'max_slivers': None,
- 'max_slices': None, 'ext_consortium_id': None, 'date_created': None,
- 'latitude': '48.83726', 'is_public': True, 'peer_site_id': None,
- 'peer_id': None, 'abbreviated_name': 'iotlab'}]
- # list of dict with mandatory keys ['name', 'node_ids', 'longitude',
- # 'site' ]. Value for key node_ids is a hostname list.
- # See iotlabapi.py GetSites to get the filtering
- return site_dict_list
-
-
- def get_reserved_nodes(self, username):
- """Get list of leases. Get the leases for the username if specified,
- otherwise get all the leases.
- :param username: user's LDAP login
- :type username: string
- :returns: list of reservations dict
- :rtype: list of dictionaries
-
- """
- reserved_nodes_list_dict = None
-
- reserved_nodes_list_dict = [{'lease_id': 1658,
- 'reserved_nodes': [ 'wsn430-11.devlille.iot-lab.info'], 'state':
- 'Waiting', 'user': 'avakian', 'resource_ids': [11],
- 't_from': 1412938800, 't_until': 1412942640}]
-
- return reserved_nodes_list_dict
-
- def schedule_experiment(self, lease_dict):
- """Schedule/ run an experiment based on the information provided in the
- lease dictionary.
-
- :param lease_dict: contains lease_start_time, lease_duration,
- added_nodes, slice_name , slice_user, grain:
- :type lease_dict: dictionary
- :rtype: dict
- """
- answer = {}
- answer['id'] = None #experiment id
- answer['msg'] = None #message in case of error
-
-
- answer['id'] = 1659
-
- # Launch the experiment here
-
- return answer
-
- def delete_experiment(self, experiment_id, username):
- """
- Delete the experiment designated by its experiment id and its
- user.
- TODO: If the username is not necessary to delete the lease, then you can
- remove it from the parameters, given that you propagate the changes
-
- :param experiment_id: experiment identifier
- :type experiment_id : integer
- :param username: user's LDAP login
- :type experiment_id: integer
- :type username: string
- :returns: dict with delete status {'status': True of False}
- :rtype: dict
- """
- # Delete the experiment here. Ret['status'] should be True or False
- # depending if the delete was effective or not.
- ret = {}
- ret['status'] = None
- return ret
+++ /dev/null
-"""
-File defining classes to handle the table in the iotlab dedicated database.
-"""
-
-from sqlalchemy import create_engine
-from sqlalchemy.orm import sessionmaker
-# from sfa.util.config import Config
-from sfa.util.sfalogging import logger
-
-from sqlalchemy import Column, Integer, String
-from sqlalchemy import Table, MetaData
-from sqlalchemy.ext.declarative import declarative_base
-
-# from sqlalchemy.dialects import postgresql
-
-from sqlalchemy.exc import NoSuchTableError
-
-
-#Dict holding the columns names of the table as keys
-#and their type, used for creation of the table
-slice_table = {'record_id_user': 'integer PRIMARY KEY references X ON DELETE \
- CASCADE ON UPDATE CASCADE', 'oar_job_id': 'integer DEFAULT -1',
- 'record_id_slice': 'integer', 'slice_hrn': 'text NOT NULL'}
-
-#Dict with all the specific iotlab tables
-tablenames_dict = {'iotlab_xp': slice_table}
-
-
-IotlabBase = declarative_base()
-
-
-class IotlabXP (IotlabBase):
- """ SQL alchemy class to manipulate the rows of the slice_iotlab table in
- iotlab_sfa database. Handles the records representation and creates the
- table if it does not exist yet.
-
- """
- __tablename__ = 'iotlab_xp'
-
- slice_hrn = Column(String)
- job_id = Column(Integer, primary_key=True)
- end_time = Column(Integer, nullable=False)
-
- def __init__(self, slice_hrn=None, job_id=None, end_time=None):
- """
- Defines a row of the slice_iotlab table
- """
- if slice_hrn:
- self.slice_hrn = slice_hrn
- if job_id:
- self.job_id = job_id
- if end_time:
- self.end_time = end_time
-
- def __repr__(self):
- """Prints the SQLAlchemy record to the format defined
- by the function.
- """
- result = "<iotlab_xp : slice_hrn = %s , job_id %s end_time = %s" \
- % (self.slice_hrn, self.job_id, self.end_time)
- result += ">"
- return result
-
-
-class IotlabDB(object):
- """ SQL Alchemy connection class.
- From alchemy.py
- """
- # Stores the unique Singleton instance-
- _connection_singleton = None
- # defines the database name
- dbname = "iotlab_sfa"
-
- class Singleton:
- """
- Class used with this Python singleton design pattern to allow the
- definition of one single instance of iotlab db session in the whole
- code. Wherever a connection to the database is needed, this class
- returns the same instance every time. Removes the need for global
- variable throughout the code.
- """
-
- def __init__(self, config, debug=False):
- self.iotlab_engine = None
- self.iotlab_session = None
- self.url = None
- self.create_iotlab_engine(config, debug)
- self.session()
-
- def create_iotlab_engine(self, config, debug=False):
- """Creates the SQLAlchemy engine, which is the starting point for
- any SQLAlchemy application.
- :param config: configuration object created by SFA based on the
- configuration file in /etc
- :param debug: if set to true, echo and echo pool will be set to true
- as well. If echo is True, all statements as well as a repr() of
- their parameter lists to the engines logger, which defaults to
- sys.stdout. If echo_pool is True, the connection pool will log all
- checkouts/checkins to the logging stream. A python logger can be
- used to configure this logging directly but so far it has not been
- configured. Refer to sql alchemy engine documentation.
-
- :type config: Config instance (sfa.util.config)
- :type debug: bool
-
- """
-
- if debug is True:
- l_echo_pool = True
- l_echo = True
- else:
- l_echo_pool = False
- l_echo = False
- # the former PostgreSQL.py used the psycopg2 directly and was doing
- #self.connection.set_client_encoding("UNICODE")
- # it's unclear how to achieve this in sqlalchemy, nor if it's needed
- # at all
- # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
- # we indeed have /var/lib/pgsql/data/postgresql.conf where
- # this setting is unset, it might be an angle to tweak that if need
- # be try a unix socket first
- # - omitting the hostname does the trick
- unix_url = "postgresql+psycopg2://%s:%s@:%s/%s" \
- % (config.SFA_DB_USER, config.SFA_DB_PASSWORD,
- config.SFA_DB_PORT, IotlabDB.dbname)
-
- # the TCP fallback method
- tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s" \
- % (config.SFA_DB_USER, config.SFA_DB_PASSWORD,
- config.SFA_DB_HOST, config.SFA_DB_PORT, IotlabDB.dbname)
-
- for url in [unix_url, tcp_url]:
- try:
- self.iotlab_engine = create_engine(
- url, echo_pool=l_echo_pool, echo=l_echo)
- self.check()
- self.url = url
- return
- except:
- pass
- self.iotlab_engine = None
-
- raise Exception("Could not connect to database")
-
- def check(self):
- """ Check if a table exists by trying a selection
- on the table.
-
- """
- self.iotlab_engine.execute("select 1").scalar()
-
-
- def session(self):
- """
- Creates a SQLalchemy session. Once the session object is created
- it should be used throughout the code for all the operations on
- tables for this given database.
-
- """
- if self.iotlab_session is None:
- Session = sessionmaker()
- self.iotlab_session = Session(bind=self.iotlab_engine)
- return self.iotlab_session
-
- def close_session(self):
- """
- Closes connection to database.
-
- """
- if self.iotlab_session is None:
- return
- self.iotlab_session.close()
- self.iotlab_session = None
-
-
- def update_jobs_in_iotlabdb(self, job_oar_list, jobs_psql):
- """ Cleans the iotlab db by deleting expired and cancelled jobs.
-
- Compares the list of job ids given by OAR with the job ids that
- are already in the database, deletes the jobs that are no longer in
- the OAR job id list.
-
- :param job_oar_list: list of job ids coming from OAR
- :type job_oar_list: list
- :param job_psql: list of job ids from the database.
- :type job_psql: list
-
- :returns: None
- """
- #Turn the list into a set
- set_jobs_psql = set(jobs_psql)
-
- kept_jobs = set(job_oar_list).intersection(set_jobs_psql)
- logger.debug("\r\n \t update_jobs_in_iotlabdb jobs_psql %s \r\n \
- job_oar_list %s kept_jobs %s "
- % (set_jobs_psql, job_oar_list, kept_jobs))
- deleted_jobs = set_jobs_psql.difference(kept_jobs)
- deleted_jobs = list(deleted_jobs)
- if len(deleted_jobs) > 0:
- self.iotlab_session.query(IotlabXP).filter(IotlabXP.job_id.in_(deleted_jobs)).delete(synchronize_session='fetch')
- self.iotlab_session.commit()
- return
-
- def __init__(self, config, debug=False):
- self.sl_base = IotlabBase
-
- # Check whether we already have an instance
- if IotlabDB._connection_singleton is None:
- IotlabDB._connection_singleton = IotlabDB.Singleton(config, debug)
-
- # Store instance reference as the only member in the handle
- self._EventHandler_singleton = IotlabDB._connection_singleton
-
- def __getattr__(self, aAttr):
- """
- Delegate access to implementation.
-
- :param aAttr: Attribute wanted.
- :returns: Attribute
- """
- return getattr(self._connection_singleton, aAttr)
-
-
-
- # def __setattr__(self, aAttr, aValue):
- # """Delegate access to implementation.
-
- # :param attr: Attribute wanted.
- # :param value: Vaule to be set.
- # :return: Result of operation.
- # """
- # return setattr(self._connection_singleton, aAttr, aValue)
-
- def exists(self, tablename):
- """
- Checks if the table specified as tablename exists.
- :param tablename: name of the table in the db that has to be checked.
- :type tablename: string
- :returns: True if the table exists, False otherwise.
- :rtype: bool
-
- """
- metadata = MetaData(bind=self.iotlab_engine)
- try:
- table = Table(tablename, metadata, autoload=True)
- return True
-
- except NoSuchTableError:
- logger.log_exc("IOTLABPOSTGRES tablename %s does not exist"
- % (tablename))
- return False
-
- def createtable(self):
- """
-        Creates all the tables of the engine.
-        Uses the global dictionary holding the tablenames and the table schema.
-
- """
-
- logger.debug("IOTLABPOSTGRES createtable \
- IotlabBase.metadata.sorted_tables %s \r\n engine %s"
- % (IotlabBase.metadata.sorted_tables, self.iotlab_engine))
- IotlabBase.metadata.create_all(self.iotlab_engine)
- return
+++ /dev/null
-"""
-File containing the CortexlabShell, used to interact with nodes, users,
-slices, leases and keys, as well as the dedicated iotlab database and table,
-holding information about which slice is running which job.
-
-"""
-from datetime import datetime
-
-from sfa.util.sfalogging import logger
-from sfa.util.sfatime import SFATIME_FORMAT
-
-from sfa.iotlab.iotlabpostgres import LeaseTableXP
-from sfa.cortexlab.LDAPapi import LDAPapi
-
-
-
-from sfa.iotlab.iotlabxrn import xrn_object
-from sfa.cortexlab.cortexlabnodes import CortexlabQueryNodes
-
-class CortexlabShell():
- """ Class enabled to use LDAP and OAR api calls. """
-
- _MINIMUM_DURATION = 10 # 10 units of granularity 60 s, 10 mins
-
- def __init__(self, config):
- """Creates an instance of OARrestapi and LDAPapi which will be used to
- issue calls to OAR or LDAP methods.
- Set the time format and the testbed granularity used for OAR
- reservation and leases.
-
- :param config: configuration object from sfa.util.config
- :type config: Config object
- """
-
- self.query_sites = CortexlabQueryNodes()
- self.ldap = LDAPapi()
- self.time_format = SFATIME_FORMAT
- self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
- self.grain = 60 # 10 mins lease minimum, 60 sec granularity
- #import logging, logging.handlers
- #from sfa.util.sfalogging import _SfaLogger
- #sql_logger = _SfaLogger(loggername = 'sqlalchemy.engine', \
- #level=logging.DEBUG)
- return
-
- @staticmethod
- def GetMinExperimentDurationInGranularity():
- """ Returns the minimum allowed duration for an experiment on the
- testbed. In seconds.
-
- """
- return CortexlabShell._MINIMUM_DURATION
-
- #TODO : Handling OR request in make_ldap_filters_from_records
- #instead of the for loop
- #over the records' list
- def GetPersons(self, person_filter=None):
- """
- Get the enabled users and their properties from Cortexlab LDAP.
- If a filter is specified, looks for the user whose properties match
-        the filter, otherwise returns the whole enabled users' list.
-
-        :param person_filter: Must be a list of dictionaries with users
- properties when not set to None.
- :type person_filter: list of dict
-
- :returns: Returns a list of users whose accounts are enabled
- found in ldap.
- :rtype: list of dicts
-
- """
- logger.debug("CORTEXLAB_API \tGetPersons person_filter %s"
- % (person_filter))
- person_list = []
- if person_filter and isinstance(person_filter, list):
- #If we are looking for a list of users (list of dict records)
- #Usually the list contains only one user record
- for searched_attributes in person_filter:
-
- #Get only enabled user accounts in iotlab LDAP :
- #add a filter for make_ldap_filters_from_record
- person = self.ldap.LdapFindUser(searched_attributes,
- is_user_enabled=True)
- #If a person was found, append it to the list
- if person:
- person_list.append(person)
-
- #If the list is empty, return None
- if len(person_list) is 0:
- person_list = None
-
- else:
- #Get only enabled user accounts in iotlab LDAP :
- #add a filter for make_ldap_filters_from_record
- person_list = self.ldap.LdapFindUser(is_user_enabled=True)
-
- return person_list
-
-
-
- def DeleteOneLease(self, lease_id, username):
- """
-
- Deletes the lease with the specified lease_id and username on OAR by
- posting a delete request to OAR.
-
- :param lease_id: Reservation identifier.
- :param username: user's iotlab login in LDAP.
-        :type lease_id: Depends on what you are using, could be integer or
- string
- :type username: string
-
- :returns: dictionary with the lease id and if delete has been successful
- (True) or no (False)
- :rtype: dict
-
- """
-
- # Here delete the lease specified
- answer = self.query_sites.delete_experiment(lease_id, username)
-
- # If the username is not necessary to delete the lease, then you can
- # remove it from the parameters, given that you propagate the changes
- # Return delete status so that you know if the delete has been
-        # successful or not
-
-
- if answer['status'] is True:
- ret = {lease_id: True}
- else:
- ret = {lease_id: False}
- logger.debug("CORTEXLAB_API \DeleteOneLease lease_id %s \r\n answer %s \
- username %s" % (lease_id, answer, username))
- return ret
-
-
-
- def GetNodesCurrentlyInUse(self):
- """Returns a list of all the nodes involved in a currently running
- experiment (and only the one not available at the moment the call to
- this method is issued)
- :rtype: list of nodes hostnames.
- """
- node_hostnames_list = []
- return node_hostnames_list
-
-
- def GetReservedNodes(self, username=None):
- """ Get list of leases. Get the leases for the username if specified,
- otherwise get all the leases. Finds the nodes hostnames for each
- OAR node identifier.
- :param username: user's LDAP login
- :type username: string
- :returns: list of reservations dict
- :rtype: dict list
- """
-
- #Get the nodes in use and the reserved nodes
- mandatory_sfa_keys = ['reserved_nodes','lease_id']
- reservation_dict_list = \
- self.query_sites.get_reserved_nodes(username = username)
-
- if len(reservation_dict_list) == 0:
- return []
-
- else:
- # Ensure mandatory keys are in the dict
- if not self.ensure_format_is_valid(reservation_dict_list,
- mandatory_sfa_keys):
- raise KeyError, "GetReservedNodes : Missing SFA mandatory keys"
-
-
- return reservation_dict_list
-
- @staticmethod
- def ensure_format_is_valid(list_dictionary_to_check, mandatory_keys_list):
- for entry in list_dictionary_to_check:
- if not all (key in entry for key in mandatory_keys_list):
- return False
- return True
-
- def GetNodes(self, node_filter_dict=None, return_fields_list=None):
- """
-
- Make a list of cortexlab nodes and their properties from information
- given by ?. Search for specific nodes if some filters are
- specified. Nodes properties returned if no return_fields_list given:
- 'hrn','archi','mobile','hostname','site','boot_state','node_id',
- 'radio','posx','posy,'posz'.
-
-        :param node_filter_dict: dictionary of lists with node properties. For
- instance, if you want to look for a specific node with its hrn,
- the node_filter_dict should be {'hrn': [hrn_of_the_node]}
- :type node_filter_dict: dict
- :param return_fields_list: list of specific fields the user wants to be
- returned.
- :type return_fields_list: list
- :returns: list of dictionaries with node properties. Mandatory
- properties hrn, site, hostname. Complete list (iotlab) ['hrn',
- 'archi', 'mobile', 'hostname', 'site', 'mobility_type',
- 'boot_state', 'node_id','radio', 'posx', 'posy', 'oar_id', 'posz']
- Radio, archi, mobile and position are useful to help users choose
- the appropriate nodes.
- :rtype: list
-
- :TODO: FILL IN THE BLANKS
- """
-
- # Here get full dict of nodes with all their properties.
- mandatory_sfa_keys = ['hrn', 'site', 'hostname']
- node_list_dict = self.query_sites.get_all_nodes(node_filter_dict,
- return_fields_list)
-
- if len(node_list_dict) == 0:
- return_node_list = []
-
- else:
- # Ensure mandatory keys are in the dict
- if not self.ensure_format_is_valid(node_list_dict,
- mandatory_sfa_keys):
- raise KeyError, "GetNodes : Missing SFA mandatory keys"
-
-
- return_node_list = node_list_dict
- return return_node_list
-
-
-
-
- def GetSites(self, site_filter_name_list=None, return_fields_list=None):
- """Returns the list of Cortexlab's sites with the associated nodes and
- the sites' properties as dictionaries. Used in import.
-
- Site properties:
- ['address_ids', 'slice_ids', 'name', 'node_ids', 'url', 'person_ids',
- 'site_tag_ids', 'enabled', 'site', 'longitude', 'pcu_ids',
- 'max_slivers', 'max_slices', 'ext_consortium_id', 'date_created',
- 'latitude', 'is_public', 'peer_site_id', 'peer_id', 'abbreviated_name']
- can be empty ( []): address_ids, slice_ids, pcu_ids, person_ids,
- site_tag_ids
-
- :param site_filter_name_list: used to specify specific sites
- :param return_fields_list: field that has to be returned
- :type site_filter_name_list: list
- :type return_fields_list: list
- :rtype: list of dicts
-
- """
- site_list_dict = self.query_sites.get_sites(site_filter_name_list,
- return_fields_list)
-
- mandatory_sfa_keys = ['name', 'node_ids', 'longitude','site' ]
-
- if len(site_list_dict) == 0:
- return_site_list = []
-
- else:
- # Ensure mandatory keys are in the dict
- if not self.ensure_format_is_valid(site_list_dict,
- mandatory_sfa_keys):
- raise KeyError, "GetSites : Missing sfa mandatory keys"
-
- return_site_list = site_list_dict
- return return_site_list
-
-
- #TODO : Check rights to delete person
- def DeletePerson(self, person_record):
- """Disable an existing account in cortexlab LDAP.
-
- Users and techs can only delete themselves. PIs can only
- delete themselves and other non-PIs at their sites.
- ins can delete anyone.
-
- :param person_record: user's record
- :type person_record: dict
- :returns: True if successful, False otherwise.
- :rtype: boolean
-
- .. todo:: CHECK THAT ONLY THE USER OR ADMIN CAN DEL HIMSELF.
- """
- #Disable user account in iotlab LDAP
- ret = self.ldap.LdapMarkUserAsDeleted(person_record)
- logger.warning("CORTEXLAB_API DeletePerson %s " % (person_record))
- return ret['bool']
-
- def DeleteSlice(self, slice_record):
- """Deletes the specified slice and kills the jobs associated with
- the slice if any, using DeleteSliceFromNodes.
-
- :param slice_record: record of the slice, must contain experiment_id, user
- :type slice_record: dict
- :returns: True if all the jobs in the slice have been deleted,
- or the list of jobs that could not be deleted otherwise.
- :rtype: list or boolean
-
- .. seealso:: DeleteSliceFromNodes
-
- """
- ret = self.DeleteSliceFromNodes(slice_record)
- delete_failed = None
- for experiment_id in ret:
- if False in ret[experiment_id]:
- if delete_failed is None:
- delete_failed = []
- delete_failed.append(experiment_id)
-
- logger.info("CORTEXLAB_API DeleteSlice %s answer %s"%(slice_record, \
- delete_failed))
- return delete_failed or True
-
-
- #TODO AddPersonKey 04/07/2012 SA
- def AddPersonKey(self, person_uid, old_attributes_dict, new_key_dict):
- """Adds a new key to the specified account. Adds the key to the
- iotlab ldap, provided that the person_uid is valid.
-
- Non-admins can only modify their own keys.
-
- :param person_uid: user's iotlab login in LDAP
- :param old_attributes_dict: dict with the user's old sshPublicKey
- :param new_key_dict: dict with the user's new sshPublicKey
- :type person_uid: string
-
-
- :rtype: Boolean
- :returns: True if the key has been modified, False otherwise.
-
- """
- ret = self.ldap.LdapModify(person_uid, old_attributes_dict, \
- new_key_dict)
- logger.warning("CORTEXLAB_API AddPersonKey EMPTY - DO NOTHING \r\n ")
- return ret['bool']
-
- def DeleteLeases(self, leases_id_list, slice_hrn):
- """
-
- Deletes several leases, based on their experiment ids and the slice
- they are associated with. Uses DeleteOneLease to delete the
- experiment on the testbed. Note that one slice can contain multiple
- experiments, and in this
- case all the experiments in the leases_id_list MUST belong to this
- same slice, since there is only one slice hrn provided here.
-
- :param leases_id_list: list of job ids that belong to the slice whose
- slice hrn is provided.
- :param slice_hrn: the slice hrn.
- :type slice_hrn: string
-
- .. warning:: Does not have a return value since there was no easy
- way to handle failure when dealing with multiple job delete. Plus,
- there was no easy way to report it to the user.
-
- """
- logger.debug("CORTEXLAB_API DeleteLeases leases_id_list %s slice_hrn %s \
- \r\n " %(leases_id_list, slice_hrn))
- for experiment_id in leases_id_list:
- self.DeleteOneLease(experiment_id, slice_hrn)
-
- return
-
-
- @staticmethod
- def _process_walltime(duration):
- """ Calculates the walltime in seconds from the duration in H:M:S
- specified in the RSpec.
-
- """
- if duration:
- # Fixing the walltime by adding a few delays.
- # First put the walltime in seconds oarAdditionalDelay = 20;
- # additional delay for /bin/sleep command to
- # take in account prologue and epilogue scripts execution
- # int walltimeAdditionalDelay = 240; additional delay
- #for prologue/epilogue execution = $SERVER_PROLOGUE_EPILOGUE_TIMEOUT
- #in oar.conf
- # Put the duration in seconds first
- #desired_walltime = duration * 60
- desired_walltime = duration
- total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
- sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
- walltime = []
- #Put the walltime back in str form
- #First get the hours
- walltime.append(str(total_walltime / 3600))
- total_walltime = total_walltime - 3600 * int(walltime[0])
- #Get the remaining minutes
- walltime.append(str(total_walltime / 60))
- total_walltime = total_walltime - 60 * int(walltime[1])
- #Get the seconds
- walltime.append(str(total_walltime))
-
- else:
- logger.log_exc(" __process_walltime duration null")
-
- return walltime, sleep_walltime
-
- @staticmethod
- def _create_job_structure_request_for_OAR(lease_dict):
- """ Creates the structure needed for a correct POST on OAR.
- Makes the timestamp transformation into the appropriate format.
- Sends the POST request to create the job with the resources in
- added_nodes.
-
- """
-
- nodeid_list = []
- reqdict = {}
-
-
- reqdict['workdir'] = '/tmp'
- reqdict['resource'] = "{network_address in ("
-
- for node in lease_dict['added_nodes']:
- logger.debug("\r\n \r\n OARrestapi \t \
- __create_job_structure_request_for_OAR node %s" %(node))
-
- # Get the ID of the node
- nodeid = node
- reqdict['resource'] += "'" + nodeid + "', "
- nodeid_list.append(nodeid)
-
- custom_length = len(reqdict['resource'])- 2
- reqdict['resource'] = reqdict['resource'][0:custom_length] + \
- ")}/nodes=" + str(len(nodeid_list))
-
-
- walltime, sleep_walltime = \
- CortexlabShell._process_walltime(\
- int(lease_dict['lease_duration']))
-
-
- reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
- ":" + str(walltime[1]) + ":" + str(walltime[2])
- reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
-
- #In case of a scheduled experiment (not immediate)
- #To run an XP immediately, don't specify date and time in RSpec
- #They will be set to None.
- if lease_dict['lease_start_time'] is not '0':
- #Readable time accepted by OAR
- start_time = datetime.fromtimestamp( \
- int(lease_dict['lease_start_time'])).\
- strftime(lease_dict['time_format'])
- reqdict['reservation'] = start_time
- #If there is not start time, Immediate XP. No need to add special
- # OAR parameters
-
-
- reqdict['type'] = "deploy"
- reqdict['directory'] = ""
- reqdict['name'] = "SFA_" + lease_dict['slice_user']
-
- return reqdict
-
-
- def LaunchExperimentOnTestbed(self, added_nodes, slice_name, \
- lease_start_time, lease_duration, slice_user=None):
-
- """
- Create an experiment request structure based on the information provided
- and schedule/run the experiment on the testbed by reserving the nodes.
- :param added_nodes: list of nodes that belong to the described lease.
- :param slice_name: the slice hrn associated to the lease.
- :param lease_start_time: timestamp of the lease startting time.
- :param lease_duration: lease duration in minutes
-
- """
- lease_dict = {}
- # Add in the dict whatever is necessary to create the experiment on
- # the testbed
- lease_dict['lease_start_time'] = lease_start_time
- lease_dict['lease_duration'] = lease_duration
- lease_dict['added_nodes'] = added_nodes
- lease_dict['slice_name'] = slice_name
- lease_dict['slice_user'] = slice_user
- lease_dict['grain'] = self.GetLeaseGranularity()
-
-
-
- answer = self.query_sites.schedule_experiment(lease_dict)
- try:
- experiment_id = answer['id']
- except KeyError:
- logger.log_exc("CORTEXLAB_API \tLaunchExperimentOnTestbed \
- Impossible to create xp %s " %(answer))
- return None
-
- if experiment_id :
- logger.debug("CORTEXLAB_API \tLaunchExperimentOnTestbed \
- experiment_id %s added_nodes %s slice_user %s"
- %(experiment_id, added_nodes, slice_user))
-
-
- return experiment_id
-
-
-
-
- #Delete the jobs from job_iotlab table
- def DeleteSliceFromNodes(self, slice_record):
- """
- Deletes all the running or scheduled jobs of a given slice
- given its record.
-
- :param slice_record: record of the slice, must contain experiment_id,
- user
- :type slice_record: dict
- :returns: dict of the jobs'deletion status. Success= True, Failure=
- False, for each job id.
- :rtype: dict
-
- .. note: used in driver delete_sliver
-
- """
- logger.debug("CORTEXLAB_API \t DeleteSliceFromNodes %s "
- % (slice_record))
-
- if isinstance(slice_record['experiment_id'], list):
- experiment_bool_answer = {}
- for experiment_id in slice_record['experiment_id']:
- ret = self.DeleteOneLease(experiment_id, slice_record['user'])
-
- experiment_bool_answer.update(ret)
-
- else:
- experiment_bool_answer = [self.DeleteOneLease(
- slice_record['experiment_id'],
- slice_record['user'])]
-
- return experiment_bool_answer
-
-
-
- def GetLeaseGranularity(self):
- """ Returns the granularity of an experiment in the Iotlab testbed.
- OAR uses seconds for experiments duration , the granulaity is also
- defined in seconds.
- Experiments which last less than 10 min (600 sec) are invalid"""
- return self.grain
-
- @staticmethod
- def filter_lease(reservation_list, filter_type, filter_value ):
- """Filters the lease reservation list by removing each lease whose
- filter_type is not equal to the filter_value provided. Returns the list
- of leases in one slice, defined by the slice_hrn if filter_type
- is 'slice_hrn'. Otherwise, returns all leases scheduled starting from
- the filter_value if filter_type is 't_from'.
-
- :param reservation_list: leases list
- :type reservation_list: list of dictionary
- :param filter_type: can be either 't_from' or 'slice hrn'
- :type filter_type: string
- :param filter_value: depending on the filter_type, can be the slice_hrn
- or can be defining a timespan.
- :type filter_value: if filter_type is 't_from', filter_value is int.
- if filter_type is 'slice_hrn', filter_value is a string.
-
-
- :returns: filtered_reservation_list, contains only leases running or
- scheduled in the given slice (wanted_slice).Dict keys are
- 'lease_id','reserved_nodes','slice_id', 'state', 'user',
- 'component_id_list','slice_hrn', 'resource_ids', 't_from', 't_until'
- :rtype: list of dict
-
- """
- filtered_reservation_list = list(reservation_list)
- logger.debug("IOTLAB_API \t filter_lease_name reservation_list %s" \
- % (reservation_list))
- try:
- for reservation in reservation_list:
- if \
- (filter_type is 'slice_hrn' and \
- reservation['slice_hrn'] != filter_value) or \
- (filter_type is 't_from' and \
- reservation['t_from'] > filter_value):
- filtered_reservation_list.remove(reservation)
- except TypeError:
- logger.log_exc("Iotlabshell filter_lease : filter_type %s \
- filter_value %s not in lease" %(filter_type,
- filter_value))
-
- return filtered_reservation_list
-
- # @staticmethod
- # def filter_lease_name(reservation_list, filter_value):
- # filtered_reservation_list = list(reservation_list)
- # logger.debug("CORTEXLAB_API \t filter_lease_name reservation_list %s" \
- # % (reservation_list))
- # for reservation in reservation_list:
- # if 'slice_hrn' in reservation and \
- # reservation['slice_hrn'] != filter_value:
- # filtered_reservation_list.remove(reservation)
-
- # logger.debug("CORTEXLAB_API \t filter_lease_name filtered_reservation_list %s" \
- # % (filtered_reservation_list))
- # return filtered_reservation_list
-
- # @staticmethod
- # def filter_lease_start_time(reservation_list, filter_value):
- # filtered_reservation_list = list(reservation_list)
-
- # for reservation in reservation_list:
- # if 't_from' in reservation and \
- # reservation['t_from'] > filter_value:
- # filtered_reservation_list.remove(reservation)
-
- # return filtered_reservation_list
-
- def complete_leases_info(self, unfiltered_reservation_list, db_xp_dict):
-
- """Check that the leases list of dictionaries contains the appropriate
- fields and piece of information here
- :param unfiltered_reservation_list: list of leases to be completed.
- :param db_xp_dict: leases information in the lease_sfa table
- :returns local_unfiltered_reservation_list: list of leases completed.
- list of dictionaries describing the leases, with all the needed
- information (sfa,ldap,nodes)to identify one particular lease.
- :returns testbed_xp_list: list of experiments'ids running or scheduled
- on the testbed.
- :rtype local_unfiltered_reservation_list: list of dict
- :rtype testbed_xp_list: list
-
- """
- testbed_xp_list = []
- local_unfiltered_reservation_list = list(unfiltered_reservation_list)
- # slice_hrn and lease_id are in the lease_table,
- # so they are in the db_xp_dict.
- # component_id_list : list of nodes xrns
- # reserved_nodes : list of nodes' hostnames
- # slice_id : slice urn, can be made from the slice hrn using hrn_to_urn
- for resa in local_unfiltered_reservation_list:
-
- #Construct list of scheduled experiments (runing, waiting..)
- testbed_xp_list.append(resa['lease_id'])
- #If there is information on the experiment in the lease table
- #(slice used and experiment id), meaning the experiment was created
- # using sfa
- if resa['lease_id'] in db_xp_dict:
- xp_info = db_xp_dict[resa['lease_id']]
- logger.debug("CORTEXLAB_API \tGetLeases xp_info %s"
- % (xp_info))
- resa['slice_hrn'] = xp_info['slice_hrn']
- resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
-
- #otherwise, assume it is a cortexlab slice, created via the
- # cortexlab portal
- else:
- resa['slice_id'] = hrn_to_urn(self.root_auth + '.' +
- resa['user'] + "_slice", 'slice')
- resa['slice_hrn'] = Xrn(resa['slice_id']).get_hrn()
-
- resa['component_id_list'] = []
- #Transform the hostnames into urns (component ids)
- for node in resa['reserved_nodes']:
-
- iotlab_xrn = xrn_object(self.root_auth, node)
- resa['component_id_list'].append(iotlab_xrn.urn)
-
- return local_unfiltered_reservation_list, testbed_xp_list
-
-
-#TODO FUNCTIONS SECTION 04/07/2012 SA
-
- ##TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
- ##04/07/2012 SA
- #@staticmethod
- #def UnBindObjectFromPeer( auth, object_type, object_id, shortname):
- #""" This method is a hopefully temporary hack to let the sfa correctly
- #detach the objects it creates from a remote peer object. This is
- #needed so that the sfa federation link can work in parallel with
- #RefreshPeer, as RefreshPeer depends on remote objects being correctly
- #marked.
- #Parameters:
- #auth : struct, API authentication structure
- #AuthMethod : string, Authentication method to use
- #object_type : string, Object type, among 'site','person','slice',
- #'node','key'
- #object_id : int, object_id
- #shortname : string, peer shortname
- #FROM PLC DOC
-
- #"""
- #logger.warning("CORTEXLAB_API \tUnBindObjectFromPeer EMPTY-\
- #DO NOTHING \r\n ")
- #return
-
- ##TODO Is BindObjectToPeer still necessary ? Currently does nothing
- ##04/07/2012 SA
- #|| Commented out 28/05/13 SA
- #def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
- #remote_object_id=None):
- #"""This method is a hopefully temporary hack to let the sfa correctly
- #attach the objects it creates to a remote peer object. This is needed
- #so that the sfa federation link can work in parallel with RefreshPeer,
- #as RefreshPeer depends on remote objects being correctly marked.
- #Parameters:
- #shortname : string, peer shortname
- #remote_object_id : int, remote object_id, set to 0 if unknown
- #FROM PLC API DOC
-
- #"""
- #logger.warning("CORTEXLAB_API \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
- #return
-
- ##TODO UpdateSlice 04/07/2012 SA || Commented out 28/05/13 SA
- ##Funciton should delete and create another job since oin iotlab slice=job
- #def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
- #"""Updates the parameters of an existing slice with the values in
- #slice_fields.
- #Users may only update slices of which they are members.
- #PIs may update any of the slices at their sites, or any slices of
- #which they are members. Admins may update any slice.
- #Only PIs and admins may update max_nodes. Slices cannot be renewed
- #(by updating the expires parameter) more than 8 weeks into the future.
- #Returns 1 if successful, faults otherwise.
- #FROM PLC API DOC
-
- #"""
- #logger.warning("CORTEXLAB_API UpdateSlice EMPTY - DO NOTHING \r\n ")
- #return
-
- #Unused SA 30/05/13, we only update the user's key or we delete it.
- ##TODO UpdatePerson 04/07/2012 SA
- #def UpdatePerson(self, iotlab_hrn, federated_hrn, person_fields=None):
- #"""Updates a person. Only the fields specified in person_fields
- #are updated, all other fields are left untouched.
- #Users and techs can only update themselves. PIs can only update
- #themselves and other non-PIs at their sites.
- #Returns 1 if successful, faults otherwise.
- #FROM PLC API DOC
-
- #"""
- ##new_row = FederatedToIotlab(iotlab_hrn, federated_hrn)
- ##self.leases_db.testbed_session.add(new_row)
- ##self.leases_db.testbed_session.commit()
-
- #logger.debug("CORTEXLAB_API UpdatePerson EMPTY - DO NOTHING \r\n ")
- #return
-
-
-
-
- #TODO : test
- def DeleteKey(self, user_record, key_string):
- """Deletes a key in the LDAP entry of the specified user.
-
- Removes the key_string from the user's key list and updates the LDAP
- user's entry with the new key attributes.
-
- :param key_string: The ssh key to remove
- :param user_record: User's record
- :type key_string: string
- :type user_record: dict
- :returns: True if sucessful, False if not.
- :rtype: Boolean
-
- """
- all_user_keys = user_record['keys']
- all_user_keys.remove(key_string)
- new_attributes = {'sshPublicKey':all_user_keys}
- ret = self.ldap.LdapModifyUser(user_record, new_attributes)
- logger.debug("CORTEXLAB_API DeleteKey %s- " % (ret))
- return ret['bool']
-
-
-
-
-
-
-
- #Update slice unused, therefore sfa_fields_to_iotlab_fields unused
- #SA 30/05/13
- #@staticmethod
- #def sfa_fields_to_iotlab_fields(sfa_type, hrn, record):
- #"""
- #"""
-
- #iotlab_record = {}
- ##for field in record:
- ## iotlab_record[field] = record[field]
-
- #if sfa_type == "slice":
- ##instantion used in get_slivers ?
- #if not "instantiation" in iotlab_record:
- #iotlab_record["instantiation"] = "iotlab-instantiated"
- ##iotlab_record["hrn"] = hrn_to_pl_slicename(hrn)
- ##Unused hrn_to_pl_slicename because Iotlab's hrn already
- ##in the appropriate form SA 23/07/12
- #iotlab_record["hrn"] = hrn
- #logger.debug("CORTEXLAB_API.PY sfa_fields_to_iotlab_fields \
- #iotlab_record %s " %(iotlab_record['hrn']))
- #if "url" in record:
- #iotlab_record["url"] = record["url"]
- #if "description" in record:
- #iotlab_record["description"] = record["description"]
- #if "expires" in record:
- #iotlab_record["expires"] = int(record["expires"])
-
- ##nodes added by OAR only and then imported to SFA
- ##elif type == "node":
- ##if not "hostname" in iotlab_record:
- ##if not "hostname" in record:
- ##raise MissingSfaInfo("hostname")
- ##iotlab_record["hostname"] = record["hostname"]
- ##if not "model" in iotlab_record:
- ##iotlab_record["model"] = "geni"
-
- ##One authority only
- ##elif type == "authority":
- ##iotlab_record["login_base"] = hrn_to_iotlab_login_base(hrn)
-
- ##if not "name" in iotlab_record:
- ##iotlab_record["name"] = hrn
-
- ##if not "abbreviated_name" in iotlab_record:
- ##iotlab_record["abbreviated_name"] = hrn
-
- ##if not "enabled" in iotlab_record:
- ##iotlab_record["enabled"] = True
-
- ##if not "is_public" in iotlab_record:
- ##iotlab_record["is_public"] = True
-
- #return iotlab_record
-
-
-
-
-
-
-
-
-
-
+++ /dev/null
-"""
-This file defines the IotlabSlices class by which all the slice checkings
-upon lease creation are done.
-"""
-from sfa.util.xrn import get_authority, urn_to_hrn
-from sfa.util.sfalogging import logger
-
-MAXINT = 2L**31-1
-
-
-class CortexlabSlices:
- """
- This class is responsible for checking the slice when creating a
- lease or a sliver. Those checks include verifying that the user is valid,
- that the slice is known from the testbed or from our peers, that the list
- of nodes involved has not changed (in this case the lease is modified
- accordingly).
- """
- rspec_to_slice_tag = {'max_rate': 'net_max_rate'}
-
- def __init__(self, driver):
- """
- Get the reference to the driver here.
- """
- self.driver = driver
-
- def get_peer(self, xrn):
- """
- Finds the authority of a resource based on its xrn.
- If the authority is Iotlab (local) return None,
- Otherwise, look up in the DB if Iotlab is federated with this site
- authority and returns its DB record if it is the case.
-
- :param xrn: resource's xrn
- :type xrn: string
- :returns: peer record
- :rtype: dict
-
- """
- hrn, hrn_type = urn_to_hrn(xrn)
- #Does this slice belong to a local site or a peer cortexlab site?
- peer = None
-
- # get this slice's authority (site)
- slice_authority = get_authority(hrn)
- #Iotlab stuff
- #This slice belongs to the current site
- if slice_authority == self.driver.testbed_shell.root_auth:
- site_authority = slice_authority
- return None
-
- site_authority = get_authority(slice_authority).lower()
- # get this site's authority (sfa root authority or sub authority)
-
- logger.debug("CortexlabSlices \t get_peer slice_authority %s \
- site_authority %s hrn %s"
- % (slice_authority, site_authority, hrn))
-
- # check if we are already peered with this site_authority
- #if so find the peer record
- peers = self.driver.GetPeers(peer_filter=site_authority)
- for peer_record in peers:
- if site_authority == peer_record.hrn:
- peer = peer_record
- logger.debug(" CortexlabSlices \tget_peer peer %s " % (peer))
- return peer
-
- def get_sfa_peer(self, xrn):
- """Returns the authority name for the xrn or None if the local site
- is the authority.
-
- :param xrn: the xrn of the resource we are looking the authority for.
- :type xrn: string
- :returns: the resources's authority name.
- :rtype: string
-
- """
- hrn, hrn_type = urn_to_hrn(xrn)
-
- # return the authority for this hrn or None if we are the authority
- sfa_peer = None
- slice_authority = get_authority(hrn)
- site_authority = get_authority(slice_authority)
-
- if site_authority != self.driver.hrn:
- sfa_peer = site_authority
-
- return sfa_peer
-
- def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
- """
- Compare requested leases with the leases already scheduled/
- running in OAR. If necessary, delete and recreate modified leases,
- and delete no longer requested ones.
-
- :param sfa_slice: sfa slice record
- :param requested_jobs_dict: dictionary of requested leases
- :param peer: sfa peer record
-
- :type sfa_slice: dict
- :type requested_jobs_dict: dict
- :type peer: dict
- :returns: leases list of dictionary
- :rtype: list
-
- """
-
- logger.debug("CortexlabSlices verify_slice_leases sfa_slice %s "
- % (sfa_slice))
- #First get the list of current leases from OAR
- leases = self.driver.GetLeases({'slice_hrn': sfa_slice['hrn']})
- logger.debug("CortexlabSlices verify_slice_leases requested_jobs_dict %s \
- leases %s " % (requested_jobs_dict, leases))
-
- current_nodes_reserved_by_start_time = {}
- requested_nodes_by_start_time = {}
- leases_by_start_time = {}
- reschedule_jobs_dict = {}
-
- #Create reduced dictionary with key start_time and value
- # the list of nodes
- #-for the leases already registered by OAR first
- # then for the new leases requested by the user
-
- #Leases already scheduled/running in OAR
- for lease in leases:
- current_nodes_reserved_by_start_time[lease['t_from']] = \
- lease['reserved_nodes']
- leases_by_start_time[lease['t_from']] = lease
-
- #First remove job whose duration is too short
- for job in requested_jobs_dict.values():
- job['duration'] = \
- str(int(job['duration']) \
- * self.driver.testbed_shell.GetLeaseGranularity())
- if job['duration'] < self.driver.testbed_shell.GetLeaseGranularity():
- del requested_jobs_dict[job['start_time']]
-
- #Requested jobs
- for start_time in requested_jobs_dict:
- requested_nodes_by_start_time[int(start_time)] = \
- requested_jobs_dict[start_time]['hostname']
- #Check if there is any difference between the leases already
- #registered in OAR and the requested jobs.
- #Difference could be:
- #-Lease deleted in the requested jobs
- #-Added/removed nodes
- #-Newly added lease
-
- logger.debug("CortexlabSlices verify_slice_leases \
- requested_nodes_by_start_time %s \
- "% (requested_nodes_by_start_time))
- #Find all deleted leases
- start_time_list = \
- list(set(leases_by_start_time.keys()).\
- difference(requested_nodes_by_start_time.keys()))
- deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
- for start_time in start_time_list]
-
-
- #Find added or removed nodes in exisiting leases
- for start_time in requested_nodes_by_start_time:
- logger.debug("CortexlabSlices verify_slice_leases start_time %s \
- "%( start_time))
- if start_time in current_nodes_reserved_by_start_time:
-
- if requested_nodes_by_start_time[start_time] == \
- current_nodes_reserved_by_start_time[start_time]:
- continue
-
- else:
- update_node_set = \
- set(requested_nodes_by_start_time[start_time])
- added_nodes = \
- update_node_set.difference(\
- current_nodes_reserved_by_start_time[start_time])
- shared_nodes = \
- update_node_set.intersection(\
- current_nodes_reserved_by_start_time[start_time])
- old_nodes_set = \
- set(\
- current_nodes_reserved_by_start_time[start_time])
- removed_nodes = \
- old_nodes_set.difference(\
- requested_nodes_by_start_time[start_time])
- logger.debug("CortexlabSlices verify_slice_leases \
- shared_nodes %s added_nodes %s removed_nodes %s"\
- %(shared_nodes, added_nodes,removed_nodes ))
- #If the lease is modified, delete it before
- #creating it again.
- #Add the deleted lease job id in the list
- #WARNING :rescheduling does not work if there is already
- # 2 running/scheduled jobs because deleting a job
- #takes time SA 18/10/2012
- if added_nodes or removed_nodes:
- deleted_leases.append(\
- leases_by_start_time[start_time]['lease_id'])
- #Reschedule the job
- if added_nodes or shared_nodes:
- reschedule_jobs_dict[str(start_time)] = \
- requested_jobs_dict[str(start_time)]
-
- else:
- #New lease
-
- job = requested_jobs_dict[str(start_time)]
- logger.debug("CortexlabSlices \
- NEWLEASE slice %s job %s"
- % (sfa_slice, job))
- job_id = self.driver.AddLeases(job['hostname'],
- sfa_slice, int(job['start_time']),
- int(job['duration']))
- if job_id is not None:
- new_leases = self.driver.GetLeases(login=
- sfa_slice['login'])
- for new_lease in new_leases:
- leases.append(new_lease)
-
- #Deleted leases are the ones with lease id not declared in the Rspec
- if deleted_leases:
- self.driver.testbed_shell.DeleteLeases(deleted_leases,
- sfa_slice['user']['uid'])
- logger.debug("CortexlabSlices \
- verify_slice_leases slice %s deleted_leases %s"
- % (sfa_slice, deleted_leases))
-
- if reschedule_jobs_dict:
- for start_time in reschedule_jobs_dict:
- job = reschedule_jobs_dict[start_time]
- self.driver.AddLeases(job['hostname'],
- sfa_slice, int(job['start_time']),
- int(job['duration']))
- return leases
-
- def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
- """Check for wanted and unwanted nodes in the slice.
-
- Removes nodes and associated leases that the user does not want anymore
- by deleteing the associated job in OAR (DeleteSliceFromNodes).
- Returns the nodes' hostnames that are going to be in the slice.
-
- :param sfa_slice: slice record. Must contain node_ids and list_node_ids.
-
- :param requested_slivers: list of requested nodes' hostnames.
- :param peer: unused so far.
-
- :type sfa_slice: dict
- :type requested_slivers: list
- :type peer: string
-
- :returns: list requested nodes hostnames
- :rtype: list
-
- .. warning:: UNUSED SQA 24/07/13
- .. seealso:: DeleteSliceFromNodes
- .. todo:: check what to do with the peer? Can not remove peer nodes from
- slice here. Anyway, in this case, the peer should have gotten the
- remove request too.
-
- """
- current_slivers = []
- deleted_nodes = []
-
- if 'node_ids' in sfa_slice:
- nodes = self.driver.testbed_shell.GetNodes(
- sfa_slice['list_node_ids'],
- ['hostname'])
- current_slivers = [node['hostname'] for node in nodes]
-
- # remove nodes not in rspec
- deleted_nodes = list(set(current_slivers).
- difference(requested_slivers))
-
- logger.debug("CortexlabSlices \tverify_slice_nodes slice %s\
- \r\n \r\n deleted_nodes %s"
- % (sfa_slice, deleted_nodes))
-
- if deleted_nodes:
- #Delete the entire experience
- self.driver.testbed_shell.DeleteSliceFromNodes(sfa_slice)
- return nodes
-
- def verify_slice(self, slice_hrn, slice_record, sfa_peer):
- """Ensures slice record exists.
-
- The slice record must exist either in Iotlab or in the other
- federated testbed (sfa_peer). If the slice does not belong to Iotlab,
- check if the user already exists in LDAP. In this case, adds the slice
- to the sfa DB and associates its LDAP user.
-
- :param slice_hrn: slice's name
- :param slice_record: sfa record of the slice
- :param sfa_peer: name of the peer authority if any.(not Iotlab).
-
- :type slice_hrn: string
- :type slice_record: dictionary
- :type sfa_peer: string
-
- .. seealso:: AddSlice
-
-
- """
-
- slicename = slice_hrn
- # check if slice belongs to Iotlab
- slices_list = self.driver.GetSlices(
- slice_filter=slicename, slice_filter_type='slice_hrn')
-
- sfa_slice = None
-
- if slices_list:
- for sl in slices_list:
-
- logger.debug("CortexlabSlices \t verify_slice slicename %s \
- slices_list %s sl %s \r slice_record %s"
- % (slicename, slices_list, sl, slice_record))
- sfa_slice = sl
- sfa_slice.update(slice_record)
-
- else:
- #Search for user in ldap based on email SA 14/11/12
- ldap_user = self.driver.testbed_shell.ldap.LdapFindUser(\
- slice_record['user'])
- logger.debug(" CortexlabSlices \tverify_slice Oups \
- slice_record %s sfa_peer %s ldap_user %s"
- % (slice_record, sfa_peer, ldap_user))
- #User already registered in ldap, meaning user should be in SFA db
- #and hrn = sfa_auth+ uid
- sfa_slice = {'hrn': slicename,
- 'node_list': [],
- 'authority': slice_record['authority'],
- 'gid': slice_record['gid'],
- 'slice_id': slice_record['record_id'],
- 'reg-researchers': slice_record['reg-researchers'],
- 'peer_authority': str(sfa_peer)
- }
-
- if ldap_user:
- hrn = self.driver.testbed_shell.root_auth + '.' \
- + ldap_user['uid']
- user = self.driver.get_user_record(hrn)
-
- logger.debug(" CortexlabSlices \tverify_slice hrn %s USER %s"
- % (hrn, user))
-
- # add the external slice to the local SFA DB
- if sfa_slice:
- self.driver.AddSlice(sfa_slice, user)
-
- logger.debug("CortexlabSlices \tverify_slice ADDSLICE OK")
- return sfa_slice
-
-
- def verify_persons(self, slice_hrn, slice_record, users, options=None):
- """Ensures the users in users list exist and are enabled in LDAP. Adds
- person if needed(AddPerson).
-
- Checking that a user exist is based on the user's email. If the user is
- still not found in the LDAP, it means that the user comes from another
- federated testbed. In this case an account has to be created in LDAP
- so as to enable the user to use the testbed, since we trust the testbed
- he comes from. This is done by calling AddPerson.
-
- :param slice_hrn: slice name
- :param slice_record: record of the slice_hrn
- :param users: users is a record list. Records can either be
- local records or users records from known and trusted federated
- sites.If the user is from another site that cortex;ab doesn't trust
- yet, then Resolve will raise an error before getting to allocate.
-
- :type slice_hrn: string
- :type slice_record: string
- :type users: list
-
- .. seealso:: AddPerson
- .. note:: Removed unused peer and sfa_peer parameters. SA 18/07/13.
-
-
- """
-
- if options is None: options={}
-
- logger.debug("CortexlabSlices \tverify_persons \tslice_hrn %s \
- \t slice_record %s\r\n users %s \t "
- % (slice_hrn, slice_record, users))
-
-
- users_by_email = {}
- #users_dict : dict whose keys can either be the user's hrn or its id.
- #Values contains only id and hrn
- users_dict = {}
-
- #First create dicts by hrn and id for each user in the user record list:
- for info in users:
- # if 'slice_record' in info:
- # slice_rec = info['slice_record']
- # if 'user' in slice_rec :
- # user = slice_rec['user']
-
- if 'email' in info:
- users_by_email[info['email']] = info
- users_dict[info['email']] = info
-
-
- logger.debug("CortexlabSlices.PY \t verify_person \
- users_dict %s \r\n user_by_email %s \r\n "
- %(users_dict, users_by_email))
-
- existing_user_ids = []
- existing_user_emails = []
- existing_users = []
- # Check if user is in Iotlab LDAP using its hrn.
- # Assuming Iotlab is centralised : one LDAP for all sites,
- # user's record_id unknown from LDAP
- # LDAP does not provide users id, therefore we rely on email to find the
- # user in LDAP
-
- if users_by_email:
- #Construct the list of filters (list of dicts) for GetPersons
- filter_user = [users_by_email[email] for email in users_by_email]
- #Check user i in LDAP with GetPersons
- #Needed because what if the user has been deleted in LDAP but
- #is still in SFA?
- existing_users = self.driver.testbed_shell.GetPersons(filter_user)
- logger.debug(" \r\n CortexlabSlices.PY \tverify_person filter_user \
- %s existing_users %s "
- % (filter_user, existing_users))
- #User is in LDAP
- if existing_users:
- for user in existing_users:
- user['login'] = user['uid']
- users_dict[user['email']].update(user)
- existing_user_emails.append(
- users_dict[user['email']]['email'])
-
-
- # User from another known trusted federated site. Check
- # if a cortexlab account matching the email has already been created.
- else:
- req = 'mail='
- if isinstance(users, list):
- req += users[0]['email']
- else:
- req += users['email']
- ldap_reslt = self.driver.testbed_shell.ldap.LdapSearch(req)
-
- if ldap_reslt:
- logger.debug(" CortexlabSlices.PY \tverify_person users \
- USER already in Iotlab \t ldap_reslt %s \
- " % (ldap_reslt))
- existing_users.append(ldap_reslt[1])
-
- else:
- #User not existing in LDAP
- logger.debug("CortexlabSlices.PY \tverify_person users \
- not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t \
- ldap_reslt %s " % (users, ldap_reslt))
-
- requested_user_emails = users_by_email.keys()
- requested_user_hrns = \
- [users_by_email[user]['hrn'] for user in users_by_email]
- logger.debug("CortexlabSlices.PY \tverify_person \
- users_by_email %s " % (users_by_email))
-
- #Check that the user of the slice in the slice record
- #matches one of the existing users
- try:
- if slice_record['reg-researchers'][0] in requested_user_hrns:
- logger.debug(" CortexlabSlices \tverify_person ['PI']\
- slice_record %s" % (slice_record))
-
- except KeyError:
- pass
-
- # users to be added, removed or updated
- #One user in one cortexlab slice : there should be no need
- #to remove/ add any user from/to a slice.
- #However a user from SFA which is not registered in Iotlab yet
- #should be added to the LDAP.
- added_user_emails = set(requested_user_emails).\
- difference(set(existing_user_emails))
-
-
- #self.verify_keys(existing_slice_users, updated_users_list, \
- #peer, append)
-
- added_persons = []
- # add new users
- #requested_user_email is in existing_user_emails
- if len(added_user_emails) == 0:
- slice_record['login'] = users_dict[requested_user_emails[0]]['uid']
- logger.debug(" CortexlabSlices \tverify_person QUICK DIRTY %s"
- % (slice_record))
-
- for added_user_email in added_user_emails:
- added_user = users_dict[added_user_email]
- logger.debug(" CortexlabSlices \r\n \r\n \t verify_person \
- added_user %s" % (added_user))
- person = {}
- person['peer_person_id'] = None
- k_list = ['first_name', 'last_name', 'person_id']
- for k in k_list:
- if k in added_user:
- person[k] = added_user[k]
-
- person['pkey'] = added_user['keys'][0]
- person['mail'] = added_user['email']
- person['email'] = added_user['email']
- person['key_ids'] = added_user.get('key_ids', [])
-
- ret = self.driver.testbed_shell.AddPerson(person)
- if 'uid' in ret:
- # meaning bool is True and the AddPerson was successful
- person['uid'] = ret['uid']
- slice_record['login'] = person['uid']
- else:
- # error message in ret
- logger.debug(" CortexlabSlices ret message %s" %(ret))
-
- logger.debug(" CortexlabSlices \r\n \r\n \t THE SECOND verify_person\
- person %s" % (person))
- #Update slice_Record with the id now known to LDAP
-
-
- added_persons.append(person)
- return added_persons
-
-
- def verify_keys(self, persons, users, peer, options=None):
- """
- .. warning:: unused
- """
- if options is None: options={}
- # existing keys
- key_ids = []
- for person in persons:
- key_ids.extend(person['key_ids'])
- keylist = self.driver.GetKeys(key_ids, ['key_id', 'key'])
-
- keydict = {}
- for key in keylist:
- keydict[key['key']] = key['key_id']
- existing_keys = keydict.keys()
-
- persondict = {}
- for person in persons:
- persondict[person['email']] = person
-
- # add new keys
- requested_keys = []
- updated_persons = []
- users_by_key_string = {}
- for user in users:
- user_keys = user.get('keys', [])
- updated_persons.append(user)
- for key_string in user_keys:
- users_by_key_string[key_string] = user
- requested_keys.append(key_string)
- if key_string not in existing_keys:
- key = {'key': key_string, 'key_type': 'ssh'}
- #try:
- ##if peer:
- #person = persondict[user['email']]
- #self.driver.testbed_shell.UnBindObjectFromPeer(
- # 'person',person['person_id'],
- # peer['shortname'])
- ret = self.driver.testbed_shell.AddPersonKey(
- user['email'], key)
- #if peer:
- #key_index = user_keys.index(key['key'])
- #remote_key_id = user['key_ids'][key_index]
- #self.driver.testbed_shell.BindObjectToPeer('key', \
- #key['key_id'], peer['shortname'], \
- #remote_key_id)
-
-
-
- # remove old keys (only if we are not appending)
- append = options.get('append', True)
- if append is False:
- removed_keys = set(existing_keys).difference(requested_keys)
- for key in removed_keys:
- #if peer:
- #self.driver.testbed_shell.UnBindObjectFromPeer('key', \
- #key, peer['shortname'])
-
- user = users_by_key_string[key]
- self.driver.testbed_shell.DeleteKey(user, key)
-
- return
+++ /dev/null
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
-BUILDDIR = build
-
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
- @echo "Please use \`make <target>' where <target> is one of"
- @echo " html to make standalone HTML files"
- @echo " dirhtml to make HTML files named index.html in directories"
- @echo " singlehtml to make a single large HTML file"
- @echo " pickle to make pickle files"
- @echo " json to make JSON files"
- @echo " htmlhelp to make HTML files and a HTML help project"
- @echo " qthelp to make HTML files and a qthelp project"
- @echo " devhelp to make HTML files and a Devhelp project"
- @echo " epub to make an epub"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
- @echo " latexpdf to make LaTeX files and run them through pdflatex"
- @echo " text to make text files"
- @echo " man to make manual pages"
- @echo " texinfo to make Texinfo files"
- @echo " info to make Texinfo files and run them through makeinfo"
- @echo " gettext to make PO message catalogs"
- @echo " changes to make an overview of all changed/added/deprecated items"
- @echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
-
-clean:
- -rm -rf $(BUILDDIR)/*
-
-html:
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
- $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
- @echo
- @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
- @echo
- @echo "Build finished; now you can process the pickle files."
-
-json:
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
- @echo
- @echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
- @echo
- @echo "Build finished; now you can run HTML Help Workshop with the" \
- ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
- $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
- @echo
- @echo "Build finished; now you can run "qcollectiongenerator" with the" \
- ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/cortexlab_sfa_driver.qhcp"
- @echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/cortexlab_sfa_driver.qhc"
-
-devhelp:
- $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
- @echo
- @echo "Build finished."
- @echo "To view the help file:"
- @echo "# mkdir -p $$HOME/.local/share/devhelp/cortexlab_sfa_driver"
- @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/cortexlab_sfa_driver"
- @echo "# devhelp"
-
-epub:
- $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
- @echo
- @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo
- @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
- @echo "Run \`make' in that directory to run these through (pdf)latex" \
- "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo "Running LaTeX files through pdflatex..."
- $(MAKE) -C $(BUILDDIR)/latex all-pdf
- @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
- $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
- @echo
- @echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
- $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
- @echo
- @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo
- @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
- @echo "Run \`make' in that directory to run these through makeinfo" \
- "(use \`make info' here to do that automatically)."
-
-info:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo "Running Texinfo files through makeinfo..."
- make -C $(BUILDDIR)/texinfo info
- @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
- $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
- @echo
- @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
- @echo
- @echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
- @echo
- @echo "Link check complete; look for any errors in the above output " \
- "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
- @echo "Testing of doctests in the sources finished, look at the " \
- "results in $(BUILDDIR)/doctest/output.txt."
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# cortexlab_sfa_driver documentation build configuration file, created by
-# sphinx-quickstart on Mon Nov 18 12:11:50 2013.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-
-sys.path.insert(0, os.path.abspath('../../'))
-sys.path.insert(0, os.path.abspath('../../../'))
-sys.path.insert(0, os.path.abspath('../../../storage/'))
-sys.path.insert(0, os.path.abspath('../../../../'))
-sys.path.insert(0, os.path.abspath('../../../rspecs/elements/versions/'))
-sys.path.insert(0, os.path.abspath('../../../rspecs/elements/'))
-sys.path.insert(0, os.path.abspath('../../../importer/'))
-print sys.path
-
-# -- General configuration -----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Cortexlab SFA driver'
-copyright = u'2013, Sandrine Avakian'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '0.1'
-# The full version, including alpha/beta/rc tags.
-release = '0.1'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'cortexlab_sfa_driverdoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('index', 'cortexlab_sfa_driver.tex', u'cortexlab\\_sfa\\_driver Documentation',
- u'Sandrine Avakian', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output --------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'cortexlab_sfa_driver', u'cortexlab_sfa_driver Documentation',
- [u'Sandrine Avakian'], 1)
-]
-
-# If true, show URL addresses after external links.
-#man_show_urls = False
-
-
-# -- Options for Texinfo output ------------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- ('index', 'cortexlab_sfa_driver', u'cortexlab_sfa_driver Documentation',
- u'Sandrine Avakian', 'cortexlab_sfa_driver', 'One line description of project.',
- 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
-
-# If false, no module index is generated.
-#texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
+++ /dev/null
-cortexlab Package
-=================
-
-:mod:`LDAPapi` Module
----------------------
-
-.. automodule:: cortexlab.LDAPapi
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabaggregate` Module
---------------------------------
-
-.. automodule:: cortexlab.cortexlabaggregate
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabdriver` Module
------------------------------
-
-.. automodule:: cortexlab.cortexlabdriver
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabnodes` Module
-----------------------------
-
-.. automodule:: cortexlab.cortexlabnodes
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabpostgres` Module
--------------------------------
-
-.. automodule:: cortexlab.cortexlabpostgres
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabshell` Module
-----------------------------
-
-.. automodule:: cortexlab.cortexlabshell
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`cortexlabslices` Module
------------------------------
-
-.. automodule:: cortexlab.cortexlabslices
- :members:
- :undoc-members:
- :show-inheritance:
-
+++ /dev/null
-.. cortexlab_sfa_driver documentation master file, created by
- sphinx-quickstart on Mon Nov 18 12:11:50 2013.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
-Welcome to cortexlab_sfa_driver's documentation!
-================================================
-
-===================
-Code tree overview
-===================
-
-------
-Driver
-------
-
-The Cortexlab driver source code is under the folder /sfa, along with the other
-testbeds driver folders. The /cortexlab directory contains the necessary files
-defining API for LDAP, the postgresql database as well as for the SFA
-managers.
-
-CortexlabShell
---------------
-
-**fill missing code in this class**
-
-This class contains methods to check reserved nodes, leases and launch/delete
-experiments on the testbed. Methods interacting with the testbed have
-to be completed.
-
-Cortexlabnodes
----------------
-
-**fill missing code in this class**
-
-CortexlabQueryTestbed class's goal is to get information from the testbed
-about the site and its nodes.
-There are two types of information about the nodes:
-
-* their properties : hostname, radio type, position, site, node_id and so on.
- (For a complete list of properties, please refer to the method
- get_all_nodes in cortexlabnodes.py).
-
-* their availability, whether the node is currently in use, in a scheduled experiment
- in the future or available. The availability of the nodes can be managed by a
- scheduler or a database. The node's availabity status is modified when it is
- added to/ deleted from an experiment. In SFA, this corresponds to
- creating/deleting a lease involving this node.
-
-Currently, CortexlabQueryTestbed is merely a skeleton of methods that have to be
-implemented with the real testbed API in order to provide the functionality
-they were designed for (see the cortxlabnodes file for further information
-on which methods have to be completed).
-
-
-In the LDAP file, the LDAPapi class is based on the unix schema.
-If this class is reused in another context, it might not work without some bit
-of customization. The naming (turning a hostname into a sfa hrn, a LDAP login
-into a hrn ) is also done in this class.
-
-The cortexlabpostgres file defines a dedicated cortexlab database, separated from the
-SFA database. Its purpose is to hold information that we can't store anywhere
-given the Cortexlab architecture with OAR and LDAP, namely the association of a
-job and the slice hrn for which the job is supposed to run. Indeed, one user
-may register on another federated testbed then use his federated slice to book
-cortexlab nodes. In this case, an Cortexlab LDAP account will be created. Later on,
-when new users will be imported from the LDAP to the SFA database, a Cortexlab
-slice will be created for each new user found in the LDAP. Thus leading us to
-the situation where one user may have the possibility to use different slices
-to book Cortexlab nodes.
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
+++ /dev/null
-cortexlab
-=========
-
-.. toctree::
- :maxdepth: 4
-
- cortexlab
# the importer class
def importer_class (self):
import sfa.importer.iotlabimporter
- return sfa.importer.iotlabimporter.IotlabImporter
+ return sfa.importer.iotlabimporter.IotLabImporter
# the manager classes for the server-side services
def registry_manager_class (self) :
import sfa.managers.aggregate_manager
return sfa.managers.aggregate_manager.AggregateManager
- # driver class for server-side services, talk to the whole testbed
def driver_class (self):
import sfa.iotlab.iotlabdriver
- return sfa.iotlab.iotlabdriver.IotlabDriver
+ return sfa.iotlab.iotlabdriver.IotLabDriver
- # iotlab does not have a component manager yet
- # manager class
def component_manager_class (self):
return None
# driver_class
-""" File defining the importer class and all the methods needed to import
-the nodes, users and slices from OAR and LDAP to the SFA database.
-Also creates the iotlab specific table to keep track
-of which slice hrn contains which job.
-"""
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_authority, hrn_to_urn
-from sfa.iotlab.iotlabshell import IotlabShell
-# from sfa.iotlab.iotlabdriver import IotlabDriver
-# from sfa.iotlab.iotlabpostgres import TestbedAdditionalSfaDB
-from sfa.trust.certificate import Keypair, convert_public_key
-from sfa.trust.gid import create_uuid
-
-# using global alchemy.session() here is fine
-# as importer is on standalone one-shot process
-
-from sfa.storage.alchemy import global_dbsession, engine
-from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, \
- RegUser, RegKey, init_tables
+# -*- coding:utf-8 -*-
+""" Iot-LAB importer class management """
+from sfa.storage.alchemy import engine
+from sfa.storage.model import init_tables
from sqlalchemy import Table, MetaData
-from sqlalchemy.exc import SQLAlchemyError, NoSuchTableError
-
-
+from sqlalchemy.exc import NoSuchTableError
-class IotlabImporter:
+class IotLabImporter:
"""
- IotlabImporter class, generic importer_class. Used to populate the SFA DB
- with iotlab resources' records.
- Used to update records when new resources, users or nodes, are added
- or deleted.
+ Creates the iotlab specific lease table to keep track
+ of which slice hrn match OAR job
"""
def __init__(self, auth_hierarchy, loc_logger):
- """
- Sets and defines import logger and the authority name. Gathers all the
- records already registerd in the SFA DB, broke them into 3 dicts, by
- type and hrn, by email and by type and pointer.
-
- :param auth_hierarchy: authority name
- :type auth_hierarchy: string
- :param loc_logger: local logger
- :type loc_logger: _SfaLogger
-
- """
- self.auth_hierarchy = auth_hierarchy
self.logger = loc_logger
self.logger.setLevelDebug()
- #retrieve all existing SFA objects
- self.all_records = global_dbsession.query(RegRecord).all()
-
- # initialize record.stale to True by default,
- # then mark stale=False on the ones that are in use
- for record in self.all_records:
- record.stale = True
- #create hash by (type,hrn)
- #used to know if a given record is already known to SFA
- self.records_by_type_hrn = \
- dict([((record.type, record.hrn), record)
- for record in self.all_records])
-
- self.users_rec_by_email = \
- dict([(record.email, record)
- for record in self.all_records if record.type == 'user'])
-
- # create hash by (type,pointer)
- self.records_by_type_pointer = \
- dict([((str(record.type), record.pointer), record)
- for record in self.all_records if record.pointer != -1])
-
-
-
- def exists(self, tablename):
+ def _exists(self, tablename):
"""
- Checks if the table specified as tablename exists.
- :param tablename: name of the table in the db that has to be checked.
- :type tablename: string
- :returns: True if the table exists, False otherwise.
- :rtype: bool
-
+        Checks if the table exists in the SFA database.
"""
metadata = MetaData(bind=engine)
try:
- table = Table(tablename, metadata, autoload=True)
+ Table(tablename, metadata, autoload=True)
return True
except NoSuchTableError:
- self.logger.log_exc("Iotlabimporter tablename %s does not exist"
- % (tablename))
return False
-
-
- @staticmethod
- def hostname_to_hrn_escaped(root_auth, hostname):
- """
-
- Returns a node's hrn based on its hostname and the root authority and by
- removing special caracters from the hostname.
-
- :param root_auth: root authority name
- :param hostname: nodes's hostname
- :type root_auth: string
- :type hostname: string
- :rtype: string
- """
- return '.'.join([root_auth, Xrn.escape(hostname)])
-
-
- @staticmethod
- def slicename_to_hrn(person_hrn):
- """
-
- Returns the slicename associated to a given person's hrn.
-
- :param person_hrn: user's hrn
- :type person_hrn: string
- :rtype: string
- """
- return (person_hrn + '_slice')
-
- def add_options(self, parser):
- """
- .. warning:: not used
- """
- # we don't have any options for now
- pass
-
- def find_record_by_type_hrn(self, record_type, hrn):
- """
- Finds the record associated with the hrn and its type given in parameter
- if the tuple (hrn, type hrn) is an existing key in the dictionary.
-
- :param record_type: the record's type (slice, node, authority...)
- :type record_type: string
- :param hrn: Human readable name of the object's record
- :type hrn: string
- :returns: Returns the record associated with a given hrn and hrn type.
- Returns None if the key tuple is not in the dictionary.
- :rtype: RegUser if user, RegSlice if slice, RegNode if node...or None if
- record does not exist.
-
- """
- return self.records_by_type_hrn.get((record_type, hrn), None)
-
- def locate_by_type_pointer(self, record_type, pointer):
- """
- Returns the record corresponding to the key pointer and record type.
- Returns None if the record does not exist and is not in the
- records_by_type_pointer dictionnary.
-
- :param record_type: the record's type (slice, node, authority...)
- :type record_type: string
- :param pointer: Pointer to where the record is in the origin db,
- used in case the record comes from a trusted authority.
- :type pointer: integer
- :rtype: RegUser if user, RegSlice if slice, RegNode if node, or None if
- record does not exist.
- """
- return self.records_by_type_pointer.get((record_type, pointer), None)
-
-
- def update_just_added_records_dict(self, record):
- """
-
- Updates the records_by_type_hrn dictionnary if the record has
- just been created.
-
- :param record: Record to add in the records_by_type_hrn dict.
- :type record: dictionary
- """
- rec_tuple = (record.type, record.hrn)
- if rec_tuple in self.records_by_type_hrn:
- self.logger.warning("IotlabImporter.update_just_added_records_dict:\
- duplicate (%s,%s)" % rec_tuple)
- return
- self.records_by_type_hrn[rec_tuple] = record
-
-
- def import_nodes(self, site_node_ids, nodes_by_id, testbed_shell):
- """
-
- Creates appropriate hostnames and RegNode records for each node in
- site_node_ids, based on the information given by the dict nodes_by_id
- that was made from data from OAR. Saves the records to the DB.
-
- :param site_node_ids: site's node ids
- :type site_node_ids: list of integers
- :param nodes_by_id: dictionary , key is the node id, value is the a dict
- with node information.
- :type nodes_by_id: dictionary
- :param testbed_shell: IotlabDriver object, used to have access to
- testbed_shell attributes.
- :type testbed_shell: IotlabDriver
-
- :returns: None
- :rtype: None
-
- """
-
- for node_id in site_node_ids:
- try:
- node = nodes_by_id[node_id]
- except KeyError:
- self.logger.warning("IotlabImporter: cannot find node_id %s \
- - ignored" % (node_id))
- continue
- escaped_hrn = \
- self.hostname_to_hrn_escaped(testbed_shell.root_auth,
- node['hostname'])
- self.logger.info("IOTLABIMPORTER node %s " % (node))
- hrn = node['hrn']
-
- # xxx this sounds suspicious
- if len(hrn) > 64:
- hrn = hrn[:64]
- node_record = self.find_record_by_type_hrn('node', hrn)
- if not node_record:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(escaped_hrn, 'node')
- node_gid = \
- self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
-
- def testbed_get_authority(hrn):
- """ Gets the authority part in the hrn.
- :param hrn: hrn whose authority we are looking for.
- :type hrn: string
- :returns: splits the hrn using the '.' separator and returns
- the authority part of the hrn.
- :rtype: string
-
- """
- return hrn.split(".")[0]
-
- node_record = RegNode(hrn=hrn, gid=node_gid,
- pointer='-1',
- authority=testbed_get_authority(hrn))
- try:
-
- node_record.just_created()
- global_dbsession.add(node_record)
- global_dbsession.commit()
- self.logger.info("IotlabImporter: imported node: %s"
- % node_record)
- self.update_just_added_records_dict(node_record)
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: failed to import node")
- else:
- #TODO: xxx update the record ...
- pass
- node_record.stale = False
-
- def import_sites_and_nodes(self, testbed_shell):
- """
-
- Gets all the sites and nodes from OAR, process the information,
- creates hrns and RegAuthority for sites, and feed them to the database.
- For each site, import the site's nodes to the DB by calling
- import_nodes.
-
- :param testbed_shell: IotlabDriver object, used to have access to
- testbed_shell methods and fetching info on sites and nodes.
- :type testbed_shell: IotlabDriver
- """
-
- sites_listdict = testbed_shell.GetSites()
- nodes_listdict = testbed_shell.GetNodes()
- nodes_by_id = dict([(node['node_id'], node) for node in nodes_listdict])
- for site in sites_listdict:
- site_hrn = site['name']
- site_record = self.find_record_by_type_hrn ('authority', site_hrn)
- self.logger.info("IotlabImporter: import_sites_and_nodes \
- (site) %s \r\n " % site_record)
- if not site_record:
- try:
- urn = hrn_to_urn(site_hrn, 'authority')
- if not self.auth_hierarchy.auth_exists(urn):
- self.auth_hierarchy.create_auth(urn)
-
- auth_info = self.auth_hierarchy.get_auth_info(urn)
- site_record = \
- RegAuthority(hrn=site_hrn,
- gid=auth_info.get_gid_object(),
- pointer='-1',
- authority=get_authority(site_hrn))
- site_record.just_created()
- global_dbsession.add(site_record)
- global_dbsession.commit()
- self.logger.info("IotlabImporter: imported authority \
- (site) %s" % site_record)
- self.update_just_added_records_dict(site_record)
- except SQLAlchemyError:
- # if the site import fails then there is no point in
- # trying to import the
- # site's child records(node, slices, persons), so skip them.
- self.logger.log_exc("IotlabImporter: failed to import \
- site. Skipping child records")
- continue
- else:
- # xxx update the record ...
- pass
-
- site_record.stale = False
- self.import_nodes(site['node_ids'], nodes_by_id, testbed_shell)
-
- return
-
-
-
- def init_person_key(self, person, iotlab_key):
- """
- Returns a tuple pubkey and pkey.
-
- :param person Person's data.
- :type person: dict
- :param iotlab_key: SSH public key, from LDAP user's data. RSA type
- supported.
- :type iotlab_key: string
- :rtype: (string, Keypair)
-
- """
- pubkey = None
- if person['pkey']:
- # randomly pick first key in set
- pubkey = iotlab_key
-
- try:
- pkey = convert_public_key(pubkey)
- except TypeError:
- #key not good. create another pkey
- self.logger.warn("IotlabImporter: \
- unable to convert public \
- key for %s" % person['hrn'])
- pkey = Keypair(create=True)
-
- else:
- # the user has no keys.
- #Creating a random keypair for the user's gid
- self.logger.warn("IotlabImporter: person %s does not have a \
- public key" % (person['hrn']))
- pkey = Keypair(create=True)
- return (pubkey, pkey)
-
- def import_persons_and_slices(self, testbed_shell):
- """
-
- Gets user data from LDAP, process the information.
- Creates hrn for the user's slice, the user's gid, creates
- the RegUser record associated with user. Creates the RegKey record
- associated nwith the user's key.
- Saves those records into the SFA DB.
- import the user's slice onto the database as well by calling
- import_slice.
-
- :param testbed_shell: IotlabDriver object, used to have access to
- testbed_shell attributes.
- :type testbed_shell: IotlabDriver
-
- .. warning:: does not support multiple keys per user
- """
- ldap_person_listdict = testbed_shell.GetPersons()
- self.logger.info("IOTLABIMPORT \t ldap_person_listdict %s \r\n"
- % (ldap_person_listdict))
-
- # import persons
- for person in ldap_person_listdict:
-
- self.logger.info("IotlabImporter: person :" % (person))
- if 'ssh-rsa' not in person['pkey']:
- #people with invalid ssh key (ssh-dss, empty, bullshit keys...)
- #won't be imported
- continue
- person_hrn = person['hrn']
- slice_hrn = self.slicename_to_hrn(person['hrn'])
-
- # xxx suspicious again
- if len(person_hrn) > 64:
- person_hrn = person_hrn[:64]
- person_urn = hrn_to_urn(person_hrn, 'user')
-
-
- self.logger.info("IotlabImporter: users_rec_by_email %s "
- % (self.users_rec_by_email))
-
- #Check if user using person['email'] from LDAP is already registered
- #in SFA. One email = one person. In this case, do not create another
- #record for this person
- #person_hrn returned by GetPerson based on iotlab root auth +
- #uid ldap
- user_record = self.find_record_by_type_hrn('user', person_hrn)
-
- if not user_record and person['email'] in self.users_rec_by_email:
- user_record = self.users_rec_by_email[person['email']]
- person_hrn = user_record.hrn
- person_urn = hrn_to_urn(person_hrn, 'user')
-
-
- slice_record = self.find_record_by_type_hrn('slice', slice_hrn)
-
- iotlab_key = person['pkey']
- # new person
- if not user_record:
- (pubkey, pkey) = self.init_person_key(person, iotlab_key)
- if pubkey is not None and pkey is not None:
- person_gid = \
- self.auth_hierarchy.create_gid(person_urn,
- create_uuid(), pkey)
- if person['email']:
- self.logger.debug("IOTLAB IMPORTER \
- PERSON EMAIL OK email %s " % (person['email']))
- person_gid.set_email(person['email'])
- user_record = \
- RegUser(hrn=person_hrn,
- gid=person_gid,
- pointer='-1',
- authority=get_authority(person_hrn),
- email=person['email'])
- else:
- user_record = \
- RegUser(hrn=person_hrn,
- gid=person_gid,
- pointer='-1',
- authority=get_authority(person_hrn))
-
- if pubkey:
- user_record.reg_keys = [RegKey(pubkey)]
- else:
- self.logger.warning("No key found for user %s"
- % (user_record))
-
- try:
- user_record.just_created()
- global_dbsession.add (user_record)
- global_dbsession.commit()
- self.logger.info("IotlabImporter: imported person \
- %s" % (user_record))
- self.update_just_added_records_dict(user_record)
-
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: \
- failed to import person %s" % (person))
- else:
- # update the record ?
- # if user's primary key has changed then we need to update
- # the users gid by forcing an update here
- sfa_keys = user_record.reg_keys
-
- new_key = False
- if iotlab_key is not sfa_keys:
- new_key = True
- if new_key:
- self.logger.info("IotlabImporter: \t \t USER UPDATE \
- person: %s" % (person['hrn']))
- (pubkey, pkey) = self.init_person_key(person, iotlab_key)
- person_gid = \
- self.auth_hierarchy.create_gid(person_urn,
- create_uuid(), pkey)
- if not pubkey:
- user_record.reg_keys = []
- else:
- user_record.reg_keys = [RegKey(pubkey)]
- self.logger.info("IotlabImporter: updated person: %s"
- % (user_record))
-
- if person['email']:
- user_record.email = person['email']
-
- try:
- global_dbsession.commit()
- user_record.stale = False
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: \
- failed to update person %s"% (person))
-
- self.import_slice(slice_hrn, slice_record, user_record)
-
-
- def import_slice(self, slice_hrn, slice_record, user_record):
- """
-
- Create RegSlice record according to the slice hrn if the slice
- does not exist yet.Creates a relationship with the user record
- associated with the slice.
- Commit the record to the database.
-
-
- :param slice_hrn: Human readable name of the slice.
- :type slice_hrn: string
- :param slice_record: record of the slice found in the DB, if any.
- :type slice_record: RegSlice or None
- :param user_record: user record found in the DB if any.
- :type user_record: RegUser
-
- .. todo::Update the record if a slice record already exists.
- """
- if not slice_record:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(slice_hrn, 'slice')
- slice_gid = \
- self.auth_hierarchy.create_gid(urn,
- create_uuid(), pkey)
- slice_record = RegSlice(hrn=slice_hrn, gid=slice_gid,
- pointer='-1',
- authority=get_authority(slice_hrn))
- try:
- slice_record.just_created()
- global_dbsession.add(slice_record)
- global_dbsession.commit()
-
-
- self.update_just_added_records_dict(slice_record)
-
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: failed to import slice")
-
- #No slice update upon import in iotlab
- else:
- # xxx update the record ...
- self.logger.warning("Iotlab Slice update not implemented")
-
- # record current users affiliated with the slice
- slice_record.reg_researchers = [user_record]
- try:
- global_dbsession.commit()
- slice_record.stale = False
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: failed to update slice")
-
+
def run(self, options):
- """
- Create the special iotlab table, lease_table, in the SFA database.
- Import everything (users, slices, nodes and sites from OAR
- and LDAP) into the SFA database.
- Delete stale records that are no longer in OAR or LDAP.
- :param options:
- :type options:
- """
-
- config = Config ()
- interface_hrn = config.SFA_INTERFACE_HRN
- root_auth = config.SFA_REGISTRY_ROOT_AUTH
-
- testbed_shell = IotlabShell(config)
- # leases_db = TestbedAdditionalSfaDB(config)
- #Create special slice table for iotlab
-
- if not self.exists('lease_table'):
+ """ Run importer"""
+ if not self._exists('lease_table'):
init_tables(engine)
- self.logger.info("IotlabImporter.run: lease_table table created ")
-
- # import site and node records in site into the SFA db.
- self.import_sites_and_nodes(testbed_shell)
- #import users and slice into the SFA DB.
- #self.import_persons_and_slices(testbed_shell)
-
- ### remove stale records
- # special records must be preserved
- system_hrns = [interface_hrn, root_auth,
- interface_hrn + '.slicemanager']
- for record in self.all_records:
- if record.hrn in system_hrns:
- record.stale = False
- if record.peer_authority:
- record.stale = False
-
- for record in self.all_records:
- if record.type == 'user':
- self.logger.info("IotlabImporter: stale records: hrn %s %s"
- % (record.hrn, record.stale))
- try:
- stale = record.stale
- except:
- stale = True
- self.logger.warning("stale not found with %s" % record)
- if stale:
- self.logger.info("IotlabImporter: deleting stale record: %s"
- % (record))
-
- try:
- global_dbsession.delete(record)
- global_dbsession.commit()
- except SQLAlchemyError:
- self.logger.log_exc("IotlabImporter: failed to delete \
- stale record %s" % (record))
+ self.logger.info("iotlabimporter run lease_table created")
+++ /dev/null
-"""
-This API is adapted for OpenLDAP. The file contains all LDAP classes and methods
-needed to:
-- Load the LDAP connection configuration file (login, address..) with LdapConfig
-- Connect to LDAP with ldap_co
-- Create a unique LDAP login and password for a user based on his email or last
-name and first name with LoginPassword.
-- Manage entries in LDAP using SFA records with LDAPapi (Search, Add, Delete,
-Modify)
-
-"""
-import random
-from passlib.hash import ldap_salted_sha1 as lssha
-
-from sfa.util.xrn import get_authority
-from sfa.util.sfalogging import logger
-from sfa.util.config import Config
-
-import ldap
-import ldap.modlist as modlist
-
-import os.path
-
-
-class LdapConfig():
- """
- Ldap configuration class loads the configuration file and sets the
- ldap IP address, password, people dn, web dn, group dn. All these settings
- were defined in a separate file ldap_config.py to avoid sharing them in
- the SFA git as it contains sensible information.
-
- """
- def __init__(self, config_file='/etc/sfa/ldap_config.py'):
- """Loads configuration from file /etc/sfa/ldap_config.py and set the
- parameters for connection to LDAP.
-
- """
-
- try:
- execfile(config_file, self.__dict__)
-
- self.config_file = config_file
- # path to configuration data
- self.config_path = os.path.dirname(config_file)
- except IOError:
- raise IOError, "Could not find or load the configuration file: %s" \
- % config_file
-
-
-class ldap_co:
- """ Set admin login and server configuration variables."""
-
- def __init__(self):
- """Fetch LdapConfig attributes (Ldap server connection parameters and
- defines port , version and subtree scope.
-
- """
- #Iotlab PROD LDAP parameters
- self.ldapserv = None
- ldap_config = LdapConfig()
- self.config = ldap_config
- self.ldapHost = ldap_config.LDAP_IP_ADDRESS
- self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN
- self.ldapGroupDN = ldap_config.LDAP_GROUP_DN
- self.ldapAdminDN = ldap_config.LDAP_WEB_DN
- self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD
- self.ldapPort = ldap.PORT
- self.ldapVersion = ldap.VERSION3
- self.ldapSearchScope = ldap.SCOPE_SUBTREE
-
- def connect(self, bind=True):
- """Enables connection to the LDAP server.
-
- :param bind: Set the bind parameter to True if a bind is needed
- (for add/modify/delete operations). Set to False otherwise.
- :type bind: boolean
- :returns: dictionary with status of the connection. True if Successful,
- False if not and in this case the error
- message( {'bool', 'message'} ).
- :rtype: dict
-
- """
- try:
- self.ldapserv = ldap.open(self.ldapHost)
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
- # Bind with authentification
- if(bind):
- return self.bind()
-
- else:
- return {'bool': True}
-
- def bind(self):
- """ Binding method.
-
- :returns: dictionary with the bind status. True if Successful,
- False if not and in this case the error message({'bool','message'})
- :rtype: dict
-
- """
- try:
- # Opens a connection after a call to ldap.open in connect:
- self.ldapserv = ldap.initialize("ldap://" + self.ldapHost)
-
- # Bind/authenticate with a user with apropriate
- #rights to add objects
- self.ldapserv.simple_bind_s(self.ldapAdminDN,
- self.ldapAdminPassword)
- return {'bool': True}
-
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
-
-
- def close(self):
- """Close the LDAP connection.
-
- Can throw an exception if the unbinding fails.
-
- :returns: dictionary with the bind status if the unbinding failed and
- in this case the dict contains an error message. The dictionary keys
- are : ({'bool','message'})
- :rtype: dict or None
-
- """
- try:
- self.ldapserv.unbind_s()
- except ldap.LDAPError, error:
- return {'bool': False, 'message': error}
-
-
-class LoginPassword():
- """
-
- Class to handle login and password generation, using custom login generation
- algorithm.
-
- """
- def __init__(self):
- """
-
- Sets password and login maximum length, and defines the characters that
- can be found in a random generated password.
-
- """
- self.login_max_length = 8
- self.length_password = 8
- self.chars_password = ['!', '$', '(',')', '*', '+', ',', '-', '.',
- '0', '1', '2', '3', '4', '5', '6', '7', '8',
- '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
- 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
- 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
- '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
- 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
- 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
- '\'']
-
- @staticmethod
- def clean_user_names(record):
- """
-
- Removes special characters such as '-', '_' , '[', ']' and ' ' from the
- first name and last name.
-
- :param record: user's record
- :type record: dict
- :returns: lower_first_name and lower_last_name if they were found
- in the user's record. Return None, none otherwise.
- :rtype: string, string or None, None.
-
- """
- if 'first_name' in record and 'last_name' in record:
- #Remove all special characters from first_name/last name
- lower_first_name = record['first_name'].replace('-', '')\
- .replace('_', '').replace('[', '')\
- .replace(']', '').replace(' ', '')\
- .lower()
- lower_last_name = record['last_name'].replace('-', '')\
- .replace('_', '').replace('[', '')\
- .replace(']', '').replace(' ', '')\
- .lower()
- return lower_first_name, lower_last_name
- else:
- return None, None
-
- @staticmethod
- def extract_name_from_email(record):
- """
-
- When there is no valid first name and last name in the record,
- the email is used to generate the login. Here, we assume the email
- is firstname.lastname@something.smthg. The first name and last names
- are extracted from the email, special charcaters are removed and
- they are changed into lower case.
-
- :param record: user's data
- :type record: dict
- :returns: the first name and last name taken from the user's email.
- lower_first_name, lower_last_name.
- :rtype: string, string
-
- """
-
- email = record['email']
- email = email.split('@')[0].lower()
- lower_first_name = None
- lower_last_name = None
- #Assume there is first name and last name in email
- #if there is a separator
- separator_list = ['.', '_', '-']
- for sep in separator_list:
- if sep in email:
- mail = email.split(sep)
- lower_first_name = mail[0]
- lower_last_name = mail[1]
- break
-
- #Otherwise just take the part before the @ as the
- #lower_first_name and lower_last_name
- if lower_first_name is None:
- lower_first_name = email
- lower_last_name = email
-
- return lower_first_name, lower_last_name
-
- def get_user_firstname_lastname(self, record):
- """
-
- Get the user first name and last name from the information we have in
- the record.
-
- :param record: user's information
- :type record: dict
- :returns: the user's first name and last name.
-
- .. seealso:: clean_user_names
- .. seealso:: extract_name_from_email
-
- """
- lower_first_name, lower_last_name = self.clean_user_names(record)
-
- #No first name and last name check email
- if lower_first_name is None and lower_last_name is None:
-
- lower_first_name, lower_last_name = \
- self.extract_name_from_email(record)
-
- return lower_first_name, lower_last_name
-
- # XXX JORDAN: This function writes an error in the log but returns normally :))
- def choose_sets_chars_for_login(self, lower_first_name, lower_last_name):
- """
-
- Algorithm to select sets of characters from the first name and last
- name, depending on the lenght of the last name and the maximum login
- length which in our case is set to 8 characters.
-
- :param lower_first_name: user's first name in lower case.
- :param lower_last_name: usr's last name in lower case.
- :returns: user's login
- :rtype: string
-
- """
- length_last_name = len(lower_last_name)
- self.login_max_length = 8
-
- #Try generating a unique login based on first name and last name
-
- if length_last_name >= self.login_max_length:
- login = lower_last_name[0:self.login_max_length]
- index = 0
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name >= 4:
- login = lower_last_name
- index = 0
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name == 3:
- login = lower_first_name[0:1] + lower_last_name
- index = 1
- logger.debug("login : %s index : %s" % (login, index))
- elif length_last_name == 2:
- if len(lower_first_name) >= 2:
- login = lower_first_name[0:2] + lower_last_name
- index = 2
- logger.debug("login : %s index : %s" % (login, index))
- else:
- logger.error("LoginException : \
- Generation login error with \
- minimum four characters")
-
- else:
- logger.error("LDAP LdapGenerateUniqueLogin failed : \
- impossible to generate unique login for %s %s"
- % (lower_first_name, lower_last_name))
- logger.debug("JORDAN choose_sets_chars_for_login %d %s" % (index, login))
- return index, login
-
- def generate_password(self):
- """
-
- Generate a password upon adding a new user in LDAP Directory
- (8 characters length). The generated password is composed of characters
- from the chars_password list.
-
- :returns: the randomly generated password
- :rtype: string
-
- """
- password = str()
-
- length = len(self.chars_password)
- for index in range(self.length_password):
- char_index = random.randint(0, length - 1)
- password += self.chars_password[char_index]
-
- return password
-
- @staticmethod
- def encrypt_password(password):
- """
-
- Use passlib library to make a RFC2307 LDAP encrypted password salt size
- is 8, use sha-1 algorithm.
-
- :param password: password not encrypted.
- :type password: string
- :returns: Returns encrypted password.
- :rtype: string
-
- """
- #Keep consistency with Java Iotlab's LDAP API
- #RFC2307SSHAPasswordEncryptor so set the salt size to 8 bytes
- return lssha.encrypt(password, salt_size=8)
-
-
-class LDAPapi:
- """Defines functions to insert and search entries in the LDAP.
-
- .. note:: class supposes the unix schema is used
-
- """
- def __init__(self):
- logger.setLevelDebug()
-
- #SFA related config
-
- config = Config()
- self.login_pwd = LoginPassword()
- self.authname = config.SFA_REGISTRY_ROOT_AUTH
- self.conn = ldap_co()
- self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS
- self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN
- self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER
- self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH
- self.baseDN = self.conn.ldapPeopleDN
- self.ldapShell = '/bin/bash'
-
-
- def LdapGenerateUniqueLogin(self, record):
- """
-
- Generate login for adding a new user in LDAP Directory
- (four characters minimum length). Get proper last name and
- first name so that the user's login can be generated.
-
- :param record: Record must contain first_name and last_name.
- :type record: dict
- :returns: the generated login for the user described with record if the
- login generation is successful, None if it fails.
- :rtype: string or None
-
- """
- #For compatibility with other ldap func
- if 'mail' in record and 'email' not in record:
- record['email'] = record['mail']
-
- lower_first_name, lower_last_name = \
- self.login_pwd.get_user_firstname_lastname(record)
-
- index, login = self.login_pwd.choose_sets_chars_for_login(
- lower_first_name, lower_last_name)
-
- login_filter = '(uid=' + login + ')'
- get_attrs = ['uid']
- try:
- #Check if login already in use
-
- while (len(self.LdapSearch(login_filter, get_attrs)) is not 0):
-
- index += 1
- if index >= 9:
- logger.error("LoginException : Generation login error \
- with minimum four characters")
- break
- else:
- try:
- login = \
- lower_first_name[0:index] + \
- lower_last_name[0:
- self.login_pwd.login_max_length
- - index]
- logger.debug("JORDAN trying login: %r" % login)
- login_filter = '(uid=' + login + ')'
- except KeyError:
- print "lower_first_name - lower_last_name too short"
-
- logger.debug("LDAP.API \t LdapGenerateUniqueLogin login %s"
- % (login))
- return login
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapGenerateUniqueLogin Error %s" % (error))
- return None
-
- def find_max_uidNumber(self):
- """Find the LDAP max uidNumber (POSIX uid attribute).
-
- Used when adding a new user in LDAP Directory
-
- :returns: max uidNumber + 1
- :rtype: string
-
- """
- #First, get all the users in the LDAP
- get_attrs = "(uidNumber=*)"
- login_filter = ['uidNumber']
-
- result_data = self.LdapSearch(get_attrs, login_filter)
- #It there is no user in LDAP yet, First LDAP user
- if result_data == []:
- max_uidnumber = self.ldapUserUidNumberMin
- #Otherwise, get the highest uidNumber
- else:
- uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data]
- logger.debug("LDAPapi.py \tfind_max_uidNumber \
- uidNumberList %s " % (uidNumberList))
- max_uidnumber = max(uidNumberList) + 1
-
- return str(max_uidnumber)
-
-
- def get_ssh_pkey(self, record):
- """TODO ; Get ssh public key from sfa record
- To be filled by N. Turro ? or using GID pl way?
-
- """
- return 'A REMPLIR '
-
- @staticmethod
- #TODO Handle OR filtering in the ldap query when
- #dealing with a list of records instead of doing a for loop in GetPersons
- def make_ldap_filters_from_record(record=None):
- """Helper function to make LDAP filter requests out of SFA records.
-
- :param record: user's sfa record. Should contain first_name,last_name,
- email or mail, and if the record is enabled or not. If the dict
- record does not have all of these, must at least contain the user's
- email.
- :type record: dict
- :returns: LDAP request
- :rtype: string
-
- """
- logger.debug("JORDAN make_ldap_filters_from_record: %r" % record)
- req_ldap = ''
- req_ldapdict = {}
- if record :
- if 'first_name' in record and 'last_name' in record:
- if record['first_name'] != record['last_name']:
- req_ldapdict['cn'] = str(record['first_name'])+" "\
- + str(record['last_name'])
- if 'uid' in record:
- req_ldapdict['uid'] = record['uid']
- if 'email' in record:
- req_ldapdict['mail'] = record['email']
- if 'mail' in record:
- req_ldapdict['mail'] = record['mail']
- if 'enabled' in record:
- if record['enabled'] is True:
- req_ldapdict['shadowExpire'] = '-1'
- else:
- req_ldapdict['shadowExpire'] = '0'
-
- #Hrn should not be part of the filter because the hrn
- #presented by a certificate of a SFA user not imported in
- #Iotlab does not include the iotlab login in it
- #Plus, the SFA user may already have an account with iotlab
- #using another login.
-
- logger.debug("\r\n \t LDAP.PY make_ldap_filters_from_record \
- record %s req_ldapdict %s"
- % (record, req_ldapdict))
-
- for k in req_ldapdict:
- req_ldap += '(' + str(k) + '=' + str(req_ldapdict[k]) + ')'
- if len(req_ldapdict.keys()) >1 :
- req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
- size = len(req_ldap)
- req_ldap = req_ldap[:(size-1)] + ')' + req_ldap[(size-1):]
- else:
- req_ldap = "(cn=*)"
-
- return req_ldap
-
- def make_ldap_attributes_from_record(self, record):
- """
-
- When adding a new user to Iotlab's LDAP, creates an attributes
- dictionnary from the SFA record understandable by LDAP. Generates the
- user's LDAP login.User is automatically validated (account enabled)
- and described as a SFA USER FROM OUTSIDE IOTLAB.
-
- :param record: must contain the following keys and values:
- first_name, last_name, mail, pkey (ssh key).
- :type record: dict
- :returns: dictionary of attributes using LDAP data structure model.
- :rtype: dict
-
- """
- logger.debug("JORDAN make_ldap_attributes_from_record: %r" % record)
-
- attrs = {}
- attrs['objectClass'] = ["top", "person", "inetOrgPerson",
- "organizationalPerson", "posixAccount",
- "shadowAccount", "systemQuotas",
- "ldapPublicKey"]
-
- attrs['uid'] = self.LdapGenerateUniqueLogin(record)
- try:
- attrs['givenName'] = str(record['first_name']).lower().capitalize()
- attrs['sn'] = str(record['last_name']).lower().capitalize()
- attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']
- attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']
-
- except KeyError:
- attrs['givenName'] = attrs['uid']
- attrs['sn'] = attrs['uid']
- attrs['cn'] = attrs['uid']
- attrs['gecos'] = attrs['uid']
-
- attrs['quota'] = self.ldapUserQuotaNFS
- attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']
- attrs['loginShell'] = self.ldapShell
- attrs['gidNumber'] = self.ldapUserGidNumber
- attrs['uidNumber'] = self.find_max_uidNumber()
- attrs['mail'] = record['mail'].lower()
- try:
- attrs['sshPublicKey'] = record['pkey']
- except KeyError:
- attrs['sshPublicKey'] = self.get_ssh_pkey(record)
-
-
- #Password is automatically generated because SFA user don't go
- #through the Iotlab website used to register new users,
- #There is no place in SFA where users can enter such information
- #yet.
- #If the user wants to set his own password , he must go to the Iotlab
- #website.
- password = self.login_pwd.generate_password()
- attrs['userPassword'] = self.login_pwd.encrypt_password(password)
-
- #Account automatically validated (no mail request to admins)
- #Set to 0 to disable the account, -1 to enable it,
- attrs['shadowExpire'] = '-1'
-
- #Motivation field in Iotlab
- attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'
-
- attrs['ou'] = 'SFA' #Optional: organizational unit
- #No info about those here:
- attrs['l'] = 'To be defined'#Optional: Locality.
- attrs['st'] = 'To be defined' #Optional: state or province (country).
-
- return attrs
-
-
- def LdapAddUser(self, record) :
- """Add SFA user to LDAP if it is not in LDAP yet.
-
- :param record: dictionnary with the user's data.
- :returns: a dictionary with the status (Fail= False, Success= True)
- and the uid of the newly added user if successful, or the error
- message if it is not. Dict has keys bool and message in case of
- failure, and bool uid in case of success.
- :rtype: dict
-
- .. seealso:: make_ldap_filters_from_record
-
- """
- filter_by = self.make_ldap_filters_from_record({'email' : record['email']})
- user = self.LdapSearch(filter_by)
- if user:
- logger.debug("LDAPapi.py user ldap exist \t%s" % user)
- # user = [('uid=saint,ou=People,dc=senslab,dc=info', {'uid': ['saint'], 'givenName': ['Fred'], ...})]
- return {'bool': True, 'uid': user[0][1]['uid'][0]}
- else:
- user_ldap_attrs = self.make_ldap_attributes_from_record(record)
- result = self.conn.connect()
- if(result['bool']):
- logger.debug("LDAPapi.py user ldap doesn't exist \t%s" % user_ldap_attrs)
- # The dn of our new entry/object
- dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN
- try:
- ldif = modlist.addModlist(user_ldap_attrs)
- self.conn.ldapserv.add_s(dn, ldif)
- self.conn.close()
- return {'bool': True, 'uid': user_ldap_attrs['uid']}
- except ldap.LDAPError, error:
- logger.log_exc("LDAP Add Error %s" % error)
- return {'bool': False, 'message': error}
-
-
-
-
- def LdapDelete(self, person_dn):
- """Deletes a person in LDAP. Uses the dn of the user.
-
- :param person_dn: user's ldap dn.
- :type person_dn: string
- :returns: dictionary with bool True if successful, bool False
- and the error if not.
- :rtype: dict
-
- """
- #Connect and bind
- result = self.conn.connect()
- if(result['bool']):
- try:
- self.conn.ldapserv.delete_s(person_dn)
- self.conn.close()
- return {'bool': True}
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP Delete Error %s" % error)
- return {'bool': False, 'message': error}
-
- def LdapDeleteUser(self, record_filter):
- """Deletes a SFA person in LDAP, based on the user's hrn.
-
- :param record_filter: Filter to find the user to be deleted. Must
- contain at least the user's email.
- :type record_filter: dict
- :returns: dict with bool True if successful, bool False and error
- message otherwise.
- :rtype: dict
-
- .. seealso:: LdapFindUser docstring for more info on record filter.
- .. seealso:: LdapDelete for user deletion
-
- """
- #Find uid of the person
- person = self.LdapFindUser(record_filter, [])
- logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s"
- % (record_filter, person))
-
- if person:
- dn = 'uid=' + person['uid'] + "," + self.baseDN
- else:
- return {'bool': False}
-
- result = self.LdapDelete(dn)
- return result
-
- def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
- """ Modifies a LDAP entry, replaces user's old attributes with
- the new ones given.
-
- :param dn: user's absolute name in the LDAP hierarchy.
- :param old_attributes_dict: old user's attributes. Keys must match
- the ones used in the LDAP model.
- :param new_attributes_dict: new user's attributes. Keys must match
- the ones used in the LDAP model.
- :type dn: string
- :type old_attributes_dict: dict
- :type new_attributes_dict: dict
- :returns: dict bool True if Successful, bool False if not.
- :rtype: dict
-
- """
-
- ldif = modlist.modifyModlist(old_attributes_dict, new_attributes_dict)
- # Connect and bind/authenticate
- result = self.conn.connect()
- if (result['bool']):
- try:
- self.conn.ldapserv.modify_s(dn, ldif)
- self.conn.close()
- return {'bool': True}
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapModify Error %s" % error)
- return {'bool': False}
-
-
- def LdapModifyUser(self, user_record, new_attributes_dict):
- """
-
- Gets the record from one user based on the user's SFA record and changes
- the attributes according to the specified new_attributes. Do not use
- this if we need to modify the uid. Use a ModRDN operation instead
- ( modify relative DN ).
-
- :param user_record: sfa user record.
- :param new_attributes_dict: new user attributes, keys must be the
- same as the LDAP model.
- :type user_record: dict
- :type new_attributes_dict: dict
- :returns: bool True if successful, bool False if not.
- :rtype: dict
-
- .. seealso:: make_ldap_filters_from_record for info on what is mandatory
- in the user_record.
- .. seealso:: make_ldap_attributes_from_record for the LDAP objectclass.
-
- """
- if user_record is None:
- logger.error("LDAP \t LdapModifyUser Need user record ")
- return {'bool': False}
-
- #Get all the attributes of the user_uid_login
- #person = self.LdapFindUser(record_filter,[])
- req_ldap = self.make_ldap_filters_from_record(user_record)
- person_list = self.LdapSearch(req_ldap, [])
- logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s"
- % (person_list))
-
- if person_list and len(person_list) > 1:
- logger.error("LDAP \t LdapModifyUser Too many users returned")
- return {'bool': False}
- if person_list is None:
- logger.error("LDAP \t LdapModifyUser User %s doesn't exist "
- % (user_record))
- return {'bool': False}
-
- # The dn of our existing entry/object
- #One result only from ldapSearch
- person = person_list[0][1]
- dn = 'uid=' + person['uid'][0] + "," + self.baseDN
-
- if new_attributes_dict:
- old = {}
- for k in new_attributes_dict:
- if k not in person:
- old[k] = ''
- else:
- old[k] = person[k]
- logger.debug(" LDAPapi.py \t LdapModifyUser new_attributes %s"
- % (new_attributes_dict))
- result = self.LdapModify(dn, old, new_attributes_dict)
- return result
- else:
- logger.error("LDAP \t LdapModifyUser No new attributes given. ")
- return {'bool': False}
-
-
- def LdapMarkUserAsDeleted(self, record):
- """
-
- Sets shadowExpire to 0, disabling the user in LDAP. Calls LdapModifyUser
- to change the shadowExpire of the user.
-
- :param record: the record of the user who has to be disabled.
- Should contain first_name,last_name, email or mail, and if the
- record is enabled or not. If the dict record does not have all of
- these, must at least contain the user's email.
- :type record: dict
- :returns: {bool: True} if successful or {bool: False} if not
- :rtype: dict
-
- .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
- """
-
- new_attrs = {}
- #Disable account
- new_attrs['shadowExpire'] = '0'
- logger.debug(" LDAPapi.py \t LdapMarkUserAsDeleted ")
- ret = self.LdapModifyUser(record, new_attrs)
- return ret
-
- def LdapResetPassword(self, record):
- """Resets password for the user whose record is the parameter and
- changes the corresponding entry in the LDAP.
-
- :param record: user's sfa record whose Ldap password must be reset.
- Should contain first_name,last_name,
- email or mail, and if the record is enabled or not. If the dict
- record does not have all of these, must at least contain the user's
- email.
- :type record: dict
- :returns: return value of LdapModifyUser. True if successful, False
- otherwise.
-
- .. seealso:: LdapModifyUser, make_ldap_attributes_from_record
-
- """
- password = self.login_pwd.generate_password()
- attrs = {}
- attrs['userPassword'] = self.login_pwd.encrypt_password(password)
- logger.debug("LDAP LdapResetPassword encrypt_password %s"
- % (attrs['userPassword']))
- result = self.LdapModifyUser(record, attrs)
- return result
-
-
- def LdapSearch(self, req_ldap=None, expected_fields=None):
- """
- Used to search directly in LDAP, by using ldap filters and return
- fields. When req_ldap is None, returns all the entries in the LDAP.
-
- :param req_ldap: ldap style request, with appropriate filters,
- example: (cn=*).
- :param expected_fields: Fields in the user ldap entry that has to be
- returned. If None is provided, will return 'mail', 'givenName',
- 'sn', 'uid', 'sshPublicKey', 'shadowExpire'.
- :type req_ldap: string
- :type expected_fields: list
-
- .. seealso:: make_ldap_filters_from_record for req_ldap format.
-
- """
- logger.debug("JORDAN LdapSearch, req_ldap=%r, expected_fields=%r" % (req_ldap, expected_fields))
- result = self.conn.connect(bind=False)
- if (result['bool']):
-
- return_fields_list = []
- if expected_fields is None:
- return_fields_list = ['mail', 'givenName', 'sn', 'uid',
- 'sshPublicKey', 'shadowExpire']
- else:
- return_fields_list = expected_fields
- #No specific request specified, get the whole LDAP
- if req_ldap is None:
- req_ldap = '(cn=*)'
-
- logger.debug("LDAP.PY \t LdapSearch req_ldap %s \
- return_fields_list %s" \
- %(req_ldap, return_fields_list))
-
- try:
- msg_id = self.conn.ldapserv.search(
- self.baseDN, ldap.SCOPE_SUBTREE,
- req_ldap, return_fields_list)
- #Get all the results matching the search from ldap in one
- #shot (1 value)
- result_type, result_data = \
- self.conn.ldapserv.result(msg_id, 1)
-
- self.conn.close()
-
- logger.debug("LDAP.PY \t LdapSearch result_data %s"
- % (result_data))
-
- return result_data
-
- except ldap.LDAPError, error:
- logger.log_exc("LDAP LdapSearch Error %s" % error)
- return []
-
- else:
- logger.error("LDAP.PY \t Connection Failed")
- return []
-
- def _process_ldap_info_for_all_users(self, result_data):
- """Process the data of all enabled users in LDAP.
-
- :param result_data: Contains information of all enabled users in LDAP
- and is coming from LdapSearch.
- :param result_data: list
-
- .. seealso:: LdapSearch
-
- """
- results = []
- logger.debug(" LDAP.py _process_ldap_info_for_all_users result_data %s "
- % (result_data))
- for ldapentry in result_data:
- logger.debug(" LDAP.py _process_ldap_info_for_all_users \
- ldapentry name : %s " % (ldapentry[1]['uid'][0]))
- tmpname = ldapentry[1]['uid'][0]
- hrn = self.authname + "." + tmpname
-
- tmpemail = ldapentry[1]['mail'][0]
- if ldapentry[1]['mail'][0] == "unknown":
- tmpemail = None
-
- try:
- results.append({
- 'type': 'user',
- 'pkey': ldapentry[1]['sshPublicKey'][0],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname ,
- 'email':tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry[1]['givenName'][0],
- 'last_name': ldapentry[1]['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': self.authname,
- 'peer_authority': '',
- 'pointer': -1,
- 'hrn': hrn,
- })
- except KeyError, error:
- logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s"
- % (error))
- return
-
- return results
-
- def _process_ldap_info_for_one_user(self, record, result_data):
- """
-
- Put the user's ldap data into shape. Only deals with one user
- record and one user data from ldap.
-
- :param record: user record
- :param result_data: Raw ldap data coming from LdapSearch
- :returns: user's data dict with 'type','pkey','uid', 'email',
- 'first_name' 'last_name''serial''authority''peer_authority'
- 'pointer''hrn'
- :type record: dict
- :type result_data: list
- :rtype :dict
-
- """
- #One entry only in the ldap data because we used a filter
- #to find one user only
- ldapentry = result_data[0][1]
- logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" % (ldapentry))
- tmpname = ldapentry['uid'][0]
-
- tmpemail = ldapentry['mail'][0]
- if ldapentry['mail'][0] == "unknown":
- tmpemail = None
-
- parent_hrn = None
- peer_authority = None
- # If the user is coming from External authority (e.g. OneLab)
- # Then hrn is None, it should be filled in by the creation of Ldap User
- # XXX LOIC !!! What if a user email is in 2 authorities?
- if 'hrn' in record and record['hrn'] is not None:
- hrn = record['hrn']
- parent_hrn = get_authority(hrn)
- if parent_hrn != self.authname:
- peer_authority = parent_hrn
- #In case the user was not imported from Iotlab LDAP
- #but from another federated site, has an account in
- #iotlab but currently using his hrn from federated site
- #then the login is different from the one found in its hrn
- if tmpname != hrn.split('.')[1]:
- hrn = None
- else:
- hrn = None
-
- if hrn is None:
- results = {
- 'type': 'user',
- 'pkey': ldapentry['sshPublicKey'],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname,
- 'email': tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry['givenName'][0],
- 'last_name': ldapentry['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': parent_hrn,
- 'peer_authority': peer_authority,
- 'pointer': -1,
- }
- else:
- #hrn = None
- results = {
- 'type': 'user',
- 'pkey': ldapentry['sshPublicKey'],
- #'uid': ldapentry[1]['uid'][0],
- 'uid': tmpname,
- 'email': tmpemail,
- #'email': ldapentry[1]['mail'][0],
- 'first_name': ldapentry['givenName'][0],
- 'last_name': ldapentry['sn'][0],
- #'phone': 'none',
- 'serial': 'none',
- 'authority': parent_hrn,
- 'peer_authority': peer_authority,
- 'pointer': -1,
- 'hrn': hrn,
- }
- return results
-
- def LdapFindUser(self, record=None, is_user_enabled=None,
- expected_fields=None):
- """
-
- Search a SFA user with a hrn. User should be already registered
- in Iotlab LDAP.
-
- :param record: sfa user's record. Should contain first_name,last_name,
- email or mail. If no record is provided, returns all the users found
- in LDAP.
- :type record: dict
- :param is_user_enabled: is the user's iotlab account already valid.
- :type is_user_enabled: Boolean.
- :returns: LDAP entries from ldap matching the filter provided. Returns
- a single entry if one filter has been given and a list of
- entries otherwise.
- :rtype: dict or list
-
- """
- logger.debug("JORDAN LdapFindUser record=%r, is_user_enabled=%r, expected_fields=%r" % (record, is_user_enabled, expected_fields))
-
- custom_record = {}
- if is_user_enabled:
- custom_record['enabled'] = is_user_enabled
- if record:
- custom_record.update(record)
-
- req_ldap = self.make_ldap_filters_from_record(custom_record)
- return_fields_list = []
- if expected_fields is None:
- return_fields_list = ['mail', 'givenName', 'sn', 'uid',
- 'sshPublicKey']
- else:
- return_fields_list = expected_fields
-
- result_data = self.LdapSearch(req_ldap, return_fields_list)
- logger.debug("LDAP.PY \t LdapFindUser result_data %s" % (result_data))
-
- if len(result_data) == 0:
- return None
- #Asked for a specific user
- if record is not None:
- logger.debug("LOIC - record = %s" % record)
- results = self._process_ldap_info_for_one_user(record, result_data)
-
- else:
- #Asked for all users in ldap
- results = self._process_ldap_info_for_all_users(result_data)
- return results
+++ /dev/null
-"""
-File used to handle issuing request to OAR and parse OAR's JSON responses.
-Contains the following classes:
-- JsonPage : handles multiple pages OAR answers.
-- OARRestapi : handles issuing POST or GET requests to OAR.
-- ParsingResourcesFull : dedicated to parsing OAR's answer to a get resources
-full request.
-- OARGETParser : handles parsing the Json answers to different GET requests.
-
-"""
-from httplib import HTTPConnection, HTTPException, NotConnected
-import json
-from sfa.util.config import Config
-from sfa.util.sfalogging import logger
-import os.path
-
-
-class JsonPage:
-
- """Class used to manipulate json pages given by OAR.
-
- In case the json answer from a GET request is too big to fit in one json
- page, this class provides helper methods to retrieve all the pages and
- store them in a list before putting them into one single json dictionary,
- facilitating the parsing.
-
- """
-
- def __init__(self):
- """Defines attributes to manipulate and parse the json pages.
-
- """
- #All are boolean variables
- self.concatenate = False
- #Indicates end of data, no more pages to be loaded.
- self.end = False
- self.next_page = False
- #Next query address
- self.next_offset = None
- #Json page
- self.raw_json = None
-
- def FindNextPage(self):
- """
- Gets next data page from OAR when the query's results are too big to
- be transmitted in a single page. Uses the "links' item in the json
- returned to check if an additional page has to be loaded. Updates
- object attributes next_page, next_offset, and end.
-
- """
- if "links" in self.raw_json:
- for page in self.raw_json['links']:
- if page['rel'] == 'next':
- self.concatenate = True
- self.next_page = True
- self.next_offset = "?" + page['href'].split("?")[1]
- return
-
- if self.concatenate:
- self.end = True
- self.next_page = False
- self.next_offset = None
-
- return
-
- #Otherwise, no next page and no concatenate, must be a single page
- #Concatenate the single page and get out of here.
- else:
- self.next_page = False
- self.concatenate = True
- self.next_offset = None
- return
-
- @staticmethod
- def ConcatenateJsonPages(saved_json_list):
- """
- If the json answer is too big to be contained in a single page,
- all the pages have to be loaded and saved before being appended to the
- first page.
-
- :param saved_json_list: list of all the stored pages, including the
- first page.
- :type saved_json_list: list
- :returns: Returns a dictionary with all the pages saved in the
- saved_json_list. The key of the dictionary is 'items'.
- :rtype: dict
-
-
- .. seealso:: SendRequest
- .. warning:: Assumes the apilib is 0.2.10 (with the 'items' key in the
- raw json dictionary)
-
- """
- #reset items list
-
- tmp = {}
- tmp['items'] = []
-
- for page in saved_json_list:
- tmp['items'].extend(page['items'])
- return tmp
-
- def ResetNextPage(self):
- """
- Resets all the Json page attributes (next_page, next_offset,
- concatenate, end). Has to be done before getting another json answer
- so that the previous page status does not affect the new json load.
-
- """
- self.next_page = True
- self.next_offset = None
- self.concatenate = False
- self.end = False
-
-
-class OARrestapi:
- """Class used to connect to the OAR server and to send GET and POST
- requests.
-
- """
-
- # classes attributes
-
- OAR_REQUEST_POST_URI_DICT = {'POST_job': {'uri': '/oarapi/jobs.json'},
- 'DELETE_jobs_id':
- {'uri': '/oarapi/jobs/id.json'},
- }
-
- POST_FORMAT = {'json': {'content': "application/json", 'object': json}}
-
- #OARpostdatareqfields = {'resource' :"/nodes=", 'command':"sleep", \
- #'workdir':"/home/", 'walltime':""}
-
- def __init__(self, config_file='/etc/sfa/oar_config.py'):
- self.oarserver = {}
- self.oarserver['uri'] = None
- self.oarserver['postformat'] = 'json'
-
- try:
- execfile(config_file, self.__dict__)
-
- self.config_file = config_file
- # path to configuration data
- self.config_path = os.path.dirname(config_file)
-
- except IOError:
- raise IOError, "Could not find or load the configuration file: %s" \
- % config_file
- #logger.setLevelDebug()
- self.oarserver['ip'] = self.OAR_IP
- self.oarserver['port'] = self.OAR_PORT
- self.jobstates = ['Terminated', 'Hold', 'Waiting', 'toLaunch',
- 'toError', 'toAckReservation', 'Launching',
- 'Finishing', 'Running', 'Suspended', 'Resuming',
- 'Error']
-
- self.parser = OARGETParser(self)
-
-
- def GETRequestToOARRestAPI(self, request, strval=None,
- next_page=None, username=None):
-
- """Makes a GET request to OAR.
-
- Fetch the uri associated with the request stored in
- OARrequests_uri_dict, adds the username if needed and if available, adds
- strval to the request uri if needed, connects to OAR and issues the GET
- request. Gets the json reply.
-
- :param request: One of the known get requests that are keys in the
- OARrequests_uri_dict.
- :param strval: used when a job id has to be specified.
- :param next_page: used to tell OAR to send the next page for this
- Get request. Is appended to the GET uri.
- :param username: used when a username has to be specified, when looking
- for jobs scheduled by a particular user for instance.
-
- :type request: string
- :type strval: integer
- :type next_page: boolean
- :type username: string
- :returns: a json dictionary if OAR successfully processed the GET
- request.
-
- .. seealso:: OARrequests_uri_dict
- """
- self.oarserver['uri'] = \
- OARGETParser.OARrequests_uri_dict[request]['uri']
- #Get job details with username
- if 'owner' in OARGETParser.OARrequests_uri_dict[request] and username:
- self.oarserver['uri'] += \
- OARGETParser.OARrequests_uri_dict[request]['owner'] + username
- headers = {}
- data = json.dumps({})
- logger.debug("OARrestapi \tGETRequestToOARRestAPI %s" % (request))
- if strval:
- self.oarserver['uri'] = self.oarserver['uri'].\
- replace("id", str(strval))
-
- if next_page:
- self.oarserver['uri'] += next_page
-
- if username:
- headers['X-REMOTE_IDENT'] = username
-
- logger.debug("OARrestapi: \t GETRequestToOARRestAPI \
- self.oarserver['uri'] %s strval %s"
- % (self.oarserver['uri'], strval))
- try:
- #seems that it does not work if we don't add this
- headers['content-length'] = '0'
-
- conn = HTTPConnection(self.oarserver['ip'],
- self.oarserver['port'])
- conn.request("GET", self.oarserver['uri'], data, headers)
- resp = conn.getresponse()
- body = resp.read()
- except Exception as error:
- logger.log_exc("GET_OAR_SRVR : Connection error: %s "
- % (error))
- raise Exception ("GET_OAR_SRVR : Connection error %s " %(error))
-
- finally:
- conn.close()
-
- # except HTTPException, error:
- # logger.log_exc("GET_OAR_SRVR : Problem with OAR server : %s "
- # % (error))
- #raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
- if resp.status >= 400:
- raise ValueError ("Response Error %s, %s, %s" %(resp.status,
- resp.reason, resp.read()))
- try:
- js_dict = json.loads(body)
- #print "\r\n \t\t\t js_dict keys" , js_dict.keys(), " \r\n", js_dict
- return js_dict
-
- except ValueError, error:
- logger.log_exc("Failed to parse Server Response: %s ERROR %s"
- % (body, error))
- #raise ServerError("Failed to parse Server Response:" + js)
-
-
- def POSTRequestToOARRestAPI(self, request, datadict, username=None):
- """ Used to post a job on OAR , along with data associated
- with the job.
-
- """
-
- #first check that all params for are OK
- try:
- self.oarserver['uri'] = \
- self.OAR_REQUEST_POST_URI_DICT[request]['uri']
-
- except KeyError:
- logger.log_exc("OARrestapi \tPOSTRequestToOARRestAPI request not \
- valid")
- return
- if datadict and 'strval' in datadict:
- self.oarserver['uri'] = self.oarserver['uri'].replace("id", \
- str(datadict['strval']))
- del datadict['strval']
-
- data = json.dumps(datadict)
- headers = {'X-REMOTE_IDENT':username, \
- 'content-type': self.POST_FORMAT['json']['content'], \
- 'content-length':str(len(data))}
- try :
-
- conn = HTTPConnection(self.oarserver['ip'], \
- self.oarserver['port'])
- conn.request("POST", self.oarserver['uri'], data, headers)
- resp = conn.getresponse()
- body = resp.read()
-
- except NotConnected:
- logger.log_exc("POSTRequestToOARRestAPI NotConnected ERROR: \
- data %s \r\n \t\n \t\t headers %s uri %s" \
- %(data,headers,self.oarserver['uri']))
- except Exception as error:
- logger.log_exc("POST_OAR_SERVER : Connection error: %s "
- % (error))
- raise Exception ("POST_OAR_SERVER : Connection error %s " %(error))
-
- finally:
- conn.close()
-
- if resp.status >= 400:
- raise ValueError ("Response Error %s, %s, %s" %(resp.status,
- resp.reason, body))
-
-
- try:
- answer = json.loads(body)
- logger.debug("POSTRequestToOARRestAPI : answer %s" % (answer))
- return answer
-
- except ValueError, error:
- logger.log_exc("Failed to parse Server Response: error %s \
- %s" %(error))
- #raise ServerError("Failed to parse Server Response:" + answer)
-
-
-class ParsingResourcesFull():
- """
- Class dedicated to parse the json response from a GET_resources_full from
- OAR.
-
- """
- def __init__(self):
- """
- Set the parsing dictionary. Works like a switch case, if the key is
- found in the dictionary, then the associated function is called.
- This is used in ParseNodes to create a usable dictionary from
- the Json returned by OAR when issuing a GET resources full request.
-
- .. seealso:: ParseNodes
-
- """
- self.resources_fulljson_dict = {
- 'network_address': self.AddNodeNetworkAddr,
- 'site': self.AddNodeSite,
- # 'radio': self.AddNodeRadio,
- 'mobile': self.AddMobility,
- 'x': self.AddPosX,
- 'y': self.AddPosY,
- 'z': self.AddPosZ,
- 'archi': self.AddHardwareType,
- 'state': self.AddBootState,
- 'id': self.AddOarNodeId,
- 'mobility_type': self.AddMobilityType,
- }
-
-
-
- def AddOarNodeId(self, tuplelist, value):
- """Adds Oar internal node id to the nodes' attributes.
-
- Appends tuple ('oar_id', node_id) to the tuplelist. Used by ParseNodes.
-
- .. seealso:: ParseNodes
-
- """
-
- tuplelist.append(('oar_id', int(value)))
-
-
- def AddNodeNetworkAddr(self, dictnode, value):
- """First parsing function to be called to parse the json returned by OAR
- answering a GET_resources (/oarapi/resources.json) request.
-
- When a new node is found in the json, this function is responsible for
- creating a new entry in the dictionary for storing information on this
- specific node. The key is the node network address, which is also the
- node's hostname.
- The value associated with the key is a tuple list.It contains all
- the nodes attributes. The tuplelist will later be turned into a dict.
-
- :param dictnode: should be set to the OARGETParser attribute
- node_dictlist. It will store the information on the nodes.
- :param value: the node_id is the network_address in the raw json.
- :type value: string
- :type dictnode: dictionary
-
- .. seealso: ParseResources, ParseNodes
- """
-
- node_id = value
- dictnode[node_id] = [('node_id', node_id),('hostname', node_id) ]
-
- return node_id
-
- def AddNodeSite(self, tuplelist, value):
- """Add the site's node to the dictionary.
-
-
- :param tuplelist: tuple list on which to add the node's site.
- Contains the other node attributes as well.
- :param value: value to add to the tuple list, in this case the node's
- site.
- :type tuplelist: list
- :type value: string
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- tuplelist.append(('site', str(value)))
-
- # def AddNodeRadio(tuplelist, value):
- # """Add the node's radio chipset type to the tuple list.
-
- # :param tuplelist: tuple list on which to add the node's mobility
- # status. The tuplelist is the value associated with the node's
- # id in the OARGETParser
- # 's dictionary node_dictlist.
- # :param value: name of the radio chipset on the node.
- # :type tuplelist: list
- # :type value: string
-
- # .. seealso:: AddNodeNetworkAddr
-
- # """
- # tuplelist.append(('radio', str(value)))
-
- def AddMobilityType(self, tuplelist, value):
- """Adds which kind of mobility it is, train or roomba robot.
-
- :param tuplelist: tuple list on which to add the node's mobility status.
- The tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: tells if a node is a mobile node or not. The value is
- found in the json.
-
- :type tuplelist: list
- :type value: integer
-
- """
- tuplelist.append(('mobility_type', str(value)))
-
-
- def AddMobility(self, tuplelist, value):
- """Add if the node is a mobile node or not to the tuple list.
-
- :param tuplelist: tuple list on which to add the node's mobility status.
- The tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: tells if a node is a mobile node or not. The value is found
- in the json.
-
- :type tuplelist: list
- :type value: integer
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- if value is 0:
- tuplelist.append(('mobile', 'False'))
- else:
- tuplelist.append(('mobile', 'True'))
-
-
- def AddPosX(self, tuplelist, value):
- """Add the node's position on the x axis.
-
- :param tuplelist: tuple list on which to add the node's position . The
- tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: the position x.
-
- :type tuplelist: list
- :type value: integer
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- tuplelist.append(('posx', value ))
-
-
-
- def AddPosY(self, tuplelist, value):
- """Add the node's position on the y axis.
-
- :param tuplelist: tuple list on which to add the node's position . The
- tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: the position y.
-
- :type tuplelist: list
- :type value: integer
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- tuplelist.append(('posy', value))
-
-
-
- def AddPosZ(self, tuplelist, value):
- """Add the node's position on the z axis.
-
- :param tuplelist: tuple list on which to add the node's position . The
- tuplelist is the value associated with the node's id in the
- OARGETParser's dictionary node_dictlist.
- :param value: the position z.
-
- :type tuplelist: list
- :type value: integer
-
- .. seealso:: AddNodeNetworkAddr
-
- """
-
- tuplelist.append(('posz', value))
-
-
-
- def AddBootState(tself, tuplelist, value):
- """Add the node's state, Alive or Suspected.
-
- :param tuplelist: tuple list on which to add the node's state . The
- tuplelist is the value associated with the node's id in the
- OARGETParser 's dictionary node_dictlist.
- :param value: node's state.
-
- :type tuplelist: list
- :type value: string
-
- .. seealso:: AddNodeNetworkAddr
-
- """
- tuplelist.append(('boot_state', str(value)))
-
-
- def AddHardwareType(self, tuplelist, value):
- """Add the node's hardware model and radio chipset type to the tuple
- list.
-
- :param tuplelist: tuple list on which to add the node's architecture
- and radio chipset type.
- :param value: hardware type: radio chipset. The value contains both the
- architecture and the radio chipset, separated by a colon.
- :type tuplelist: list
- :type value: string
-
- .. seealso:: AddNodeNetworkAddr
-
- """
-
- value_list = value.split(':')
- tuplelist.append(('archi', value_list[0]))
- tuplelist.append(('radio', value_list[1]))
-
-
-class OARGETParser:
- """Class providing parsing methods associated to specific GET requests.
-
- """
-
- def __init__(self, srv):
- self.version_json_dict = {
- 'api_version': None, 'apilib_version': None,
- 'api_timezone': None, 'api_timestamp': None, 'oar_version': None}
- self.config = Config()
- self.interface_hrn = self.config.SFA_INTERFACE_HRN
- self.timezone_json_dict = {
- 'timezone': None, 'api_timestamp': None, }
- #self.jobs_json_dict = {
- #'total' : None, 'links' : [],\
- #'offset':None , 'items' : [], }
- #self.jobs_table_json_dict = self.jobs_json_dict
- #self.jobs_details_json_dict = self.jobs_json_dict
- self.server = srv
- self.node_dictlist = {}
-
- self.json_page = JsonPage()
- self.parsing_resourcesfull = ParsingResourcesFull()
- self.site_dict = {}
- self.jobs_list = []
- self.SendRequest("GET_version")
-
-
- def ParseVersion(self):
- """Parses the OAR answer to the GET_version ( /oarapi/version.json.)
-
- Finds the OAR apilib version currently used. Has an impact on the json
- structure returned by OAR, so the version has to be known before trying
- to parse the jsons returned after a get request has been issued.
- Updates the attribute version_json_dict.
-
- """
-
- if 'oar_version' in self.json_page.raw_json:
- self.version_json_dict.update(
- api_version=self.json_page.raw_json['api_version'],
- apilib_version=self.json_page.raw_json['apilib_version'],
- api_timezone=self.json_page.raw_json['api_timezone'],
- api_timestamp=self.json_page.raw_json['api_timestamp'],
- oar_version=self.json_page.raw_json['oar_version'])
- else:
- self.version_json_dict.update(
- api_version=self.json_page.raw_json['api'],
- apilib_version=self.json_page.raw_json['apilib'],
- api_timezone=self.json_page.raw_json['api_timezone'],
- api_timestamp=self.json_page.raw_json['api_timestamp'],
- oar_version=self.json_page.raw_json['oar'])
-
- print self.version_json_dict['apilib_version']
-
-
- def ParseTimezone(self):
- """Get the timezone used by OAR.
-
- Get the timezone from the answer to the GET_timezone request.
- :return: api_timestamp and api timezone.
- :rype: integer, integer
-
- .. warning:: unused.
- """
- api_timestamp = self.json_page.raw_json['api_timestamp']
- api_tz = self.json_page.raw_json['timezone']
- return api_timestamp, api_tz
-
- def ParseJobs(self):
- """Called when a GET_jobs request has been issued to OAR.
-
- Corresponds to /oarapi/jobs.json uri. Currently returns the raw json
- information dict.
- :returns: json_page.raw_json
- :rtype: dictionary
-
- .. warning:: Does not actually parse the information in the json. SA
- 15/07/13.
-
- """
- self.jobs_list = []
- print " ParseJobs "
- return self.json_page.raw_json
-
- def ParseJobsTable(self):
- """In case we need to use the job table in the future.
-
- Associated with the GET_jobs_table : '/oarapi/jobs/table.json uri.
- .. warning:: NOT USED. DOES NOTHING.
- """
- print "ParseJobsTable"
-
- def ParseJobsDetails(self):
- """Currently only returns the same json in self.json_page.raw_json.
-
- .. todo:: actually parse the json
- .. warning:: currently, this function is not used a lot, so I have no
- idea what could be useful to parse, returning the full json. NT
- """
-
- #logger.debug("ParseJobsDetails %s " %(self.json_page.raw_json))
- return self.json_page.raw_json
-
-
- def ParseJobsIds(self):
- """Associated with the GET_jobs_id OAR request.
-
- Parses the json dict (OAR answer) to the GET_jobs_id request
- /oarapi/jobs/id.json.
-
-
- :returns: dictionary whose keys are listed in the local variable
- job_resources and values that are in the json dictionary returned
- by OAR with the job information.
- :rtype: dict
-
- """
- job_resources = ['wanted_resources', 'name', 'id', 'start_time',
- 'state', 'owner', 'walltime', 'message']
-
- # Unused variable providing the contents of the json dict returned from
- # get job resources full request
- job_resources_full = [
- 'launching_directory', 'links',
- 'resubmit_job_id', 'owner', 'events', 'message',
- 'scheduled_start', 'id', 'array_id', 'exit_code',
- 'properties', 'state', 'array_index', 'walltime',
- 'type', 'initial_request', 'stop_time', 'project',
- 'start_time', 'dependencies', 'api_timestamp', 'submission_time',
- 'reservation', 'stdout_file', 'types', 'cpuset_name',
- 'name', 'wanted_resources', 'queue', 'stderr_file', 'command']
-
-
- job_info = self.json_page.raw_json
- #logger.debug("OARESTAPI ParseJobsIds %s" %(self.json_page.raw_json))
- values = []
- try:
- for k in job_resources:
- values.append(job_info[k])
- return dict(zip(job_resources, values))
-
- except KeyError:
- logger.log_exc("ParseJobsIds KeyError ")
-
-
- def ParseJobsIdResources(self):
- """ Parses the json produced by the request
- /oarapi/jobs/id/resources.json.
- Returns a list of oar node ids that are scheduled for the
- given job id.
-
- """
- job_resources = []
- for resource in self.json_page.raw_json['items']:
- job_resources.append(resource['id'])
-
- return job_resources
-
- def ParseResources(self):
- """ Parses the json produced by a get_resources request on oar."""
-
- #logger.debug("OARESTAPI \tParseResources " )
- #resources are listed inside the 'items' list from the json
- self.json_page.raw_json = self.json_page.raw_json['items']
- self.ParseNodes()
-
- def ParseReservedNodes(self):
- """ Returns an array containing the list of the jobs scheduled
- with the reserved nodes if available.
-
- :returns: list of job dicts, each dict containing the following keys:
- t_from, t_until, resources_ids (of the reserved nodes for this job).
- If the information is not available, default values will be set for
- these keys. The other keys are : state, lease_id and user.
- :rtype: list
-
- """
-
- #resources are listed inside the 'items' list from the json
- reservation_list = []
- job = {}
- #Parse resources info
- for json_element in self.json_page.raw_json['items']:
- #In case it is a real reservation (not asap case)
- if json_element['scheduled_start']:
- job['t_from'] = json_element['scheduled_start']
- job['t_until'] = int(json_element['scheduled_start']) + \
- int(json_element['walltime'])
- #Get resources id list for the job
- job['resource_ids'] = [node_dict['id'] for node_dict
- in json_element['resources']]
- else:
- job['t_from'] = "As soon as possible"
- job['t_until'] = "As soon as possible"
- job['resource_ids'] = ["Undefined"]
-
- job['state'] = json_element['state']
- job['lease_id'] = json_element['id']
-
- job['user'] = json_element['owner']
- #logger.debug("OARRestapi \tParseReservedNodes job %s" %(job))
- reservation_list.append(job)
- #reset dict
- job = {}
- return reservation_list
-
- def ParseRunningJobs(self):
- """ Gets the list of nodes currently in use from the attributes of the
- running jobs.
-
- :returns: list of hostnames, the nodes that are currently involved in
- running jobs.
- :rtype: list
-
-
- """
- logger.debug("OARESTAPI \tParseRunningJobs_________________ ")
- #resources are listed inside the 'items' list from the json
- nodes = []
- for job in self.json_page.raw_json['items']:
- for node in job['nodes']:
- nodes.append(node['network_address'])
- return nodes
-
- def ChangeRawJsonDependingOnApilibVersion(self):
- """
- Check if the OAR apilib version is different from 0.2.10, in which case
- the Json answer is also dict instead as a plain list.
-
- .. warning:: the whole code is assuming the json contains a 'items' key
- .. seealso:: ConcatenateJsonPages, ParseJobs, ParseReservedNodes,
- ParseJobsIdResources, ParseResources, ParseRunningJobs
- .. todo:: Clean the whole code. Either suppose the apilib will always
- provide the 'items' key, or handle different options.
- """
-
- if self.version_json_dict['apilib_version'] != "0.2.10":
- self.json_page.raw_json = self.json_page.raw_json['items']
-
- def ParseDeleteJobs(self):
- """ No need to parse anything in this function.A POST
- is done to delete the job.
-
- """
- return
-
- def ParseResourcesFull(self):
- """ This method is responsible for parsing all the attributes
- of all the nodes returned by OAR when issuing a get resources full.
- The information from the nodes and the sites are separated.
- Updates the node_dictlist so that the dictionnary of the platform's
- nodes is available afterwards.
-
- :returns: node_dictlist, a list of dictionaries about the nodes and
- their properties.
- :rtype: list
-
- """
- logger.debug("OARRESTAPI ParseResourcesFull___________ ")
- #print self.json_page.raw_json[1]
- #resources are listed inside the 'items' list from the json
- self.ChangeRawJsonDependingOnApilibVersion()
- self.ParseNodes()
- self.ParseSites()
- return self.node_dictlist
-
- def ParseResourcesFullSites(self):
- """ Called by GetSites which is unused.
- Originally used to get information from the sites, with for each site
- the list of nodes it has, along with their properties.
-
- :return: site_dict, dictionary of sites
- :rtype: dict
-
- .. warning:: unused
- .. seealso:: GetSites (IotlabShell)
-
- """
- self.ChangeRawJsonDependingOnApilibVersion()
- self.ParseNodes()
- self.ParseSites()
- return self.site_dict
-
-
- def ParseNodes(self):
- """ Parse nodes properties from OAR
- Put them into a dictionary with key = node id and value is a dictionary
- of the node properties and properties'values.
-
- """
- node_id = None
- _resources_fulljson_dict = \
- self.parsing_resourcesfull.resources_fulljson_dict
- keys = _resources_fulljson_dict.keys()
- keys.sort()
-
- for dictline in self.json_page.raw_json:
- node_id = None
- # dictionary is empty and/or a new node has to be inserted
- node_id = _resources_fulljson_dict['network_address'](
- self.node_dictlist, dictline['network_address'])
- for k in keys:
- if k in dictline:
- if k == 'network_address':
- continue
-
- _resources_fulljson_dict[k](
- self.node_dictlist[node_id], dictline[k])
-
- #The last property has been inserted in the property tuple list,
- #reset node_id
- #Turn the property tuple list (=dict value) into a dictionary
- self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
- node_id = None
-
- @staticmethod
- def iotlab_hostname_to_hrn(root_auth, hostname):
- """
- Transforms a node hostname into a SFA hrn.
-
- :param root_auth: Name of the root authority of the SFA server. In
- our case, it is set to iotlab.
- :param hostname: node's hotname, given by OAR.
- :type root_auth: string
- :type hostname: string
- :returns: inserts the root_auth and '.' before the hostname.
- :rtype: string
-
- """
- return root_auth + '.' + hostname
-
- def ParseSites(self):
- """ Returns a list of dictionnaries containing the sites' attributes."""
-
- nodes_per_site = {}
- config = Config()
- #logger.debug(" OARrestapi.py \tParseSites self.node_dictlist %s"\
- #%(self.node_dictlist))
- # Create a list of nodes per site_id
- for node_id in self.node_dictlist:
- node = self.node_dictlist[node_id]
-
- if node['site'] not in nodes_per_site:
- nodes_per_site[node['site']] = []
- nodes_per_site[node['site']].append(node['node_id'])
- else:
- if node['node_id'] not in nodes_per_site[node['site']]:
- nodes_per_site[node['site']].append(node['node_id'])
-
- #Create a site dictionary whose key is site_login_base
- # (name of the site) and value is a dictionary of properties,
- # including the list of the node_ids
- for node_id in self.node_dictlist:
- node = self.node_dictlist[node_id]
- node.update({'hrn': self.iotlab_hostname_to_hrn(self.interface_hrn,
- node['hostname'])})
- self.node_dictlist.update({node_id: node})
-
- if node['site'] not in self.site_dict:
- self.site_dict[node['site']] = {
- 'site': node['site'],
- 'node_ids': nodes_per_site[node['site']],
- 'latitude': "48.83726",
- 'longitude': "- 2.10336",
- 'name': config.SFA_REGISTRY_ROOT_AUTH,
- 'pcu_ids': [], 'max_slices': None,
- 'ext_consortium_id': None,
- 'max_slivers': None, 'is_public': True,
- 'peer_site_id': None,
- 'abbreviated_name': "iotlab", 'address_ids': [],
- 'url': "https://portal.senslab.info", 'person_ids': [],
- 'site_tag_ids': [], 'enabled': True, 'slice_ids': [],
- 'date_created': None, 'peer_id': None
- }
-
- OARrequests_uri_dict = {
- 'GET_version':
- {'uri': '/oarapi/version.json', 'parse_func': ParseVersion},
-
- 'GET_timezone':
- {'uri': '/oarapi/timezone.json', 'parse_func': ParseTimezone},
-
- 'GET_jobs':
- {'uri': '/oarapi/jobs.json', 'parse_func': ParseJobs},
-
- 'GET_jobs_id':
- {'uri': '/oarapi/jobs/id.json', 'parse_func': ParseJobsIds},
-
- 'GET_jobs_id_resources':
- {'uri': '/oarapi/jobs/id/resources.json',
- 'parse_func': ParseJobsIdResources},
-
- 'GET_jobs_table':
- {'uri': '/oarapi/jobs/table.json', 'parse_func': ParseJobsTable},
-
- 'GET_jobs_details':
- {'uri': '/oarapi/jobs/details.json', 'parse_func': ParseJobsDetails},
-
- 'GET_reserved_nodes':
- {'uri':
- '/oarapi/jobs/details.json?state=Running,Waiting,Launching',
- 'owner': '&user=', 'parse_func': ParseReservedNodes},
-
- 'GET_running_jobs':
- {'uri': '/oarapi/jobs/details.json?state=Running',
- 'parse_func': ParseRunningJobs},
-
- 'GET_resources_full':
- {'uri': '/oarapi/resources/full.json',
- 'parse_func': ParseResourcesFull},
-
- 'GET_sites':
- {'uri': '/oarapi/resources/full.json',
- 'parse_func': ParseResourcesFullSites},
-
- 'GET_resources':
- {'uri': '/oarapi/resources.json', 'parse_func': ParseResources},
-
- 'DELETE_jobs_id':
- {'uri': '/oarapi/jobs/id.json', 'parse_func': ParseDeleteJobs}}
-
-
- def SendRequest(self, request, strval=None, username=None):
- """ Connects to OAR , sends the valid GET requests and uses
- the appropriate json parsing functions.
-
- :returns: calls to the appropriate parsing function, associated with the
- GET request
- :rtype: depends on the parsing function called.
-
- .. seealso:: OARrequests_uri_dict
- """
- save_json = None
-
- self.json_page.ResetNextPage()
- save_json = []
-
- if request in self.OARrequests_uri_dict:
- while self.json_page.next_page:
- self.json_page.raw_json = self.server.GETRequestToOARRestAPI(
- request,
- strval,
- self.json_page.next_offset,
- username)
- self.json_page.FindNextPage()
- if self.json_page.concatenate:
- save_json.append(self.json_page.raw_json)
-
- if self.json_page.concatenate and self.json_page.end:
- self.json_page.raw_json = \
- self.json_page.ConcatenateJsonPages(save_json)
-
- return self.OARrequests_uri_dict[request]['parse_func'](self)
- else:
- logger.error("OARRESTAPI OARGetParse __init__ : ERROR_REQUEST "
- % (request))
+++ /dev/null
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-PAPER =
-BUILDDIR = build
-
-# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
- @echo "Please use \`make <target>' where <target> is one of"
- @echo " html to make standalone HTML files"
- @echo " dirhtml to make HTML files named index.html in directories"
- @echo " singlehtml to make a single large HTML file"
- @echo " pickle to make pickle files"
- @echo " json to make JSON files"
- @echo " htmlhelp to make HTML files and a HTML help project"
- @echo " qthelp to make HTML files and a qthelp project"
- @echo " devhelp to make HTML files and a Devhelp project"
- @echo " epub to make an epub"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
- @echo " latexpdf to make LaTeX files and run them through pdflatex"
- @echo " text to make text files"
- @echo " man to make manual pages"
- @echo " texinfo to make Texinfo files"
- @echo " info to make Texinfo files and run them through makeinfo"
- @echo " gettext to make PO message catalogs"
- @echo " changes to make an overview of all changed/added/deprecated items"
- @echo " linkcheck to check all external links for integrity"
- @echo " doctest to run all doctests embedded in the documentation (if enabled)"
-
-clean:
- -rm -rf $(BUILDDIR)/*
-
-html:
- $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
- $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
- @echo
- @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
- $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
- @echo
- @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
- $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
- @echo
- @echo "Build finished; now you can process the pickle files."
-
-json:
- $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
- @echo
- @echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
- $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
- @echo
- @echo "Build finished; now you can run HTML Help Workshop with the" \
- ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
- $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
- @echo
- @echo "Build finished; now you can run "qcollectiongenerator" with the" \
- ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/IotlabSFAdriver.qhcp"
- @echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/IotlabSFAdriver.qhc"
-
-devhelp:
- $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
- @echo
- @echo "Build finished."
- @echo "To view the help file:"
- @echo "# mkdir -p $$HOME/.local/share/devhelp/IotlabSFAdriver"
- @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/IotlabSFAdriver"
- @echo "# devhelp"
-
-epub:
- $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
- @echo
- @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo
- @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
- @echo "Run \`make' in that directory to run these through (pdf)latex" \
- "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
- $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
- @echo "Running LaTeX files through pdflatex..."
- $(MAKE) -C $(BUILDDIR)/latex all-pdf
- @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
- $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
- @echo
- @echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
- $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
- @echo
- @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo
- @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
- @echo "Run \`make' in that directory to run these through makeinfo" \
- "(use \`make info' here to do that automatically)."
-
-info:
- $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
- @echo "Running Texinfo files through makeinfo..."
- make -C $(BUILDDIR)/texinfo info
- @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
- $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
- @echo
- @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
- $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
- @echo
- @echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
- $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
- @echo
- @echo "Link check complete; look for any errors in the above output " \
- "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
- $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
- @echo "Testing of doctests in the sources finished, look at the " \
- "results in $(BUILDDIR)/doctest/output.txt."
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# Iotlab SFA driver documentation build configuration file, created by
-# sphinx-quickstart on Tue Jul 2 11:53:15 2013.
-#
-# This file is execfile()d with the current directory set to its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys, os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-sys.path.insert(0, os.path.abspath('../../'))
-sys.path.insert(0, os.path.abspath('../../../'))
-sys.path.insert(0, os.path.abspath('../../../storage/'))
-sys.path.insert(0, os.path.abspath('../../../../'))
-sys.path.insert(0, os.path.abspath('../../../rspecs/elements/versions/'))
-sys.path.insert(0, os.path.abspath('../../../rspecs/elements/'))
-sys.path.insert(0, os.path.abspath('../../../importer/'))
-print sys.path
-
-# -- General configuration -----------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be extensions
-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode']
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Iotlab SFA driver'
-copyright = u'2013, Sandrine Avakian '
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '1.0'
-# The full version, including alpha/beta/rc tags.
-release = '1.0'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = []
-
-# The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-
-# -- Options for HTML output ---------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'IotlabSFAdriverdoc'
-
-
-# -- Options for LaTeX output --------------------------------------------------
-
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass [howto/manual]).
-latex_documents = [
- ('index', 'IotlabSFAdriver.tex', u'Iotlab SFA driver Documentation',
- u'Sandrine Avakian ', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output --------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'iotlabsfadriver', u'Iotlab SFA driver Documentation',
- [u'Sandrine Avakian '], 1)
-]
-
-# If true, show URL addresses after external links.
-#man_show_urls = False
-
-
-# -- Options for Texinfo output ------------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- ('index', 'IotlabSFAdriver', u'Iotlab SFA driver Documentation',
- u'Sandrine Avakian ', 'IotlabSFAdriver', 'One line description of project.',
- 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
-
-# If false, no module index is generated.
-#texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
-
-
-# -- Options for Epub output ---------------------------------------------------
-
-# Bibliographic Dublin Core info.
-epub_title = u'Iotlab SFA driver'
-epub_author = u'Sandrine Avakian '
-epub_publisher = u'Sandrine Avakian '
-epub_copyright = u'2013, Sandrine Avakian '
-
-# The language of the text. It defaults to the language option
-# or en if the language is not set.
-#epub_language = ''
-
-# The scheme of the identifier. Typical schemes are ISBN or URL.
-#epub_scheme = ''
-
-# The unique identifier of the text. This can be a ISBN number
-# or the project homepage.
-#epub_identifier = ''
-
-# A unique identification for the text.
-#epub_uid = ''
-
-# A tuple containing the cover image and cover page html template filenames.
-#epub_cover = ()
-
-# HTML files that should be inserted before the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_pre_files = []
-
-# HTML files shat should be inserted after the pages created by sphinx.
-# The format is a list of tuples containing the path and title.
-#epub_post_files = []
-
-# A list of files that should not be packed into the epub file.
-#epub_exclude_files = []
-
-# The depth of the table of contents in toc.ncx.
-#epub_tocdepth = 3
-
-# Allow duplicate toc entries.
-#epub_tocdup = True
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'http://docs.python.org/': None}
+++ /dev/null
-importer Package
-=================
-
-:mod:`iotlabimporter` Module
-------------------------------
-
-.. automodule:: importer.iotlabimporter
- :members:
- :undoc-members:
- :show-inheritance:
-
-
+++ /dev/null
-.. Iotlab SFA driver documentation master file, created by
- sphinx-quickstart on Tue Jul 2 11:53:15 2013.
- You can adapt this file completely to your liking, but it should at least
- contain the root `toctree` directive.
-
-Welcome to Iotlab SFA driver's documentation!
-=============================================
-
-===================
-Code tree overview
-===================
-
-------------
-Installation
-------------
-**Using git**
-
-git clone git://git.onelab.eu/sfa.git
-cd sfa
-git checkout <version>
-make version
-python setup.py install
-
-<version> can be either geni-v2 or geni-v3.
-------
-Driver
-------
-**Folder**:/sfa/sfa/iotlab/
-
-The Iotlab driver source code is under the folder /sfa, along with the other
-testbeds driver folders. The /iotlab directory contains the necessary files
-defining API for OAR, LDAP, the postgresql table which is hosted in the SFA
-database as well as for the SFA managers.
-
-The OAR API enables the user to get information regarding nodes and jobs:
-nodes properties (hostnames, radio, mobility type, position with GPS
-coordinates and so on), jobs and the associated username and nodes.
-These are used when querying the testbed about resources
-and leases. In order to add a new node property in the iotlab Rspec format,
-the new property must be defined and parsed beforehand from OAR in the OAR
-API file.
-
-In the LDAP file, the LDAPapi class supposes the unix schema is used.
-If this class is reused in another context, it might not work without some bit
-of customization. The naming (turning a hostname into a sfa hrn, a LDAP login
-into a hrn ) is also done in this class.
-
-The iotlabpostgres file defines a dedicated lease table, hosted in the SFA
-database (in SFA version geni-v3) or in a separated and dedicated Iotlab
-database(in SFA geni-v2). Its purpose is to hold information that we
-can't store anywhere given the Iotlab architecture with OAR and LDAP, namely the
-association of a job and the slice hrn for which the job is supposed to run.
-Indeed, one user may register on another federated testbed then use his
-federated slice to book iotlab nodes. In this case, an Iotlab LDAP account will
-be created. Later on, when new users will be imported from the LDAP to the SFA
-database, an Iotlab slice will be created for each new user found in the LDAP.
-Thus leading us to the situation where one user may have the possibility to use
-different slices to book Iotlab nodes.
-
-----------------------------
-RSpec specific Iotlab format
-----------------------------
-**Folder**:/sfa/rspecs/versions , /sfa/rspecs/elements
-
-There is a specific Iotlab Rspec format. It aims at displaying information that
-is not handled in the SFA Rspec format. Typically, it adds the nodes' mobility
-and its mobility type, the hardware architecture as well as the radio
-chipset used. This is done by defining a iotlabv1 rspec version format file
-under /rspecs/versions. Definitions of an iotlab rspec lease, node and sliver
-are done in the associated files under /rspecs/elements/versions.
-If a property has to be added to the nodes in the Iotlab Rspec format, it
-should be added in the iotlabv1Node file, using the other properties as example.
-
-Future work:
-The Rspec format has to be validated and stored on a website, as the header
-of the return Rspec defines it, which is not the case with the Iotlab rspec
-format. It has been discussed with Mohamed Larabi (Inria Sophia) once, never to
-be mentioned again. Although this does not prevent the SFA server from working,
-maybe it would be nice to be completely compliant and clean in this aspect also.
--SA Dec 2013-
-
---------
-Importer
---------
-**Folder**: /sfa/importer/
-
-The iotlab importer task is to populate the SFA database with records created
-from the information given by OAR and LDAP. Running the importer periodically
-enables the SFA database to be in sync with the LDAP by deleting/ adding records
-in the database corresponding to newly deleted/ added users in LDAP.
-
---------------
-Documentation
---------------
-**Folder** : /sfa/sfa/iotlab/docs
-
-This folder contains the sphinx files needed to generate this documentation.
-As of Dec 2013, and because of the SFA database connection methods, generating
-the documentation fails if the database is not accessible. In this case,
-Iotlabimporter will not be documented.
-A possible workaround is to build the documentation on the SFA server hosting
-the SFA database (which is not a really clean way to do this...).
-To generate the documentation, do "make html" in the /docs folder, where the
-Makefile is located. The package python-sphinx must be installed in order
-for this command to work.
-
-
---------
-Testing
---------
-Two scripts have been written to help with the testing. One is dedicated for
-testing the Iotlab driver, OAR and LDAP classes. The second is to check if the
-client commands work well.
-
-**Folder** : /sfa/testbeds/iotlab/tests
-
-* driver_tests.py : python script to test OAR, LDAP and Iotlabdriver/ IotlabShell
- methods. Modify the script to add more tests if needed.
-
- **starting the script** :python ./driver_tests <-option value> <option>
- example : python ./driver_tests -10 OAR (10 is the job_id in this case)
- option can be : OAR, sql, shell, LDAP , driver, all.
-
-* sfi_client_tests.py : python script to test all the sfi client commands :
- resources, list, allocate (geni-v3), provision(geni-v3), resources, show, status
- and delete. In the geni-v2 branch, this script uses create_sliver instead.
-
- **starting the script** : python ./sfi_client_tests.py <absolute path to the
- rspecs>.
- The Rspecs are included in the git repository under ./sfa/testbeds/iotlab/tests/tests_rspecs.
-
-
-
-.. toctree::
- :maxdepth: 2
-
-Code Documentation
-==================
-
-.. toctree::
- :maxdepth: 2
-
- iotlab.rst
- versions.rst
- importer.rst
-
-
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
+++ /dev/null
-iotlab Package
-==============
-
-:mod:`LDAPapi` Module
----------------------
-
-.. automodule:: iotlab.LDAPapi
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`OARrestapi` Module
-------------------------
-
-.. automodule:: iotlab.OARrestapi
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabaggregate` Module
------------------------------
-
-.. automodule:: iotlab.iotlabaggregate
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabshell` Module
---------------------------
-
-.. automodule:: iotlab.iotlabshell
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabdriver` Module
---------------------------
-
-.. automodule:: iotlab.iotlabdriver
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabpostgres` Module
-----------------------------
-
-.. automodule:: iotlab.iotlabpostgres
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabslices` Module
---------------------------
-
-.. automodule:: iotlab.iotlabslices
- :members:
- :undoc-members:
- :show-inheritance:
-
+++ /dev/null
-iotlab
-======
-
-.. toctree::
- :maxdepth: 4
-
- iotlab
-
-
-versions
-========
-
-.. toctree::
- :maxdepth: 4
-
- versions
+++ /dev/null
-versions Package
-================
-
-:mod:`iotlabv1Lease` Module
----------------------------
-
-.. automodule:: versions.iotlabv1Lease
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabv1Node` Module
---------------------------
-
-.. automodule:: versions.iotlabv1Node
- :members:
- :undoc-members:
- :show-inheritance:
-
-:mod:`iotlabv1Sliver` Module
-----------------------------
-
-.. automodule:: versions.iotlabv1Sliver
- :members:
- :undoc-members:
- :show-inheritance:
-
-"""
-File providing methods to generate valid RSpecs for the Iotlab testbed.
-Contains methods to get information on slice, slivers, nodes and leases,
-formatting them and turn it into a RSpec.
-"""
+# -*- coding:utf-8 -*-
+""" aggregate class management """
+
+from sfa.util.xrn import Xrn, hrn_to_urn
from sfa.util.sfatime import utcparse, datetime_to_string
-from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn
-from sfa.iotlab.iotlabxrn import IotlabXrn
+from sfa.util.sfalogging import logger
from sfa.rspecs.rspec import RSpec
-#from sfa.rspecs.elements.location import Location
from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.login import Login
-# from sfa.rspecs.elements.services import ServicesElement
-from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.lease import Lease
from sfa.rspecs.elements.granularity import Granularity
from sfa.rspecs.version_manager import VersionManager
-from sfa.storage.model import SliverAllocation
-from sfa.rspecs.elements.versions.iotlabv1Node import IotlabPosition, \
- IotlabNode, IotlabLocation
-from sfa.iotlab.iotlabxrn import xrn_object
-from sfa.util.sfalogging import logger
+from sfa.rspecs.elements.versions.iotlabv1Node import IotlabPosition
+from sfa.rspecs.elements.versions.iotlabv1Node import IotlabNode
+from sfa.rspecs.elements.versions.iotlabv1Node import IotlabLocation
+from sfa.iotlab.iotlablease import LeaseTable
import time
+import datetime
-class IotlabAggregate:
- """Aggregate manager class for Iotlab. """
-
- sites = {}
- nodes = {}
- api = None
- interfaces = {}
- links = {}
- node_tags = {}
-
- prepared = False
-
- user_options = {}
+class IotLABAggregate(object):
+ """
+ SFA aggregate for Iot-LAB testbed
+ """
def __init__(self, driver):
self.driver = driver
- def get_slice_and_slivers(self, slice_xrn, login=None):
- """
- Get the slices and the associated leases if any from the iotlab
- testbed. One slice can have mutliple leases.
- For each slice, get the nodes in the associated lease
- and create a sliver with the necessary info and insert it into the
- sliver dictionary, keyed on the node hostnames.
- Returns a dict of slivers based on the sliver's node_id.
- Called by get_rspec.
-
-
- :param slice_xrn: xrn of the slice
- :param login: user's login on iotlab ldap
-
- :type slice_xrn: string
- :type login: string
- :returns: a list of slices dict and a list of Sliver object
- :rtype: (list, list)
-
- .. note: There is no real slivers in iotlab, only leases. The goal
- is to be consistent with the SFA standard.
-
- """
- slivers = {}
- sfa_slice = None
- if slice_xrn is None:
- return (sfa_slice, slivers)
- slice_urn = hrn_to_urn(slice_xrn, 'slice')
- slice_hrn, _ = urn_to_hrn(slice_xrn)
-
- # GetSlices always returns a list, even if there is only one element
- slices = self.driver.GetSlices(slice_filter=str(slice_hrn),
- slice_filter_type='slice_hrn',
- login=login)
-
- logger.debug("IotlabAggregate api \tget_slice_and_slivers \
- slice_hrn %s \r\n slices %s self.driver.hrn %s"
- % (slice_hrn, slices, self.driver.hrn))
- if slices == []:
- return (sfa_slice, slivers)
-
- # sort slivers by node id , if there is a job
- #and therefore, node allocated to this slice
- # for sfa_slice in slices:
- sfa_slice = slices[0]
- try:
- node_ids_list = sfa_slice['node_ids']
- except KeyError:
- logger.log_exc("IOTLABAGGREGATE \t \
- get_slice_and_slivers No nodes in the slice \
- - KeyError ")
- node_ids_list = []
- # continue
-
- for node in node_ids_list:
- sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
- sliver_xrn.set_authority(self.driver.hrn)
- sliver = Sliver({'sliver_id': sliver_xrn.urn,
- 'name': sfa_slice['hrn'],
- 'type': 'iotlab-node',
- 'tags': []})
-
- slivers[node] = sliver
-
- #Add default sliver attribute :
- #connection information for iotlab
- # if get_authority(sfa_slice['hrn']) == \
- # self.driver.testbed_shell.root_auth:
- # tmp = sfa_slice['hrn'].split('.')
- # ldap_username = tmp[1].split('_')[0]
- # ssh_access = None
- # slivers['default_sliver'] = {'ssh': ssh_access,
- # 'login': ldap_username}
- # look in ldap:
- ldap_username = self.find_ldap_username_from_slice(sfa_slice)
-
- if ldap_username is not None:
- ssh_access = None
- slivers['default_sliver'] = {'ssh': ssh_access,
- 'login': ldap_username}
-
-
- logger.debug("IOTLABAGGREGATE api get_slice_and_slivers slivers %s "
- % (slivers))
- return (slices, slivers)
-
- def find_ldap_username_from_slice(self, sfa_slice):
- """
- Gets the ldap username of the user based on the information contained
- in ist sfa_slice record.
-
- :param sfa_slice: the user's slice record. Must contain the
- reg_researchers key.
- :type sfa_slice: dictionary
- :returns: ldap_username, the ldap user's login.
- :rtype: string
-
- """
- researchers = [sfa_slice['reg_researchers'][0].__dict__]
- # look in ldap:
- ldap_username = None
- ret = self.driver.testbed_shell.GetPersons(researchers)
- if len(ret) != 0:
- ldap_username = ret[0]['uid']
-
- return ldap_username
-
-
-
- def get_nodes(self, options=None):
- # def node_to_rspec_node(self, node, sites, node_tags,
- # grain=None, options={}):
- """Returns the nodes in the slice using the rspec format, with all the
- nodes' properties.
-
- Fetch the nodes ids in the slices dictionary and get all the nodes
- properties from OAR. Makes a rspec dicitonary out of this and returns
- it. If the slice does not have any job running or scheduled, that is
- it has no reserved nodes, then returns an empty list.
- :returns: An empty list if the slice has no reserved nodes, a rspec
- list with all the nodes and their properties (a dict per node)
- otherwise.
- :rtype: list
-
- .. seealso:: get_slice_and_slivers
-
- """
+ def leases_to_rspec_leases(self, leases):
+ """ Get leases attributes list"""
+ rspec_leases = []
+ for lease in leases:
+ for node in lease['resources']:
+ rspec_lease = Lease()
+ rspec_lease['lease_id'] = lease['id']
+ iotlab_xrn = Xrn('.'.join([self.driver.root_auth,
+ Xrn.escape(node)]),
+ type='node')
+ rspec_lease['component_id'] = iotlab_xrn.urn
+ rspec_lease['start_time'] = str(lease['date'])
+ duration = int(lease['duration'])/60 # duration in minutes
+ rspec_lease['duration'] = duration
+ rspec_lease['slice_id'] = lease['slice_id']
+ rspec_leases.append(rspec_lease)
+ return rspec_leases
- # NT: the semantic of this function is not clear to me :
- # if slice is not defined, then all the nodes should be returned
- # if slice is defined, we should return only the nodes that
- # are part of this slice
- # but what is the role of the slivers parameter ?
- # So i assume that slice['node_ids'] will be the same as slivers for us
- filter_nodes = None
- if options:
- geni_available = options.get('geni_available')
- if geni_available == True:
- filter_nodes['boot_state'] = ['Alive']
-
- # slice_nodes_list = []
- # if slices is not None:
- # for one_slice in slices:
- # try:
- # slice_nodes_list = one_slice['node_ids']
- # # if we are dealing with a slice that has no node just
- # # return an empty list. In iotlab a slice can have multiple
- # # jobs scheduled, so it either has at least one lease or
- # # not at all.
- # except KeyError:
- # return []
-
- # get the granularity in second for the reservation system
- # grain = self.driver.testbed_shell.GetLeaseGranularity()
-
- nodes = self.driver.testbed_shell.GetNodes(node_filter_dict =
- filter_nodes)
-
- nodes_dict = {}
-
- #if slices, this means we got to list all the nodes given to this slice
- # Make a list of all the nodes in the slice before getting their
- #attributes
- # rspec_nodes = []
-
- # logger.debug("IOTLABAGGREGATE api get_nodes slices %s "
- # % (slices))
-
- # reserved_nodes = self.driver.testbed_shell.GetNodesCurrentlyInUse()
- # logger.debug("IOTLABAGGREGATE api get_nodes slice_nodes_list %s "
- # % (slice_nodes_list))
- for node in nodes:
- nodes_dict[node['node_id']] = node
-
- return nodes_dict
def node_to_rspec_node(self, node):
- """ Creates a rspec node structure with the appropriate information
- based on the node information that can be found in the node dictionary.
-
- :param node: node data. this dict contains information about the node
- and must have the following keys : mobile, radio, archi, hostname,
- boot_state, site, x, y ,z (position).
- :type node: dictionary.
-
- :returns: node dictionary containing the following keys : mobile, archi,
- radio, component_id, component_name, component_manager_id,
- authority_id, boot_state, exclusive, hardware_types, location,
- position, granularity, tags.
- :rtype: dict
-
- """
-
- grain = self.driver.testbed_shell.GetLeaseGranularity()
-
+ """ Get node attributes """
rspec_node = IotlabNode()
- # xxx how to retrieve site['login_base']
- #site_id=node['site_id']
- #site=sites_dict[site_id]
-
rspec_node['mobile'] = node['mobile']
rspec_node['archi'] = node['archi']
- rspec_node['radio'] = node['radio']
-
- iotlab_xrn = xrn_object(self.driver.testbed_shell.root_auth,
- node['hostname'])
+ rspec_node['radio'] = (node['archi'].split(':'))[1]
+ iotlab_xrn = Xrn('.'.join([self.driver.root_auth,
+ Xrn.escape(node['network_address'])]),
+ type='node')
+ rspec_node['boot_state'] = 'true'
rspec_node['component_id'] = iotlab_xrn.urn
- rspec_node['component_name'] = node['hostname']
+ rspec_node['component_name'] = node['network_address']
rspec_node['component_manager_id'] = \
- hrn_to_urn(self.driver.testbed_shell.root_auth,
+ hrn_to_urn(self.driver.root_auth,
'authority+sa')
-
- # Iotlab's nodes are federated : there is only one authority
- # for all Iotlab sites, registered in SFA.
- # Removing the part including the site
- # in authority_id SA 27/07/12
rspec_node['authority_id'] = rspec_node['component_manager_id']
-
- # do not include boot state (<available> element)
- #in the manifest rspec
-
-
- rspec_node['boot_state'] = node['boot_state']
- # if node['hostname'] in reserved_nodes:
- # rspec_node['boot_state'] = "Reserved"
rspec_node['exclusive'] = 'true'
rspec_node['hardware_types'] = [HardwareType({'name': \
'iotlab-node'})]
-
location = IotlabLocation({'country':'France', 'site': \
node['site']})
rspec_node['location'] = location
-
-
position = IotlabPosition()
- for field in position :
- try:
- position[field] = node[field]
- except KeyError, error :
- logger.log_exc("IOTLABAGGREGATE\t get_nodes \
- position %s "% (error))
-
- rspec_node['position'] = position
-
-
- # Granularity
- granularity = Granularity({'grain': grain})
+ for field in position:
+ position[field] = node[field]
+ granularity = Granularity({'grain': 30})
rspec_node['granularity'] = granularity
rspec_node['tags'] = []
- # if node['hostname'] in slivers:
- # # add sliver info
- # sliver = slivers[node['hostname']]
- # rspec_node['sliver_id'] = sliver['sliver_id']
- # rspec_node['client_id'] = node['hostname']
- # rspec_node['slivers'] = [sliver]
-
- # # slivers always provide the ssh service
- # login = Login({'authentication': 'ssh-keys', \
- # 'hostname': node['hostname'], 'port':'22', \
- # 'username': sliver['name']})
- # service = Services({'login': login})
- # rspec_node['services'] = [service]
-
return rspec_node
- def rspec_node_to_geni_sliver(self, rspec_node, sliver_allocations = None):
- """Makes a geni sliver structure from all the nodes allocated
- to slivers in the sliver_allocations dictionary. Returns the states
- of the sliver.
-
- :param rspec_node: Node information contained in a rspec data structure
- fashion.
- :type rspec_node: dictionary
- :param sliver_allocations:
- :type sliver_allocations: dictionary
-
- :returns: Dictionary with the following keys: geni_sliver_urn,
- geni_expires, geni_allocation_status, geni_operational_status,
- geni_error.
-
- :rtype: dictionary
+ def sliver_to_rspec_node(self, sliver):
+ """ Get node and sliver attributes """
+ rspec_node = self.node_to_rspec_node(sliver)
+ rspec_node['expires'] = datetime_to_string(utcparse(sliver['expires']))
+ rspec_node['sliver_id'] = sliver['sliver_id']
+ return rspec_node
- .. seealso:: node_to_rspec_node
- """
- if sliver_allocations is None: sliver_allocations={}
- if rspec_node['sliver_id'] in sliver_allocations:
- # set sliver allocation and operational status
- sliver_allocation = sliver_allocations[rspec_node['sliver_id']]
- if sliver_allocation:
- allocation_status = sliver_allocation.allocation_state
- if allocation_status == 'geni_allocated':
- op_status = 'geni_pending_allocation'
- elif allocation_status == 'geni_provisioned':
- op_status = 'geni_ready'
- else:
- op_status = 'geni_unknown'
- else:
- allocation_status = 'geni_unallocated'
- else:
- allocation_status = 'geni_unallocated'
- op_status = 'geni_failed'
- # required fields
+ @classmethod
+ def rspec_node_to_geni_sliver(cls, rspec_node):
+ """ Get sliver status """
geni_sliver = {'geni_sliver_urn': rspec_node['sliver_id'],
'geni_expires': rspec_node['expires'],
- 'geni_allocation_status' : allocation_status,
- 'geni_operational_status': op_status,
+ 'geni_allocation_status' : 'geni_allocated',
+ 'geni_operational_status': 'geni_pending_allocation',
'geni_error': '',
}
return geni_sliver
- def sliver_to_rspec_node(self, sliver, sliver_allocations):
- """Used by describe to format node information into a rspec compliant
- structure.
-
- Creates a node rspec compliant structure by calling node_to_rspec_node.
- Adds slivers, if any, to rspec node structure. Returns the updated
- rspec node struct.
-
- :param sliver: sliver dictionary. Contains keys: urn, slice_id, hostname
- and slice_name.
- :type sliver: dictionary
- :param sliver_allocations: dictionary of slivers
- :type sliver_allocations: dict
-
- :returns: Node dictionary with all necessary data.
-
- .. seealso:: node_to_rspec_node
+ def list_resources(self, version=None, options=None):
"""
- rspec_node = self.node_to_rspec_node(sliver)
- rspec_node['expires'] = datetime_to_string(utcparse(sliver['expires']))
- # add sliver info
- logger.debug("IOTLABAGGREGATE api \t sliver_to_rspec_node sliver \
- %s \r\nsliver_allocations %s" % (sliver,
- sliver_allocations))
- rspec_sliver = Sliver({'sliver_id': sliver['urn'],
- 'name': sliver['slice_id'],
- 'type': 'iotlab-exclusive',
- 'tags': []})
- rspec_node['sliver_id'] = rspec_sliver['sliver_id']
-
- if sliver['urn'] in sliver_allocations:
- rspec_node['client_id'] = sliver_allocations[
- sliver['urn']].client_id
- if sliver_allocations[sliver['urn']].component_id:
- rspec_node['component_id'] = sliver_allocations[
- sliver['urn']].component_id
- rspec_node['slivers'] = [rspec_sliver]
-
- # slivers always provide the ssh service
- login = Login({'authentication': 'ssh-keys',
- 'hostname': sliver['hostname'],
- 'port':'22',
- 'username': sliver['slice_name'],
- 'login': sliver['slice_name']
- })
- return rspec_node
-
-
- def get_leases(self, slice=None, options=None):
- if options is None: options={}
- filter={}
- if slice:
- filter.update({'slice_hrn':slice['slice_hrn']}) # JORDAN: this is = "upmc" !!!
- #filter.update({'name':slice['slice_name']})
- #return_fields = ['lease_id', 'hostname', 'site_id', 'name', 't_from', 't_until']
- leases = self.driver.GetLeases(lease_filter_dict=filter)
- grain = self.driver.testbed_shell.GetLeaseGranularity()
-
- rspec_leases = []
- for lease in leases:
- #as many leases as there are nodes in the job
- for node in lease['reserved_nodes']:
- rspec_lease = Lease()
- rspec_lease['lease_id'] = lease['lease_id']
- #site = node['site_id']
- iotlab_xrn = xrn_object(self.driver.testbed_shell.root_auth,
- node)
- rspec_lease['component_id'] = iotlab_xrn.urn
- #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn,\
- #site, node['hostname'])
- try:
- rspec_lease['slice_id'] = lease['slice_id']
- except KeyError:
- #No info on the slice used in testbed_xp table
- pass
- rspec_lease['start_time'] = lease['t_from']
- rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
- / grain
- rspec_leases.append(rspec_lease)
- return rspec_leases
-
-
- def get_all_leases(self, ldap_username):
- """
- Get list of lease dictionaries which all have the mandatory keys
- ('lease_id', 'hostname', 'site_id', 'name', 'start_time', 'duration').
- All the leases running or scheduled are returned.
-
- :param ldap_username: if ldap uid is not None, looks for the leases
- belonging to this user.
- :type ldap_username: string
- :returns: rspec lease dictionary with keys lease_id, component_id,
- slice_id, start_time, duration where the lease_id is the oar job id,
- component_id is the node's urn, slice_id is the slice urn,
- start_time is the timestamp starting time and duration is expressed
- in terms of the testbed's granularity.
- :rtype: dict
-
- .. note::There is no filtering of leases within a given time frame.
- All the running or scheduled leases are returned. options
- removed SA 15/05/2013
-
-
- """
-
- logger.debug("IOTLABAGGREGATE get_all_leases ldap_username %s "
- % (ldap_username))
- leases = self.driver.GetLeases(login=ldap_username)
- grain = self.driver.testbed_shell.GetLeaseGranularity()
-
- rspec_leases = []
- for lease in leases:
- #as many leases as there are nodes in the job
- for node in lease['reserved_nodes']:
- rspec_lease = Lease()
- rspec_lease['lease_id'] = lease['lease_id']
- #site = node['site_id']
- iotlab_xrn = xrn_object(self.driver.testbed_shell.root_auth,
- node)
- rspec_lease['component_id'] = iotlab_xrn.urn
- #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn,\
- #site, node['hostname'])
- try:
- rspec_lease['slice_id'] = lease['slice_id']
- except KeyError:
- #No info on the slice used in testbed_xp table
- pass
- rspec_lease['start_time'] = lease['t_from']
- rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
- / grain
- rspec_leases.append(rspec_lease)
- return rspec_leases
-
- def get_rspec(self, slice_xrn=None, login=None, version=None,
- options=None):
+ list_resources method sends a RSpec with all Iot-LAB testbed nodes
+ and leases (OAR job submission). For leases we get all OAR jobs with
+ state Waiting or Running. If we have an entry in SFA database
+ (lease table) with OAR job id this submission was launched by SFA
+ driver, otherwise it was launched by Iot-LAB Webportal or CLI-tools
+
+ :Example:
+ <rspec>
+ ...
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa"
+ component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.devgrenoble.iot-lab.info"
+ exclusive="true" component_name="m3-10.devgrenoble.iot-lab.info">
+ <hardware_type name="iotlab-node"/>
+ <location country="France"/>
+ <granularity grain="60"/>
+ ...
+ </node>
+ ...
+ <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab"
+ start_time="1427792400" duration="30">
+ <node component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info"/>
+ </lease>
+ ...
+ </rspec>
"""
- Returns xml rspec:
- - a full advertisement rspec with the testbed resources if slice_xrn is
- not specified.If a lease option is given, also returns the leases
- scheduled on the testbed.
- - a manifest Rspec with the leases and nodes in slice's leases if
- slice_xrn is not None.
-
- :param slice_xrn: srn of the slice
- :type slice_xrn: string
- :param login: user'uid (ldap login) on iotlab
- :type login: string
- :param version: can be set to sfa or iotlab
- :type version: RSpecVersion
- :param options: used to specify if the leases should also be included in
- the returned rspec.
- :type options: dict
-
- :returns: Xml Rspec.
- :rtype: XML
+ # pylint:disable=R0914,W0212
+ logger.warning("iotlabaggregate list_resources")
+ logger.warning("iotlabaggregate list_resources options %s" % options)
+ if not options:
+ options = {}
-
- """
-
- ldap_username = None
- rspec = None
version_manager = VersionManager()
version = version_manager.get_version(version)
- logger.debug("IotlabAggregate \t get_rspec ***version %s \
- version.type %s version.version %s options %s \r\n"
- % (version, version.type, version.version, options))
-
- if slice_xrn is None:
- rspec_version = version_manager._get_version(version.type,
- version.version, 'ad')
-
- else:
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
-
- slices, slivers = self.get_slice_and_slivers(slice_xrn, login)
- if slice_xrn and slices is not None:
- #Get user associated with this slice
- #for one_slice in slices :
- ldap_username = self.find_ldap_username_from_slice(slices[0])
- # ldap_username = slices[0]['reg_researchers'][0].__dict__['hrn']
- # # ldap_username = slices[0]['user']
- # tmp = ldap_username.split('.')
- # ldap_username = tmp[1]
- logger.debug("IotlabAggregate \tget_rspec **** \
- LDAP USERNAME %s \r\n" \
- % (ldap_username))
- #at this point sliver may be empty if no iotlab job
- #is running for this user/slice.
+ rspec_version = version_manager._get_version(version.type,
+ version.version,
+ 'ad')
rspec = RSpec(version=rspec_version, user_options=options)
- logger.debug("\r\n \r\n IotlabAggregate \tget_rspec *** \
- slice_xrn %s slices %s\r\n \r\n"
- % (slice_xrn, slices))
-
- if options is not None:
- lease_option = options['list_leases']
- else:
- #If no options are specified, at least print the resources
- lease_option = 'all'
- #if slice_xrn :
- #lease_option = 'all'
-
- if lease_option in ['all', 'resources']:
- #if not options.get('list_leases') or options.get('list_leases')
- #and options['list_leases'] != 'leases':
- nodes = self.get_nodes()
- logger.debug("\r\n")
- logger.debug("IotlabAggregate \t lease_option %s \
- get rspec ******* nodes %s"
- % (lease_option, nodes))
-
- sites_set = set([node['location']['site'] for node in nodes])
-
- #In case creating a job, slice_xrn is not set to None
- rspec.version.add_nodes(nodes)
- if slice_xrn and slices is not None:
- # #Get user associated with this slice
- # #for one_slice in slices :
- # ldap_username = slices[0]['reg_researchers']
- # # ldap_username = slices[0]['user']
- # tmp = ldap_username.split('.')
- # ldap_username = tmp[1]
- # # ldap_username = tmp[1].split('_')[0]
-
- logger.debug("IotlabAggregate \tget_rspec **** \
- version type %s ldap_ user %s \r\n" \
- % (version.type, ldap_username))
- if version.type == "Iotlab":
- rspec.version.add_connection_information(
- ldap_username, sites_set)
-
- default_sliver = slivers.get('default_sliver', [])
- if default_sliver and len(nodes) is not 0:
- #default_sliver_attribs = default_sliver.get('tags', [])
- logger.debug("IotlabAggregate \tget_rspec **** \
- default_sliver%s \r\n" % (default_sliver))
- for attrib in default_sliver:
- rspec.version.add_default_sliver_attribute(
- attrib, default_sliver[attrib])
-
- if lease_option in ['all','leases']:
- leases = self.get_all_leases(ldap_username)
- rspec.version.add_leases(leases)
- logger.debug("IotlabAggregate \tget_rspec **** \
- FINAL RSPEC %s \r\n" % (rspec.toxml()))
- return rspec.toxml()
-
- def get_slivers(self, urns, options=None):
- """Get slivers of the given slice urns. Slivers contains slice, node and
- user information.
-
- For Iotlab, returns the leases with sliver ids and their allocation
- status.
+ nodes = self.driver.shell.get_nodes()
+ reserved_nodes = self.driver.shell.get_reserved_nodes()
+ if not 'error' in nodes and not 'error' in reserved_nodes:
+ # convert nodes to rspec nodes
+ rspec_nodes = []
+ for node in nodes:
+ rspec_node = self.node_to_rspec_node(nodes[node])
+ rspec_nodes.append(rspec_node)
+ rspec.version.add_nodes(rspec_nodes)
- :param urns: list of slice urns.
- :type urns: list of strings
- :param options: unused
- :type options: unused
+ leases = []
+ db_leases = {}
+ # find OAR jobs id for all slices in SFA database
+ for lease in self.driver.api.dbsession().query(LeaseTable).all():
+ db_leases[lease.job_id] = lease.slice_hrn
+
+ for lease_id in reserved_nodes:
+ # onelab slice = job submission from OneLAB
+ if lease_id in db_leases:
+ reserved_nodes[lease_id]['slice_id'] = \
+ hrn_to_urn(db_leases[lease_id],
+ 'slice')
+ # iotlab slice = job submission from Iot-LAB
+ else:
+ reserved_nodes[lease_id]['slice_id'] = \
+ hrn_to_urn(self.driver.root_auth+'.'+
+ reserved_nodes[lease_id]['owner']+"_slice",
+ 'slice')
+ leases.append(reserved_nodes[lease_id])
+
+ rspec_leases = self.leases_to_rspec_leases(leases)
+ logger.warning("iotlabaggregate list_resources rspec_leases %s" %
+ rspec_leases)
+ rspec.version.add_leases(rspec_leases)
+ return rspec.toxml()
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
- """
- # JORDAN using SLICE_KEY for slice_hrn
- SLICE_KEY = 'slice_hrn' # slice_hrn
- if options is None: options={}
- slice_ids = set()
- node_ids = []
- for urn in urns:
- xrn = IotlabXrn(xrn=urn)
- if xrn.type == 'sliver':
- # id: slice_id-node_id
- try:
- sliver_id_parts = xrn.get_sliver_id_parts()
- slice_id = int(sliver_id_parts[0])
- node_id = int(sliver_id_parts[1])
- slice_ids.add(slice_id)
- node_ids.append(node_id)
- except ValueError:
- pass
- else:
- slice_names = set()
- slice_names.add(xrn.hrn)
-
-
- logger.debug("IotlabAggregate \t get_slivers urns %s slice_ids %s \
- node_ids %s\r\n" % (urns, slice_ids, node_ids))
- logger.debug("IotlabAggregate \t get_slivers xrn %s slice_names %s \
- \r\n" % (xrn, slice_names))
- filter_sliver = {}
- if slice_names:
- filter_sliver[SLICE_KEY] = list(slice_names)
- slice_hrn = filter_sliver[SLICE_KEY][0]
-
- slice_filter_type = SLICE_KEY
-
- # if slice_ids:
- # filter['slice_id'] = list(slice_ids)
- # # get slices
- if slice_hrn:
- #logger.debug("JORDAN SLICE_HRN=%r" % slice_hrn)
- slices = self.driver.GetSlices(slice_hrn,
- slice_filter_type)
- leases = self.driver.GetLeases({SLICE_KEY:slice_hrn})
- logger.debug("IotlabAggregate \t get_slivers \
- slices %s leases %s\r\n" % (slices, leases ))
- if not slices:
- return []
-
- single_slice = slices[0]
- # get sliver users
- # XXX LOIC !!! XXX QUICK AND DIRTY - Let's try...
- logger.debug("LOIC Number of reg_researchers = %s" % len(single_slice['reg_researchers']))
- if 'reg_researchers' in single_slice and len(single_slice['reg_researchers'])==0:
- user = {'uid':single_slice['user']}
- else:
- user = single_slice['reg_researchers'][0].__dict__
-
- user = single_slice['reg_researchers'][0].__dict__
- logger.debug("IotlabAggregate \t get_slivers user %s \
- \r\n" % (user))
-
- # construct user key info
- person = self.driver.testbed_shell.ldap.LdapFindUser(record=user)
- logger.debug("IotlabAggregate \t get_slivers person %s \
- \r\n" % (person))
- # name = person['last_name']
- user['login'] = person['uid']
- # XXX LOIC !!! if we have more info, let's fill user
- if 'hrn' in user:
- user['user_urn'] = hrn_to_urn(user['hrn'], 'user')
- if 'keys' in user:
- user['keys'] = person['pkey']
-
-
- try:
- logger.debug("############################################ iotlab AM : single_slice = %s" % single_slice)
- node_ids = single_slice['node_ids']
- node_list = self.driver.testbed_shell.GetNodes()
- logger.debug("############################################ iotlab AM : node_list = %s" % node_list)
-# JORDAN REMOVED FILTER so that next check always succeed
-# {'hostname':single_slice['node_ids']})
- node_by_hostname = dict([(node['hostname'], node)
- for node in node_list])
- except KeyError:
- logger.warning("\t get_slivers No slivers in slice")
- # slice['node_ids'] = node_ids
- # nodes_dict = self.get_slice_nodes(slice, options)
+ def get_slivers(self, urns, leases, nodes):
+ """ Get slivers attributes list """
+ logger.warning("iotlabaggregate get_slivers")
+ logger.warning("iotlabaggregate get_slivers urns %s" % urns)
slivers = []
- for current_lease in leases:
- for hostname in current_lease['reserved_nodes']:
- node = {}
- node['slice_id'] = current_lease['slice_id']
- node['slice_hrn'] = current_lease['slice_hrn']
- slice_name = current_lease['slice_hrn'].split(".")[1]
- node['slice_name'] = slice_name
- index = current_lease['reserved_nodes'].index(hostname)
- node_id = current_lease['resource_ids'][index]
- # node['slice_name'] = user['login']
- # node.update(single_slice)
- # JORDAN XXX This fails sometimes when hostname not in the list
- #if hostname in node_by_hostname:
- more_info = node_by_hostname[hostname]
- node.update(more_info)
- #else:
- # # This can happen when specifying a lease without the resource, then all subsequent calls will fail
- # logger.debug("Ignored missing hostname for now one")
- # oar_job_id is the slice_id (lease_id)
+ for lease in leases:
+ for node in lease['resources']:
+ sliver_node = nodes[node]
sliver_hrn = '%s.%s-%s' % (self.driver.hrn,
- current_lease['lease_id'], node_id)
- node['node_id'] = node_id
- node['expires'] = current_lease['t_until']
- node['sliver_id'] = Xrn(sliver_hrn, type='sliver').urn
- node['urn'] = node['sliver_id']
- node['services_user'] = [user]
-
- slivers.append(node)
+ lease['id'], node.split(".")[0])
+ start_time = datetime.datetime.fromtimestamp(lease['date'])
+ duration = datetime.timedelta(seconds=int(lease['duration']))
+ sliver_node['expires'] = start_time + duration
+ sliver_node['sliver_id'] = Xrn(sliver_hrn,
+ type='sliver').urn
+ slivers.append(sliver_node)
return slivers
- def list_resources(self, version = None, options=None):
- """
- Returns an advertisement Rspec of available resources at this
- aggregate. This Rspec contains a resource listing along with their
- description, providing sufficient information for clients to be able to
- select among available resources.
-
- :param options: various options. The valid options are: {boolean
- geni_compressed <optional>; struct geni_rspec_version { string type;
- #case insensitive , string version; # case insensitive}} . The only
- mandatory options if options is specified is geni_rspec_version.
- :type options: dictionary
-
- :returns: On success, the value field of the return struct will contain
- a geni.rspec advertisment RSpec
- :rtype: Rspec advertisement in xml.
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#RSpecdatatype
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#ListResources
- """
- if options is None: options={}
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(version.type,
- version.version, 'ad')
- rspec = RSpec(version=rspec_version, user_options=options)
- # variable ldap_username to be compliant with get_all_leases
- # prototype. Now unused in geni-v3 since we are getting all the leases
- # here
- ldap_username = None
- if not options.get('list_leases') or options['list_leases'] != 'leases':
- # get nodes
- nodes_dict = self.get_nodes(options)
-
- # no interfaces on iotlab nodes
- # convert nodes to rspec nodes
- rspec_nodes = []
- for node_id in nodes_dict:
- node = nodes_dict[node_id]
- rspec_node = self.node_to_rspec_node(node)
- rspec_nodes.append(rspec_node)
- rspec.version.add_nodes(rspec_nodes)
-
- # add links
- # links = self.get_links(sites, nodes_dict, interfaces)
- # rspec.version.add_links(links)
-
- if not options.get('list_leases') or options.get('list_leases') \
- and options['list_leases'] != 'resources':
- leases = self.get_all_leases(ldap_username)
- rspec.version.add_leases(leases)
-
- return rspec.toxml()
+ def _delete_db_lease(self, job_id):
+ """ Delete lease table row in SFA database """
+ logger.warning("iotlabdriver _delete_db_lease lease job_id : %s"
+ % job_id)
+ self.driver.api.dbsession().query(LeaseTable).filter(
+ LeaseTable.job_id == job_id).delete()
+ self.driver.api.dbsession().commit()
def describe(self, urns, version=None, options=None):
"""
- Retrieve a manifest RSpec describing the resources contained by the
- named entities, e.g. a single slice or a set of the slivers in a slice.
- This listing and description should be sufficiently descriptive to allow
- experimenters to use the resources.
-
- :param urns: If a slice urn is supplied and there are no slivers in the
- given slice at this aggregate, then geni_rspec shall be a valid
- manifest RSpec, containing no node elements - no resources.
- :type urns: list or strings
- :param options: various options. the valid options are: {boolean
- geni_compressed <optional>; struct geni_rspec_version { string type;
- #case insensitive , string version; # case insensitive}}
- :type options: dictionary
-
- :returns: On success returns the following dictionary {geni_rspec:
- <geni.rspec, a Manifest RSpec>, geni_urn: <string slice urn of the
- containing slice>, geni_slivers:{ geni_sliver_urn:
- <string sliver urn>, geni_expires: <dateTime.rfc3339 allocation
- expiration string, as in geni_expires from SliversStatus>,
- geni_allocation_status: <string sliver state - e.g. geni_allocated
- or geni_provisioned >, geni_operational_status:
- <string sliver operational state>, geni_error: <optional string.
- The field may be omitted entirely but may not be null/None,
- explaining any failure for a sliver.>}
-
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3#Describe
- .. seealso:: http://groups.geni.net/geni/wiki/GAPI_AM_API_V3/CommonConcepts#urns
+ describe method returns slice slivers (allocated resources) and leases
+ (OAR job submission). We search in lease table of SFA database all OAR
+ jobs id for this slice and match OAR jobs with state Waiting or Running.
+ If OAR job id doesn't exist the experiment is terminated and we delete
+ the database table entry. Otherwise we add slivers and leases in the
+ response
+
+ :returns:
+ geni_slivers : a list of allocated slivers with information about
+ their allocation and operational state
+ geni_urn : the URN of the slice in which the sliver has been
+ allocated
+ geni_rspec: a RSpec describing the allocated slivers and leases
+ :rtype: dict
+
+ :Example:
+ <rspec>
+ ...
+ <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa"
+ component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info"
+ client_id="m3-10.grenoble.iot-lab.info"
+ sliver_id="urn:publicid:IDN+iotlab+sliver+9953-m3-10"
+ exclusive="true" component_name="m3-10.grenoble.iot-lab.info">
+ <hardware_type name="iotlab-node"/>
+ <location country="France"/>
+ <granularity grain="30"/>
+ <sliver_type name="iotlab-exclusive"/>
+ </node>
+ <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab"
+ start_time="1427792428" duration="29">
+ <node component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info"/>
+ </lease>
+ ...
+ </rspec>
+
"""
- if options is None: options={}
+ # pylint:disable=R0914,W0212
+ logger.warning("iotlabaggregate describe")
+ logger.warning("iotlabaggregate describe urns : %s" % urns)
+ if not options:
+ options = {}
version_manager = VersionManager()
version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
+ rspec_version = version_manager._get_version(version.type,
+ version.version,
+ 'manifest')
rspec = RSpec(version=rspec_version, user_options=options)
-
- # get slivers
+ xrn = Xrn(urns[0])
geni_slivers = []
- slivers = self.get_slivers(urns, options)
- if slivers:
- rspec_expires = datetime_to_string(utcparse(slivers[0]['expires']))
- else:
- rspec_expires = datetime_to_string(utcparse(time.time()))
- rspec.xml.set('expires', rspec_expires)
-
- # lookup the sliver allocations
- geni_urn = urns[0]
- sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- constraint = SliverAllocation.sliver_id.in_(sliver_ids)
- query = self.driver.api.dbsession().query(SliverAllocation)
- sliver_allocations = query.filter((constraint)).all()
- sliver_allocation_dict = {}
- for sliver_allocation in sliver_allocations:
- geni_urn = sliver_allocation.slice_urn
- sliver_allocation_dict[sliver_allocation.sliver_id] = \
- sliver_allocation
- # JORDAN get the option list_leases if we want to have the leases in describe
- show_leases = options.get('list_leases')
- if show_leases in ['resources', 'all']:
- #if not options.get('list_leases') or options['list_leases'] != 'leases':
- # add slivers
- nodes_dict = {}
- for sliver in slivers:
- nodes_dict[sliver['node_id']] = sliver
+
+ nodes = self.driver.shell.get_nodes()
+ reserved_nodes = self.driver.shell.get_reserved_nodes()
+ if not 'error' in nodes and not 'error' in reserved_nodes:
+ # find OAR jobs id for one slice in SFA database
+ db_leases = [(lease.job_id, lease.slice_hrn)
+ for lease in self.driver.api.dbsession()
+ .query(LeaseTable)
+ .filter(LeaseTable.slice_hrn == xrn.hrn).all()]
+
+ leases = []
+ for job_id, slice_hrn in db_leases:
+ # OAR job terminated, we delete entry in database
+ if not job_id in reserved_nodes:
+ self._delete_db_lease(job_id)
+ else:
+ # onelab slice = job submission from OneLAB
+ lease = reserved_nodes[job_id]
+ lease['slice_id'] = hrn_to_urn(slice_hrn, 'slice')
+ leases.append(lease)
+
+ # get slivers
+ slivers = self.get_slivers(urns, leases, nodes)
+ if slivers:
+ date = utcparse(slivers[0]['expires'])
+ rspec_expires = datetime_to_string(date)
+ else:
+ rspec_expires = datetime_to_string(utcparse(time.time()))
+ rspec.xml.set('expires', rspec_expires)
+
rspec_nodes = []
+
for sliver in slivers:
- rspec_node = self.sliver_to_rspec_node(sliver,
- sliver_allocation_dict)
+ rspec_node = self.sliver_to_rspec_node(sliver)
rspec_nodes.append(rspec_node)
- geni_sliver = self.rspec_node_to_geni_sliver(rspec_node,
- sliver_allocation_dict)
+ geni_sliver = self.rspec_node_to_geni_sliver(rspec_node)
geni_slivers.append(geni_sliver)
+ logger.warning("iotlabaggregate describe geni_slivers %s" %
+ geni_slivers)
rspec.version.add_nodes(rspec_nodes)
- if show_leases in ['leases', 'all']:
- #if not options.get('list_leases') or options['list_leases'] == 'resources':
- if slivers:
- leases = self.get_leases(slice=slivers[0])
- logger.debug("JORDAN: getting leases from slice: %r" % slivers[0])
- rspec.version.add_leases(leases)
+ rspec_leases = self.leases_to_rspec_leases(leases)
+ logger.warning("iotlabaggregate describe rspec_leases %s" %
+ rspec_leases)
+ rspec.version.add_leases(rspec_leases)
- return {'geni_urn': geni_urn,
+ return {'geni_urn': urns[0],
'geni_rspec': rspec.toxml(),
'geni_slivers': geni_slivers}
+
-"""
-Implements what a driver should provide for SFA to work.
-"""
-from datetime import datetime
-from sfa.util.faults import SliverDoesNotExist, Forbidden
-from sfa.util.sfalogging import logger
-
-from sfa.storage.model import RegRecord, RegUser, RegSlice, RegKey
-from sfa.util.sfatime import utcparse, datetime_to_string
-from sfa.trust.certificate import Keypair, convert_public_key
-
-from sfa.trust.hierarchy import Hierarchy
-from sfa.trust.gid import create_uuid
+# -*- coding:utf-8 -*-
+""" driver class management """
-from sfa.managers.driver import Driver
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import Xrn, urn_to_hrn
from sfa.rspecs.version_manager import VersionManager
from sfa.rspecs.rspec import RSpec
+from sfa.managers.driver import Driver
+from sfa.iotlab.iotlabshell import IotLABShell
+from sfa.iotlab.iotlabaggregate import IotLABAggregate
+from sfa.iotlab.iotlablease import LeaseTable
-from sfa.iotlab.iotlabxrn import IotlabXrn, xrn_object, xrn_to_hostname
-from sfa.util.xrn import Xrn, hrn_to_urn, get_authority, urn_to_hrn
-from sfa.iotlab.iotlabaggregate import IotlabAggregate
-
-from sfa.iotlab.iotlabslices import IotlabSlices
-
-from sfa.trust.credential import Credential
-from sfa.storage.model import SliverAllocation
-
-from sfa.iotlab.iotlabshell import IotlabShell
-from sqlalchemy.orm import joinedload
-from sfa.iotlab.iotlabpostgres import LeaseTableXP
-
-class IotlabDriver(Driver):
- """ Iotlab Driver class inherited from Driver generic class.
-
- Contains methods compliant with the SFA standard and the testbed
- infrastructure (calls to LDAP and OAR).
-
- .. seealso::: Driver class
-
+class IotLabDriver(Driver):
+ """
+ SFA driver for Iot-LAB testbed
"""
- def __init__(self, api):
- """
-
- Sets the iotlab SFA config parameters,
- instanciates the testbed api .
-
- :param api: SfaApi configuration object. Holds reference to the
- database.
- :type api: SfaApi object
- """
+ def __init__(self, api):
Driver.__init__(self, api)
- self.api = api
config = api.config
- self.testbed_shell = IotlabShell(config)
+ self.api = api
+ self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ self.shell = IotLABShell()
+ # need by sfa driver
self.cache = None
- def GetPeers(self, peer_filter=None ):
- """ Gathers registered authorities in SFA DB and looks for specific peer
- if peer_filter is specified.
- :param peer_filter: name of the site authority looked for.
- :type peer_filter: string
- :returns: list of records.
-
- """
-
- existing_records = {}
- existing_hrns_by_types = {}
- logger.debug("IOTLAB_API \tGetPeers peer_filter %s " % (peer_filter))
- query = self.api.dbsession().query(RegRecord)
- all_records = query.filter(RegRecord.type.like('%authority%')).all()
-
- for record in all_records:
- existing_records[(record.hrn, record.type)] = record
- if record.type not in existing_hrns_by_types:
- existing_hrns_by_types[record.type] = [record.hrn]
- else:
- existing_hrns_by_types[record.type].append(record.hrn)
-
- logger.debug("IOTLAB_API \tGetPeer\texisting_hrns_by_types %s "
- % (existing_hrns_by_types))
- records_list = []
-
- try:
- if peer_filter:
- records_list.append(existing_records[(peer_filter,
- 'authority')])
- else:
- for hrn in existing_hrns_by_types['authority']:
- records_list.append(existing_records[(hrn, 'authority')])
-
- logger.debug("IOTLAB_API \tGetPeer \trecords_list %s "
- % (records_list))
-
- except KeyError:
- pass
-
- return_records = records_list
- logger.debug("IOTLAB_API \tGetPeer return_records %s "
- % (return_records))
- return return_records
-
- def GetKeys(self, key_filter=None):
- """Returns a dict of dict based on the key string. Each dict entry
- contains the key id, the ssh key, the user's email and the
- user's hrn.
- If key_filter is specified and is an array of key identifiers,
- only keys matching the filter will be returned.
-
- Admin may query all keys. Non-admins may only query their own keys.
- FROM PLC API DOC
-
- :returns: dict with ssh key as key and dicts as value.
- :rtype: dict
- """
- query = self.api.dbsession().query(RegKey)
- if key_filter is None:
- keys = query.options(joinedload('reg_user')).all()
- else:
- constraint = RegKey.key.in_(key_filter)
- keys = query.options(joinedload('reg_user')).filter(constraint).all()
-
- key_dict = {}
- for key in keys:
- key_dict[key.key] = {'key_id': key.key_id, 'key': key.key,
- 'email': key.reg_user.email,
- 'hrn': key.reg_user.hrn}
-
- #ldap_rslt = self.ldap.LdapSearch({'enabled']=True})
- #user_by_email = dict((user[1]['mail'][0], user[1]['sshPublicKey']) \
- #for user in ldap_rslt)
-
- logger.debug("IOTLAB_API GetKeys -key_dict %s \r\n " % (key_dict))
- return key_dict
-
-
-
- def AddPerson(self, record):
- """
-
- Adds a new account. Any fields specified in records are used,
- otherwise defaults are used. Creates an appropriate login by calling
- LdapAddUser.
-
- :param record: dictionary with the sfa user's properties.
- :returns: a dicitonary with the status. If successful, the dictionary
- boolean is set to True and there is a 'uid' key with the new login
- added to LDAP, otherwise the bool is set to False and a key
- 'message' is in the dictionary, with the error message.
- :rtype: dict
-
- """
- ret = self.testbed_shell.ldap.LdapAddUser(record)
-
- if ret['bool'] is True:
- #record['hrn'] = self.testbed_shell.root_auth + '.' + ret['uid']
- logger.debug("IOTLAB_API AddPerson return code %s record %s "
- % (ret, record))
- #self.__add_person_to_db(record)
- return ret
-
- def add_person_to_db(self, user_dict):
- """
- Add a federated user straight to db when the user issues a lease
- request with iotlab nodes and that he has not registered with iotlab
- yet (that is he does not have a LDAP entry yet).
- Uses parts of the routines in IotlabImport when importing user from
- LDAP. Called by AddPerson, right after LdapAddUser.
- :param user_dict: Must contain email, hrn and pkey to get a GID
- and be added to the SFA db.
- :type user_dict: dict
-
- """
- query = self.api.dbsession().query(RegUser)
- check_if_exists = query.filter_by(email = user_dict['email']).first()
- #user doesn't exists
- if not check_if_exists:
- logger.debug("__add_person_to_db \t Adding %s \r\n \r\n \
- " %(user_dict))
- hrn = user_dict['hrn']
- person_urn = hrn_to_urn(hrn, 'user')
- try:
- pubkey = user_dict['pkey']
- pkey = convert_public_key(pubkey)
- except TypeError:
- #key not good. create another pkey
- logger.warn('__add_person_to_db: no public key or unable to convert public \
- key for %s' %(hrn ))
- pkey = Keypair(create=True)
-
-
- if pubkey is not None and pkey is not None :
- hierarchy = Hierarchy()
- # We fake the parent in order to be able to create a valid GID
- person_gid = hierarchy.create_gid(person_urn, create_uuid(), \
- pkey, force_parent='iotlab')
- if user_dict['email']:
- logger.debug("__add_person_to_db \r\n \r\n \
- IOTLAB IMPORTER PERSON EMAIL OK email %s "\
- %(user_dict['email']))
- person_gid.set_email(user_dict['email'])
-
- user_record = RegUser(hrn=hrn , pointer= '-1', \
- authority=get_authority(hrn), \
- email=user_dict['email'], gid = person_gid)
- #user_record.reg_keys = [RegKey(user_dict['pkey'])]
- user_record.just_created()
- self.api.dbsession().add (user_record)
- self.api.dbsession().commit()
- return
-
-
-
- def _sql_get_slice_info(self, slice_filter):
- """
- Get the slice record based on the slice hrn. Fetch the record of the
- user associated with the slice by using joinedload based on the
- reg_researchers relationship.
-
- :param slice_filter: the slice hrn we are looking for
- :type slice_filter: string
- :returns: the slice record enhanced with the user's information if the
- slice was found, None it wasn't.
-
- :rtype: dict or None.
- """
- #DO NOT USE RegSlice - reg_researchers to get the hrn
- #of the user otherwise will mess up the RegRecord in
- #Resolve, don't know why - SA 08/08/2012
-
- #Only one entry for one user = one slice in testbed_xp table
- #slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
-
- raw_slicerec = self.api.dbsession().query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn=slice_filter).first()
- #raw_slicerec = self.api.dbsession().query(RegRecord).filter_by(hrn = slice_filter).first()
- if raw_slicerec:
- #load_reg_researchers
- #raw_slicerec.reg_researchers
- raw_slicerec = raw_slicerec.__dict__
- logger.debug(" IOTLAB_API \t _sql_get_slice_info slice_filter %s \
- raw_slicerec %s" % (slice_filter, raw_slicerec))
- slicerec = raw_slicerec
- #only one researcher per slice so take the first one
- #slicerec['reg_researchers'] = raw_slicerec['reg_researchers']
- #del slicerec['reg_researchers']['_sa_instance_state']
- return slicerec
-
- else:
- return None
-
- def _sql_get_slice_info_from_user(self, slice_filter):
- """
- Get the slice record based on the user recordid by using a joinedload
- on the relationship reg_slices_as_researcher. Format the sql record
- into a dict with the mandatory fields for user and slice.
- :returns: dict with slice record and user record if the record was found
- based on the user's id, None if not..
- :rtype:dict or None..
- """
- #slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
- raw_slicerec = self.api.dbsession().query(RegUser).options(joinedload('reg_slices_as_researcher')).filter_by(record_id=slice_filter).first()
- #raw_slicerec = self.api.dbsession().query(RegRecord).filter_by(record_id = slice_filter).first()
- #Put it in correct order
- user_needed_fields = ['peer_authority', 'hrn', 'last_updated',
- 'classtype', 'authority', 'gid', 'record_id',
- 'date_created', 'type', 'email', 'pointer']
- slice_needed_fields = ['peer_authority', 'hrn', 'last_updated',
- 'classtype', 'authority', 'gid', 'record_id',
- 'date_created', 'type', 'pointer']
- if raw_slicerec:
- #raw_slicerec.reg_slices_as_researcher
- raw_slicerec = raw_slicerec.__dict__
- slicerec = {}
- slicerec = \
- dict([(k, raw_slicerec[
- 'reg_slices_as_researcher'][0].__dict__[k])
- for k in slice_needed_fields])
- slicerec['reg_researchers'] = dict([(k, raw_slicerec[k])
- for k in user_needed_fields])
- #TODO Handle multiple slices for one user SA 10/12/12
- #for now only take the first slice record associated to the rec user
- ##slicerec = raw_slicerec['reg_slices_as_researcher'][0].__dict__
- #del raw_slicerec['reg_slices_as_researcher']
- #slicerec['reg_researchers'] = raw_slicerec
- ##del slicerec['_sa_instance_state']
-
- return slicerec
-
- else:
- return None
-
-
-
- def _get_slice_records(self, slice_filter=None,
- slice_filter_type=None):
- """
- Get the slice record depending on the slice filter and its type.
- :param slice_filter: Can be either the slice hrn or the user's record
- id.
- :type slice_filter: string
- :param slice_filter_type: describes the slice filter type used, can be
- slice_hrn or record_id_user
- :type: string
- :returns: the slice record
- :rtype:dict
- .. seealso::_sql_get_slice_info_from_user
- .. seealso:: _sql_get_slice_info
- """
-
- #Get list of slices based on the slice hrn
- if slice_filter_type == 'slice_hrn':
-
- #if get_authority(slice_filter) == self.root_auth:
- #login = slice_filter.split(".")[1].split("_")[0]
-
- slicerec = self._sql_get_slice_info(slice_filter)
-
- if slicerec is None:
- return None
- #return login, None
-
- #Get slice based on user id
- if slice_filter_type == 'record_id_user':
-
- slicerec = self._sql_get_slice_info_from_user(slice_filter)
-
- if slicerec:
- fixed_slicerec_dict = slicerec
- #At this point if there is no login it means
- #record_id_user filter has been used for filtering
- #if login is None :
- ##If theslice record is from iotlab
- #if fixed_slicerec_dict['peer_authority'] is None:
- #login = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
- #return login, fixed_slicerec_dict
- return fixed_slicerec_dict
- else:
- return None
-
-
-
- def GetSlices(self, slice_filter=None, slice_filter_type=None,
- login=None):
- """Get the slice records from the iotlab db and add lease information
- if any.
-
- :param slice_filter: can be the slice hrn or slice record id in the db
- depending on the slice_filter_type.
- :param slice_filter_type: defines the type of the filtering used, Can be
- either 'slice_hrn' or "record_id'.
- :type slice_filter: string
- :type slice_filter_type: string
- :returns: a slice dict if slice_filter and slice_filter_type
- are specified and a matching entry is found in the db. The result
- is put into a list.Or a list of slice dictionnaries if no filters
- arespecified.
-
- :rtype: list
-
- """
- #login = None
- authorized_filter_types_list = ['slice_hrn', 'record_id_user']
- return_slicerec_dictlist = []
-
- #First try to get information on the slice based on the filter provided
- if slice_filter_type in authorized_filter_types_list:
- fixed_slicerec_dict = self._get_slice_records(slice_filter,
- slice_filter_type)
- # if the slice was not found in the sfa db
- if fixed_slicerec_dict is None:
- return return_slicerec_dictlist
-
- slice_hrn = fixed_slicerec_dict['hrn']
-
- logger.debug(" IOTLAB_API \tGetSlices login %s \
- slice record %s slice_filter %s \
- slice_filter_type %s " % (login,
- fixed_slicerec_dict, slice_filter,
- slice_filter_type))
-
-
- #Now we have the slice record fixed_slicerec_dict, get the
- #jobs associated to this slice
- leases_list = []
-
- leases_list = self.GetLeases(login=login)
- #If no job is running or no job scheduled
- #return only the slice record
- if leases_list == [] and fixed_slicerec_dict:
- return_slicerec_dictlist.append(fixed_slicerec_dict)
-
- # if the jobs running don't belong to the user/slice we are looking
- # for
- leases_hrn = [lease['slice_hrn'] for lease in leases_list]
- if slice_hrn not in leases_hrn:
- return_slicerec_dictlist.append(fixed_slicerec_dict)
- #If several jobs for one slice , put the slice record into
- # each lease information dict
- for lease in leases_list:
- slicerec_dict = {}
- logger.debug("IOTLAB_API.PY \tGetSlices slice_filter %s \
- \t lease['slice_hrn'] %s"
- % (slice_filter, lease['slice_hrn']))
- if lease['slice_hrn'] == slice_hrn:
- slicerec_dict['oar_job_id'] = lease['lease_id']
- #Update lease dict with the slice record
- if fixed_slicerec_dict:
- fixed_slicerec_dict['oar_job_id'] = []
- fixed_slicerec_dict['oar_job_id'].append(
- slicerec_dict['oar_job_id'])
- slicerec_dict.update(fixed_slicerec_dict)
- #slicerec_dict.update({'hrn':\
- #str(fixed_slicerec_dict['slice_hrn'])})
- slicerec_dict['slice_hrn'] = lease['slice_hrn']
- slicerec_dict['hrn'] = lease['slice_hrn']
- slicerec_dict['user'] = lease['user']
- slicerec_dict.update(
- {'list_node_ids':
- {'hostname': lease['reserved_nodes']}})
- slicerec_dict.update({'node_ids': lease['reserved_nodes']})
-
-
-
- return_slicerec_dictlist.append(slicerec_dict)
-
- logger.debug("IOTLAB_API.PY \tGetSlices \
- slicerec_dict %s return_slicerec_dictlist %s \
- lease['reserved_nodes'] \
- %s" % (slicerec_dict, return_slicerec_dictlist,
- lease['reserved_nodes']))
-
- logger.debug("IOTLAB_API.PY \tGetSlices RETURN \
- return_slicerec_dictlist %s"
- % (return_slicerec_dictlist))
-
- return return_slicerec_dictlist
-
-
- else:
- #Get all slices from the iotlab sfa database ,
- #put them in dict format
- #query_slice_list = dbsession.query(RegRecord).all()
- query_slice_list = \
- self.api.dbsession().query(RegSlice).options(joinedload('reg_researchers')).all()
-
- for record in query_slice_list:
- tmp = record.__dict__
- tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
- #del tmp['reg_researchers']['_sa_instance_state']
- return_slicerec_dictlist.append(tmp)
- #return_slicerec_dictlist.append(record.__dict__)
-
- #Get all the jobs reserved nodes
- leases_list = self.testbed_shell.GetReservedNodes()
-
- for fixed_slicerec_dict in return_slicerec_dictlist:
- slicerec_dict = {}
- #Check if the slice belongs to a iotlab user
- if fixed_slicerec_dict['peer_authority'] is None:
- owner = fixed_slicerec_dict['hrn'].split(
- ".")[1].split("_")[0]
- else:
- owner = None
- for lease in leases_list:
- if owner == lease['user']:
- slicerec_dict['oar_job_id'] = lease['lease_id']
-
- #for reserved_node in lease['reserved_nodes']:
- logger.debug("IOTLAB_API.PY \tGetSlices lease %s "
- % (lease))
- slicerec_dict.update(fixed_slicerec_dict)
- slicerec_dict.update({'node_ids':
- lease['reserved_nodes']})
- slicerec_dict.update({'list_node_ids':
- {'hostname':
- lease['reserved_nodes']}})
-
- #slicerec_dict.update({'hrn':\
- #str(fixed_slicerec_dict['slice_hrn'])})
- #return_slicerec_dictlist.append(slicerec_dict)
- fixed_slicerec_dict.update(slicerec_dict)
-
- logger.debug("IOTLAB_API.PY \tGetSlices RETURN \
- return_slicerec_dictlist %s \t slice_filter %s " \
- %(return_slicerec_dictlist, slice_filter))
-
- return return_slicerec_dictlist
-
- def AddLeases(self, hostname_list, slice_record,
- lease_start_time, lease_duration):
-
- """Creates a job in OAR corresponding to the information provided
- as parameters. Adds the job id and the slice hrn in the iotlab
- database so that we are able to know which slice has which nodes.
-
- :param hostname_list: list of nodes' OAR hostnames.
- :param slice_record: sfa slice record, must contain login and hrn.
- :param lease_start_time: starting time , unix timestamp format
- :param lease_duration: duration in minutes
-
- :type hostname_list: list
- :type slice_record: dict
- :type lease_start_time: integer
- :type lease_duration: integer
- :returns: job_id, can be None if the job request failed.
-
- """
- logger.debug("IOTLAB_API \r\n \r\n \t AddLeases hostname_list %s \
- slice_record %s lease_start_time %s lease_duration %s "\
- %( hostname_list, slice_record , lease_start_time, \
- lease_duration))
-
- #tmp = slice_record['reg-researchers'][0].split(".")
- username = slice_record['login']
- #username = tmp[(len(tmp)-1)]
- job_id = self.testbed_shell.LaunchExperimentOnOAR(hostname_list, \
- slice_record['hrn'], \
- lease_start_time, lease_duration, \
- username)
- if job_id is not None:
- start_time = \
- datetime.fromtimestamp(int(lease_start_time)).\
- strftime(self.testbed_shell.time_format)
- end_time = lease_start_time + lease_duration
-
-
- logger.debug("IOTLAB_API \r\n \r\n \t AddLeases TURN ON LOGGING SQL \
- %s %s %s "%(slice_record['hrn'], job_id, end_time))
-
-
- logger.debug("IOTLAB_API \r\n \r\n \t AddLeases %s %s %s " \
- %(type(slice_record['hrn']), type(job_id), type(end_time)))
-
- iotlab_ex_row = LeaseTableXP(slice_hrn = slice_record['hrn'],
- experiment_id=job_id,
- end_time= end_time)
-
- logger.debug("IOTLAB_API \r\n \r\n \t AddLeases iotlab_ex_row %s" \
- %(iotlab_ex_row))
- self.api.dbsession().add(iotlab_ex_row)
- self.api.dbsession().commit()
-
- logger.debug("IOTLAB_API \t AddLeases hostname_list start_time %s "
- %(start_time))
-
- return job_id
-
- def GetLeases(self, lease_filter_dict=None, login=None):
- """
-
- Get the list of leases from OAR with complete information
- about which slice owns which jobs and nodes.
- Two purposes:
- -Fetch all the jobs from OAR (running, waiting..)
- complete the reservation information with slice hrn
- found in lease_table . If not available in the table,
- assume it is a iotlab slice.
- -Updates the iotlab table, deleting jobs when necessary.
-
- :returns: reservation_list, list of dictionaries with 'lease_id',
- 'reserved_nodes','slice_id', 'state', 'user', 'component_id_list',
- 'slice_hrn', 'resource_ids', 't_from', 't_until'
- :rtype: list
-
- """
-
- unfiltered_reservation_list = self.testbed_shell.GetReservedNodes(login)
-
- reservation_list = []
- #Find the slice associated with this user iotlab ldap uid
- logger.debug(" IOTLAB_API.PY \tGetLeases login %s\
- unfiltered_reservation_list %s "
- % (login, unfiltered_reservation_list))
- #Create user dict first to avoid looking several times for
- #the same user in LDAP SA 27/07/12
- job_oar_list = []
- jobs_psql_query = self.api.dbsession().query(LeaseTableXP).all()
- jobs_psql_dict = dict([(row.experiment_id, row.__dict__)
- for row in jobs_psql_query])
- #jobs_psql_dict = jobs_psql_dict)
- logger.debug("IOTLAB_API \tGetLeases jobs_psql_dict %s"
- % (jobs_psql_dict))
- jobs_psql_id_list = [row.experiment_id for row in jobs_psql_query]
-
- for resa in unfiltered_reservation_list:
- logger.debug("IOTLAB_API \tGetLeases USER %s"
- % (resa['user']))
- #Construct list of jobs (runing, waiting..) in oar
- job_oar_list.append(resa['lease_id'])
- #If there is information on the job in IOTLAB DB ]
- #(slice used and job id)
- if resa['lease_id'] in jobs_psql_dict:
- job_info = jobs_psql_dict[resa['lease_id']]
- logger.debug("IOTLAB_API \tGetLeases job_info %s"
- % (job_info))
- resa['slice_hrn'] = job_info['slice_hrn']
- resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
-
- #otherwise, assume it is a iotlab slice:
- else:
- resa['slice_id'] = hrn_to_urn(self.testbed_shell.root_auth \
- + '.' + resa['user'] + "_slice",
- 'slice')
- resa['slice_hrn'] = Xrn(resa['slice_id']).get_hrn()
-
- resa['component_id_list'] = []
- #Transform the hostnames into urns (component ids)
- for node in resa['reserved_nodes']:
-
- iotlab_xrn = xrn_object(self.testbed_shell.root_auth, node)
- resa['component_id_list'].append(iotlab_xrn.urn)
-
- if lease_filter_dict:
- logger.debug("IOTLAB_API \tGetLeases \
- \r\n leasefilter %s" % ( lease_filter_dict))
-
- # filter_dict_functions = {
- # 'slice_hrn' : IotlabShell.filter_lease_name,
- # 't_from' : IotlabShell.filter_lease_start_time
- # }
- reservation_list = list(unfiltered_reservation_list)
- for filter_type in lease_filter_dict:
- logger.debug("IOTLAB_API \tGetLeases reservation_list %s" \
- % (reservation_list))
- reservation_list = self.testbed_shell.filter_lease(
- reservation_list,filter_type,
- lease_filter_dict[filter_type] )
-
- # Filter the reservation list with a maximum timespan so that the
- # leases and jobs running after this timestamp do not appear
- # in the result leases.
- # if 'start_time' in :
- # if resa['start_time'] < lease_filter_dict['start_time']:
- # reservation_list.append(resa)
-
-
- # if 'name' in lease_filter_dict and \
- # lease_filter_dict['name'] == resa['slice_hrn']:
- # reservation_list.append(resa)
-
-
- if lease_filter_dict is None:
- reservation_list = unfiltered_reservation_list
-
- self.update_experiments_in_lease_table(job_oar_list, jobs_psql_id_list)
-
- logger.debug(" IOTLAB_API.PY \tGetLeases reservation_list %s"
- % (reservation_list))
- return reservation_list
-
-
-
- def update_experiments_in_lease_table(self,
- experiment_list_from_testbed, experiment_list_in_db):
- """ Cleans the lease_table by deleting expired and cancelled jobs.
-
- Compares the list of experiment ids given by the testbed with the
- experiment ids that are already in the database, deletes the
- experiments that are no longer in the testbed experiment id list.
-
- :param experiment_list_from_testbed: list of experiment ids coming
- from testbed
- :type experiment_list_from_testbed: list
- :param experiment_list_in_db: list of experiment ids from the sfa
- additionnal database.
- :type experiment_list_in_db: list
-
- :returns: None
- """
- #Turn the list into a set
- set_experiment_list_in_db = set(experiment_list_in_db)
-
- kept_experiments = set(experiment_list_from_testbed).intersection(set_experiment_list_in_db)
- logger.debug("\r\n \t update_experiments_in_lease_table \
- experiment_list_in_db %s \r\n \
- experiment_list_from_testbed %s \
- kept_experiments %s "
- % (set_experiment_list_in_db,
- experiment_list_from_testbed, kept_experiments))
- deleted_experiments = set_experiment_list_in_db.difference(
- kept_experiments)
- deleted_experiments = list(deleted_experiments)
- if len(deleted_experiments) > 0:
- request = self.api.dbsession().query(LeaseTableXP)
- request.filter(LeaseTableXP.experiment_id.in_(deleted_experiments)).delete(synchronize_session='fetch')
- self.api.dbsession().commit()
- return
-
-
- def AddSlice(self, slice_record, user_record):
- """
-
- Add slice to the local iotlab sfa tables if the slice comes
- from a federated site and is not yet in the iotlab sfa DB,
- although the user has already a LDAP login.
- Called by verify_slice during lease/sliver creation.
-
- :param slice_record: record of slice, must contain hrn, gid, slice_id
- and authority of the slice.
- :type slice_record: dictionary
- :param user_record: record of the user
- :type user_record: RegUser
-
- """
-
- sfa_record = RegSlice(hrn=slice_record['hrn'],
- gid=slice_record['gid'],
- #pointer=slice_record['slice_id'],
- authority=slice_record['authority'])
- logger.debug("IOTLAB_API.PY AddSlice sfa_record %s user_record %s"
- % (sfa_record, user_record))
- sfa_record.just_created()
- self.api.dbsession().add(sfa_record)
- self.api.dbsession().commit()
- #Update the reg-researchers dependency table
- sfa_record.reg_researchers = [user_record]
- self.api.dbsession().commit()
-
- return
-
- def augment_records_with_testbed_info(self, record_list):
- """
-
- Adds specific testbed info to the records.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :returns: list of records with extended information in each record
- :rtype: list
-
- """
- return self.fill_record_info(record_list)
-
- def fill_record_info(self, record_list):
- """
-
- For each SFA record, fill in the iotlab specific and SFA specific
- fields in the record.
-
- :param record_list: list of sfa dictionaries records
- :type record_list: list
- :returns: list of records with extended information in each record
- :rtype: list
-
- .. warning:: Should not be modifying record_list directly because modi
- fication are kept outside the method's scope. Howerver, there is no
- other way to do it given the way it's called in registry manager.
-
- """
-
- logger.debug("IOTLABDRIVER \tfill_record_info records %s "
- % (record_list))
- if not isinstance(record_list, list):
- record_list = [record_list]
-
- try:
- for record in record_list:
-
- if str(record['type']) == 'node':
- # look for node info using GetNodes
- # the record is about one node only
- filter_dict = {'hrn': [record['hrn']]}
- node_info = self.testbed_shell.GetNodes(filter_dict)
- # the node_info is about one node only, but it is formatted
- # as a list
- record.update(node_info[0])
- logger.debug("IOTLABDRIVER.PY \t \
- fill_record_info NODE" % (record))
-
- #If the record is a SFA slice record, then add information
- #about the user of this slice. This kind of
- #information is in the Iotlab's DB.
- if str(record['type']) == 'slice':
- if 'reg_researchers' in record and isinstance(record
- ['reg_researchers'],
- list):
- record['reg_researchers'] = \
- record['reg_researchers'][0].__dict__
- record.update(
- {'PI': [record['reg_researchers']['hrn']],
- 'researcher': [record['reg_researchers']['hrn']],
- 'name': record['hrn'],
- 'oar_job_id': [],
- 'node_ids': [],
- 'person_ids': [record['reg_researchers']
- ['record_id']],
- # For client_helper.py compatibility
- 'geni_urn': '',
- # For client_helper.py compatibility
- 'keys': '',
- # For client_helper.py compatibility
- 'key_ids': ''})
-
- #Get iotlab slice record and oar job id if any.
- recslice_list = self.GetSlices(
- slice_filter=str(record['hrn']),
- slice_filter_type='slice_hrn')
-
- logger.debug("IOTLABDRIVER \tfill_record_info \
- TYPE SLICE RECUSER record['hrn'] %s record['oar_job_id']\
- %s " % (record['hrn'], record['oar_job_id']))
- del record['reg_researchers']
- try:
- for rec in recslice_list:
- logger.debug("IOTLABDRIVER\r\n \t \
- fill_record_info oar_job_id %s "
- % (rec['oar_job_id']))
-
- record['node_ids'] = [self.testbed_shell.root_auth +
- '.' + hostname for hostname
- in rec['node_ids']]
- except KeyError:
- pass
-
- logger.debug("IOTLABDRIVER.PY \t fill_record_info SLICE \
- recslice_list %s \r\n \t RECORD %s \r\n \
- \r\n" % (recslice_list, record))
-
- if str(record['type']) == 'user':
- #The record is a SFA user record.
- #Get the information about his slice from Iotlab's DB
- #and add it to the user record.
- recslice_list = self.GetSlices(
- slice_filter=record['record_id'],
- slice_filter_type='record_id_user')
-
- logger.debug("IOTLABDRIVER.PY \t fill_record_info \
- TYPE USER recslice_list %s \r\n \t RECORD %s \r\n"
- % (recslice_list, record))
- #Append slice record in records list,
- #therefore fetches user and slice info again(one more loop)
- #Will update PIs and researcher for the slice
-
- recuser = recslice_list[0]['reg_researchers']
- logger.debug("IOTLABDRIVER.PY \t fill_record_info USER \
- recuser %s \r\n \r\n" % (recuser))
- recslice = {}
- recslice = recslice_list[0]
- recslice.update(
- {'PI': [recuser['hrn']],
- 'researcher': [recuser['hrn']],
- 'name': recuser['hrn'],
- 'node_ids': [],
- 'oar_job_id': [],
- 'person_ids': [recuser['record_id']]})
- try:
- for rec in recslice_list:
- recslice['oar_job_id'].append(rec['oar_job_id'])
- except KeyError:
- pass
-
- recslice.update({'type': 'slice',
- 'hrn': recslice_list[0]['hrn']})
-
- #GetPersons takes [] as filters
- user_iotlab = self.testbed_shell.GetPersons([record])
-
- record.update(user_iotlab[0])
- #For client_helper.py compatibility
- record.update(
- {'geni_urn': '',
- 'keys': '',
- 'key_ids': ''})
- record_list.append(recslice)
-
- logger.debug("IOTLABDRIVER.PY \t \
- fill_record_info ADDING SLICE\
- INFO TO USER records %s" % (record_list))
-
- except TypeError, error:
- logger.log_exc("IOTLABDRIVER \t fill_record_info EXCEPTION %s"
- % (error))
-
- return record_list
-
- def sliver_status(self, slice_urn, slice_hrn):
- """
- Receive a status request for slice named urn/hrn
- urn:publicid:IDN+iotlab+nturro_slice hrn iotlab.nturro_slice
- shall return a structure as described in
- http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
- NT : not sure if we should implement this or not, but used by sface.
-
- :param slice_urn: slice urn
- :type slice_urn: string
- :param slice_hrn: slice hrn
- :type slice_hrn: string
-
- """
-
- #First get the slice with the slice hrn
- slice_list = self.GetSlices(slice_filter=slice_hrn,
- slice_filter_type='slice_hrn')
-
- if len(slice_list) == 0:
- raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))
-
- #Used for fetching the user info witch comes along the slice info
- one_slice = slice_list[0]
-
- #Make a list of all the nodes hostnames in use for this slice
- slice_nodes_list = []
- slice_nodes_list = one_slice['node_ids']
- #Get all the corresponding nodes details
- nodes_all = self.testbed_shell.GetNodes(
- {'hostname': slice_nodes_list},
- ['node_id', 'hostname', 'site', 'boot_state'])
- nodeall_byhostname = dict([(one_node['hostname'], one_node)
- for one_node in nodes_all])
-
- for single_slice in slice_list:
- #For compatibility
- top_level_status = 'empty'
- result = {}
- result.fromkeys(
- ['geni_urn', 'geni_error', 'iotlab_login', 'geni_status',
- 'geni_resources'], None)
- # result.fromkeys(\
- # ['geni_urn','geni_error', 'pl_login','geni_status',
- # 'geni_resources'], None)
- # result['pl_login'] = one_slice['reg_researchers'][0].hrn
- result['iotlab_login'] = one_slice['user']
- logger.debug("Slabdriver - sliver_status Sliver status \
- urn %s hrn %s single_slice %s \r\n "
- % (slice_urn, slice_hrn, single_slice))
-
- if 'node_ids' not in single_slice:
- #No job in the slice
- result['geni_status'] = top_level_status
- result['geni_resources'] = []
- return result
-
- top_level_status = 'ready'
-
- #A job is running on Iotlab for this slice
- # report about the local nodes that are in the slice only
-
- result['geni_urn'] = slice_urn
-
- resources = []
- for node_hostname in single_slice['node_ids']:
- res = {}
- res['iotlab_hostname'] = node_hostname
- res['iotlab_boot_state'] = \
- nodeall_byhostname[node_hostname]['boot_state']
-
- #res['pl_hostname'] = node['hostname']
- #res['pl_boot_state'] = \
- #nodeall_byhostname[node['hostname']]['boot_state']
- #res['pl_last_contact'] = strftime(self.time_format, \
- #gmtime(float(timestamp)))
- sliver_id = Xrn(
- slice_urn, type='slice',
- id=nodeall_byhostname[node_hostname]['node_id']).urn
-
- res['geni_urn'] = sliver_id
- #node_name = node['hostname']
- if nodeall_byhostname[node_hostname]['boot_state'] == 'Alive':
-
- res['geni_status'] = 'ready'
- else:
- res['geni_status'] = 'failed'
- top_level_status = 'failed'
-
- res['geni_error'] = ''
-
- resources.append(res)
-
- result['geni_status'] = top_level_status
- result['geni_resources'] = resources
- logger.debug("IOTLABDRIVER \tsliver_statusresources %s res %s "
- % (resources, res))
- return result
-
- def get_user_record(self, hrn):
- """
-
- Returns the user record based on the hrn from the SFA DB .
-
- :param hrn: user's hrn
- :type hrn: string
- :returns: user record from SFA database
- :rtype: RegUser
-
- """
- return self.api.dbsession().query(RegRecord).filter_by(hrn=hrn).first()
-
- def testbed_name(self):
- """
-
- Returns testbed's name.
- :returns: testbed authority name.
- :rtype: string
-
- """
- return self.hrn
-
-
- def _get_requested_leases_list(self, rspec):
- """
- Process leases in rspec depending on the rspec version (format)
- type. Find the lease requests in the rspec and creates
- a lease request list with the mandatory information ( nodes,
- start time and duration) of the valid leases (duration above or
- equal to the iotlab experiment minimum duration).
-
- :param rspec: rspec request received.
- :type rspec: RSpec
- :returns: list of lease requests found in the rspec
- :rtype: list
- """
- requested_lease_list = []
- for lease in rspec.version.get_leases():
- single_requested_lease = {}
- logger.debug("IOTLABDRIVER.PY \t \
- _get_requested_leases_list lease %s " % (lease))
-
- if not lease.get('lease_id'):
- if get_authority(lease['component_id']) == \
- self.testbed_shell.root_auth:
- single_requested_lease['hostname'] = \
- xrn_to_hostname(\
- lease.get('component_id').strip())
- single_requested_lease['start_time'] = \
- lease.get('start_time')
- single_requested_lease['duration'] = lease.get('duration')
- #Check the experiment's duration is valid before adding
- #the lease to the requested leases list
- duration_in_seconds = \
- int(single_requested_lease['duration'])
- if duration_in_seconds >= self.testbed_shell.GetMinExperimentDurationInGranularity():
- requested_lease_list.append(single_requested_lease)
-
- return requested_lease_list
-
- @staticmethod
- def _group_leases_by_start_time(requested_lease_list):
- """
- Create dict of leases by start_time, regrouping nodes reserved
- at the same time, for the same amount of time so as to
- define one job on OAR.
-
- :param requested_lease_list: list of leases
- :type requested_lease_list: list
- :returns: Dictionary with key = start time, value = list of leases
- with the same start time.
- :rtype: dictionary
-
- """
-
- requested_xp_dict = {}
- for lease in requested_lease_list:
-
- #In case it is an asap experiment start_time is empty
- if lease['start_time'] == '':
- lease['start_time'] = '0'
-
- if lease['start_time'] not in requested_xp_dict:
- if isinstance(lease['hostname'], str):
- lease['hostname'] = [lease['hostname']]
-
- requested_xp_dict[lease['start_time']] = lease
-
- else:
- job_lease = requested_xp_dict[lease['start_time']]
- if lease['duration'] == job_lease['duration']:
- job_lease['hostname'].append(lease['hostname'])
-
- return requested_xp_dict
-
- def _process_requested_xp_dict(self, rspec):
- """
- Turns the requested leases and information into a dictionary
- of requested jobs, grouped by starting time.
-
- :param rspec: RSpec received
- :type rspec : RSpec
- :rtype: dictionary
-
- """
- requested_lease_list = self._get_requested_leases_list(rspec)
- logger.debug("IOTLABDRIVER _process_requested_xp_dict \
- requested_lease_list %s" % (requested_lease_list))
- xp_dict = self._group_leases_by_start_time(requested_lease_list)
- logger.debug("IOTLABDRIVER _process_requested_xp_dict xp_dict\
- %s" % (xp_dict))
-
- return xp_dict
-
-
-
- def delete(self, slice_urns, options=None):
- """
- Deletes the lease associated with the slice hrn and the credentials
- if the slice belongs to iotlab. Answer to DeleteSliver.
-
- :param slice_urn: urn of the slice
- :type slice_urn: string
-
-
- :returns: 1 if the slice to delete was not found on iotlab,
- True if the deletion was successful, False otherwise otherwise.
-
- .. note:: Should really be named delete_leases because iotlab does
- not have any slivers, but only deals with leases. However,
- SFA api only have delete_sliver define so far. SA 13/05/2013
- .. note:: creds are unused, and are not used either in the dummy driver
- delete_sliver .
- """
- if options is None: options={}
- # collect sliver ids so we can update sliver allocation states after
- # we remove the slivers.
- aggregate = IotlabAggregate(self)
- slivers = aggregate.get_slivers(slice_urns)
- if slivers:
- # slice_id = slivers[0]['slice_id']
- node_ids = []
- sliver_ids = []
- sliver_jobs_dict = {}
- for sliver in slivers:
- node_ids.append(sliver['node_id'])
- sliver_ids.append(sliver['sliver_id'])
- job_id = sliver['sliver_id'].split('+')[-1].split('-')[0]
- sliver_jobs_dict[job_id] = sliver['sliver_id']
- logger.debug("IOTLABDRIVER.PY delete_sliver slivers %s slice_urns %s"
- % (slivers, slice_urns))
- slice_hrn = urn_to_hrn(slice_urns[0])[0]
-
- sfa_slice_list = self.GetSlices(slice_filter=slice_hrn,
- slice_filter_type='slice_hrn')
-
- if not sfa_slice_list:
- return 1
-
- #Delete all leases in the slice
- for sfa_slice in sfa_slice_list:
- logger.debug("IOTLABDRIVER.PY delete_sliver slice %s" % (sfa_slice))
- slices = IotlabSlices(self)
- # determine if this is a peer slice
-
- peer = slices.get_peer(slice_hrn)
-
- logger.debug("IOTLABDRIVER.PY delete_sliver peer %s \
- \r\n \t sfa_slice %s " % (peer, sfa_slice))
- oar_bool_ans = self.testbed_shell.DeleteSliceFromNodes(
- sfa_slice)
- for job_id in oar_bool_ans:
- # if the job has not been successfully deleted
- # don't delete the associated sliver
- # remove it from the sliver list
- if oar_bool_ans[job_id] is False:
- sliver = sliver_jobs_dict[job_id]
- sliver_ids.remove(sliver)
- try:
-
- dbsession = self.api.dbsession()
- SliverAllocation.delete_allocations(sliver_ids, dbsession)
- except :
- logger.log_exc("IOTLABDRIVER.PY delete error ")
-
- # prepare return struct
- geni_slivers = []
- for sliver in slivers:
- geni_slivers.append(
- {'geni_sliver_urn': sliver['sliver_id'],
- 'geni_allocation_status': 'geni_unallocated',
- 'geni_expires': datetime_to_string(utcparse(sliver['expires']))})
- return geni_slivers
-
-
-
-
- def list_slices(self, creds, options):
- """Answer to ListSlices.
-
- List slices belonging to iotlab, returns slice urns list.
- No caching used. Options unused but are defined in the SFA method
- api prototype.
-
- :returns: slice urns list
- :rtype: list
-
- .. note:: creds and options are unused - SA 12/12/13
- """
- # look in cache first
- #if self.cache:
- #slices = self.cache.get('slices')
- #if slices:
- #logger.debug("PlDriver.list_slices returns from cache")
- #return slices
-
- # get data from db
-
- slices = self.GetSlices()
- logger.debug("IOTLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n"
- % (slices))
- slice_hrns = [iotlab_slice['hrn'] for iotlab_slice in slices]
-
- slice_urns = [hrn_to_urn(slice_hrn, 'slice')
- for slice_hrn in slice_hrns]
-
- # cache the result
- #if self.cache:
- #logger.debug ("IotlabDriver.list_slices stores value in cache")
- #self.cache.add('slices', slice_urns)
-
- return slice_urns
-
+ ########################################
+ ########## registry oriented
+ ########################################
+ ##########
def register(self, sfa_record, hrn, pub_key):
- """
- Adding new user, slice, node or site should not be handled
- by SFA.
-
- ..warnings:: should not be used. Different components are in charge of
- doing this task. Adding nodes = OAR
- Adding users = LDAP Iotlab
- Adding slice = Import from LDAP users
- Adding site = OAR
-
- :param sfa_record: record provided by the client of the
- Register API call.
- :type sfa_record: dict
- :param pub_key: public key of the user
- :type pub_key: string
-
- .. note:: DOES NOTHING. Returns -1.
-
- """
+ logger.warning("iotlabdriver register : not implemented")
return -1
-
+ ##########
def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
- """
- No site or node record update allowed in Iotlab. The only modifications
- authorized here are key deletion/addition on an existing user and
- password change. On an existing user, CAN NOT BE MODIFIED: 'first_name',
- 'last_name', 'email'. DOES NOT EXIST IN SENSLAB: 'phone', 'url', 'bio',
- 'title', 'accepted_aup'. A slice is bound to its user, so modifying the
- user's ssh key should nmodify the slice's GID after an import procedure.
-
- :param old_sfa_record: what is in the db for this hrn
- :param new_sfa_record: what was passed to the update call
- :param new_key: the new user's public key
- :param hrn: the user's sfa hrn
- :type old_sfa_record: dict
- :type new_sfa_record: dict
- :type new_key: string
- :type hrn: string
-
- TODO: needs review
- .. warning:: SA 12/12/13 - Removed. should be done in iotlabimporter
- since users, keys and slice are managed by the LDAP.
-
- """
- # pointer = old_sfa_record['pointer']
- # old_sfa_record_type = old_sfa_record['type']
-
- # # new_key implemented for users only
- # if new_key and old_sfa_record_type not in ['user']:
- # raise UnknownSfaType(old_sfa_record_type)
-
- # if old_sfa_record_type == "user":
- # update_fields = {}
- # all_fields = new_sfa_record
- # for key in all_fields.keys():
- # if key in ['key', 'password']:
- # update_fields[key] = all_fields[key]
-
- # if new_key:
- # # must check this key against the previous one if it exists
- # persons = self.testbed_shell.GetPersons([old_sfa_record])
- # person = persons[0]
- # keys = [person['pkey']]
- # #Get all the person's keys
- # keys_dict = self.GetKeys(keys)
-
- # # Delete all stale keys, meaning the user has only one key
- # #at a time
- # #TODO: do we really want to delete all the other keys?
- # #Is this a problem with the GID generation to have multiple
- # #keys? SA 30/05/13
- # key_exists = False
- # if key in keys_dict:
- # key_exists = True
- # else:
- # #remove all the other keys
- # for key in keys_dict:
- # self.testbed_shell.DeleteKey(person, key)
- # self.testbed_shell.AddPersonKey(
- # person, {'sshPublicKey': person['pkey']},
- # {'sshPublicKey': new_key})
- logger.warning ("UNDEFINED - Update should be done by the \
- iotlabimporter")
+ logger.warning("iotlabdriver update : not implemented")
return True
- def remove(self, sfa_record):
- """
-
- Removes users only. Mark the user as disabled in LDAP. The user and his
- slice are then deleted from the db by running an import on the registry.
-
- :param sfa_record: record is the existing sfa record in the db
- :type sfa_record: dict
-
- ..warning::As fas as the slice is concerned, here only the leases are
- removed from the slice. The slice is record itself is not removed
- from the db.
-
- TODO: needs review
-
- TODO : REMOVE SLICE FROM THE DB AS WELL? SA 14/05/2013,
-
- TODO: return boolean for the slice part
- """
- sfa_record_type = sfa_record['type']
- hrn = sfa_record['hrn']
- if sfa_record_type == 'user':
-
- #get user from iotlab ldap
- person = self.testbed_shell.GetPersons(sfa_record)
- #No registering at a given site in Iotlab.
- #Once registered to the LDAP, all iotlab sites are
- #accesible.
- if person:
- #Mark account as disabled in ldap
- return self.testbed_shell.DeletePerson(sfa_record)
-
- elif sfa_record_type == 'slice':
- if self.GetSlices(slice_filter=hrn,
- slice_filter_type='slice_hrn'):
- ret = self.testbed_shell.DeleteSlice(sfa_record)
- return True
-
- def check_sliver_credentials(self, creds, urns):
- """Check that the sliver urns belongs to the slice specified in the
- credentials.
-
- :param urns: list of sliver urns.
- :type urns: list.
- :param creds: slice credentials.
- :type creds: Credential object.
-
-
- """
- # build list of cred object hrns
- slice_cred_names = []
- for cred in creds:
- slice_cred_hrn = Credential(cred=cred).get_gid_object().get_hrn()
- slicename = IotlabXrn(xrn=slice_cred_hrn).iotlab_slicename()
- slice_cred_names.append(slicename)
- # look up slice name of slivers listed in urns arg
-
- slice_ids = []
- for urn in urns:
- sliver_id_parts = Xrn(xrn=urn).get_sliver_id_parts()
- try:
- slice_ids.append(int(sliver_id_parts[0]))
- except ValueError:
- pass
-
- if not slice_ids:
- raise Forbidden("sliver urn not provided")
-
- slices = self.GetSlices(slice_ids)
- sliver_names = [single_slice['name'] for single_slice in slices]
+ ##########
+ def remove(self, sfa_record):
+ logger.warning("iotlabdriver remove : not implemented")
+ return True
- # make sure we have a credential for every specified sliver
- for sliver_name in sliver_names:
- if sliver_name not in slice_cred_names:
- msg = "Valid credential not found for target: %s" % sliver_name
- raise Forbidden(msg)
########################################
########## aggregate oriented
########################################
- # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
- def aggregate_version(self):
- """
+ def provision(self, urns, options=None):
+ logger.warning("iotlabdriver provision : not implemented")
+ version_manager = VersionManager()
+ opt = options['geni_rspec_version']
+ rspec_version = version_manager.get_version(opt)
+ return self.describe(urns, rspec_version, options=options)
- Returns the testbed's supported rspec advertisement and request
- versions.
- :returns: rspec versions supported ad a dictionary.
- :rtype: dict
- """
+ def delete(self, urns, options=None):
+ logger.warning("iotlabdriver delete : not implemented")
+ geni_slivers = []
+ return geni_slivers
+
+
+ def aggregate_version(self):
+ logger.warning("iotlabdriver aggregate_version")
version_manager = VersionManager()
ad_rspec_versions = []
request_rspec_versions = []
if rspec_version.content_type in ['*', 'request']:
request_rspec_versions.append(rspec_version.to_dict())
return {
- 'testbed': self.testbed_name(),
+ 'testbed': self.hrn,
'geni_request_rspec_versions': request_rspec_versions,
'geni_ad_rspec_versions': ad_rspec_versions}
- # first 2 args are None in case of resource discovery
- def list_resources (self, version=None, options=None):
- if options is None: options={}
- aggregate = IotlabAggregate(self)
- rspec = aggregate.list_resources(version=version, options=options)
+
+ def list_resources(self, version=None, options=None):
+ logger.warning("iotlabdriver list_resources")
+ if not options:
+ options = {}
+ aggregate = IotLABAggregate(self)
+ rspec = aggregate.list_resources(version=version, options=options)
return rspec
- def describe(self, urns, version, options={}):
- aggregate = IotlabAggregate(self)
+
+ def describe(self, urns, version, options=None):
+ logger.warning("iotlabdriver describe")
+ if not options:
+ options = {}
+ aggregate = IotLABAggregate(self)
return aggregate.describe(urns, version=version, options=options)
- def status (self, urns, options=None):
- if options is None: options={}
- aggregate = IotlabAggregate(self)
- desc = aggregate.describe(urns, version='GENI 3')
+
+ def status(self, urns, options=None):
+ logger.warning("iotlabdriver status")
+ aggregate = IotLABAggregate(self)
+ desc = aggregate.describe(urns, version='GENI 3')
status = {'geni_urn': desc['geni_urn'],
'geni_slivers': desc['geni_slivers']}
return status
- def allocate (self, urn, rspec_string, expiration, options=None):
- if options is None: options={}
- xrn = Xrn(urn)
- aggregate = IotlabAggregate(self)
+ def _get_users(self):
+ """ Get all users """
+ ret = self.shell.get_users()
+ if 'error' in ret:
+ return None
+ return ret
- slices = IotlabSlices(self)
- peer = slices.get_peer(xrn.get_hrn())
- sfa_peer = slices.get_sfa_peer(xrn.get_hrn())
- caller_hrn = options.get('actual_caller_hrn', [])
- caller_xrn = Xrn(caller_hrn)
- caller_urn = caller_xrn.get_urn()
+ def _get_user_login(self, caller_user):
+ """ Get user login with email """
+ email = caller_user['email']
+ # ensure user exist in LDAP tree
+ users = self._get_users()
+ if users and not email in users:
+ self.shell.add_user(caller_user)
+ users = self._get_users()
+ if users and email in users:
+ return users[email]['login']
+ else:
+ return None
+
- logger.debug("IOTLABDRIVER.PY :: Allocate caller = %s" % (caller_urn))
+ @classmethod
+ def _get_experiment(cls, rspec):
+ """
+ Find in RSpec leases the experiment start time, duration and nodes list.
+
+ :Example:
+ <rspec>
+ ...
+ <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab"
+ start_time="1427792400" duration="30">
+ <node component_id=
+ "urn:publicid:IDN+iotlab+node+m3-10.grenoble.iot-lab.info"/>
+ </lease>
+ <lease slice_id="urn:publicid:IDN+onelab:inria+slice+test_iotlab"
+ start_time="1427792600" duration="50">
+ <node component_id=
+ "urn:publicid:IDN+iotlab+node+m3-15.grenoble.iot-lab.info"/>
+ </lease>
+ ...
+ </rspec>
+ """
+ leases = rspec.version.get_leases()
+ start_time = min([int(lease['start_time'])
+ for lease in leases])
+ end_time = max([int(lease['start_time']) +
+ int(lease['duration'])
+ for lease in leases])
+ nodes_list = [Xrn.unescape(Xrn(lease['component_id'].strip(),
+ type='node').get_leaf())
+ for lease in leases]
+ # uniq hostnames
+ nodes_list = list(set(nodes_list))
+ from math import floor
+ duration = floor((end_time - start_time)/60) # minutes
+ return nodes_list, start_time, duration
+
+
+ def _save_db_lease(self, job_id, slice_hrn):
+ """ Save lease table row in SFA database """
+ lease_row = LeaseTable(job_id,
+ slice_hrn)
+ logger.warning("iotlabdriver _save_db_lease lease row : %s" %
+ lease_row)
+ self.api.dbsession().add(lease_row)
+ self.api.dbsession().commit()
- slice_record = {}
- users = options.get('geni_users', [])
- sfa_users = options.get('sfa_users', [])
-
- if sfa_users:
- user = None
- # Looking for the user who actually called the Allocate function in the list of users of the slice
- for u in sfa_users:
- if 'urn' in u and u['urn'] == caller_urn:
- user = u
- logger.debug("user = %s" % u)
- # If we find the user in the list we use it, else we take the 1st in the list as before
- if user:
- user_hrn = caller_hrn
- else:
- user = sfa_users[0]
- # XXX Always empty ??? no slice_record in the Allocate call
- #slice_record = sfa_users[0].get('slice_record', [])
- user_xrn = Xrn(sfa_users[0]['urn'])
- user_hrn = user_xrn.get_hrn()
- slice_record = user.get('slice_record', {})
- slice_record['user'] = {'keys': user['keys'],
- 'email': user['email'],
- 'hrn': user_hrn}
- slice_record['authority'] = xrn.get_authority_hrn()
+ def allocate(self, urn, rspec_string, expiration, options=None):
+ """
+ Allocate method submit an experiment on Iot-LAB testbed with :
+ * user : get the slice user which launch request (caller_hrn)
+ * reservation : get the start time and duration in RSpec leases
+ * nodes : get the nodes list in RSpec leases
+ If we have a request success on Iot-LAB testbed we store in SFA
+ database the assocation OAR scheduler job id and slice hrn
- logger.debug("IOTLABDRIVER.PY \t urn %s allocate options %s "
- % (urn, options))
+ :param urn : slice urn
+ :param rspec_string : RSpec received
+ :param options : options with slice users (geni_users)
+ """
+ # pylint:disable=R0914
+ logger.warning("iotlabdriver allocate")
+ xrn = Xrn(urn)
+ aggregate = IotLABAggregate(self)
# parse rspec
rspec = RSpec(rspec_string)
- # requested_attributes = rspec.version.get_slice_attributes()
-
- # ensure site record exists
-
- # ensure person records exists
- for user in users:
- # XXX LOIC using hrn is a workaround because the function
- # Xrn.get_urn returns 'urn:publicid:IDN+onelab:upmc+timur_friedman'
- # Instead of this 'urn:publicid:IDN+onelab:upmc+user+timur_friedman'
- user['hrn'] = urn_to_hrn(user['urn'])[0]
- # XXX LOIC adding the users of the slice to reg-researchers
- # reg-researchers is used in iotlabslices.py verify_slice in order to add the slice
- if 'reg-researchers' not in slice_record:
- slice_record['reg-researchers'] = list()
- slice_record['reg-researchers'].append(user['hrn'])
- if caller_hrn == user['hrn']:
- #hierarchical_user = user['hrn'].split(".")
- #user['login'] = hierarchical_user[-1]
- #slice_record['login'] = user['login']
- slice_record['user']=user
-
- # oui c'est degueulasse, le slice_record se retrouve modifie
- # dans la methode avec les infos du user, els infos sont propagees
- # dans verify_slice_leases
- logger.debug("IOTLABDRIVER.PY BEFORE slices.verify_persons")
-
- # XXX JORDAN XXX slice_record devrait recevoir le caller_xrn...
- # LOIC maintenant c'est fait au dessus
- logger.debug("IOTLABDRIVER.PY - LOIC - slice_record[user][hrn] = %s" % slice_record['user']['hrn'])
- logger.debug("IOTLABDRIVER.PY - LOIC - slice_record[reg-researchers] = %s" % slice_record['reg-researchers'])
- persons = slices.verify_persons(xrn.hrn, slice_record, users,
- options=options)
- logger.debug("IOTLABDRIVER.PY AFTER slices.verify_persons")
- logger.debug("LOIC - slice_record[user] = %s" % slice_record['user'])
- logger.debug("IOTLABDRIVER.PY - LOIC - slice_record[reg-researchers] = %s" % slice_record['reg-researchers'])
-
- # ensure slice record exists
- current_slice = slices.verify_slice(xrn.hrn, slice_record, sfa_peer)
- logger.debug("LOIC - AFTER verify_slice - slice_record[user] = %s" % slice_record['user'])
- logger.debug("IOTLABDRIVER.PY - LOIC - slice_record[reg-researchers] = %s" % slice_record['reg-researchers'])
- logger.debug("IOTLABDRIVER.PY \t ===============allocate \t\
- \r\n \r\n current_slice %s" % (current_slice))
-
- # ensure slice attributes exists
- # slices.verify_slice_attributes(slice, requested_attributes,
- # options=options)
- # add/remove slice from nodes
- # XXX JORDAN ensure requested_xp_dict returns a dict with all new leases
- requested_xp_dict = self._process_requested_xp_dict(rspec)
-
- logger.debug("IOTLABDRIVER.PY \tallocate requested_xp_dict %s "
- % (requested_xp_dict))
- request_nodes = rspec.version.get_nodes_with_slivers()
-
-
- # JORDAN: nodes_list will contain a list of newly allocated nodes
- nodes_list = []
- for start_time in requested_xp_dict:
- lease = requested_xp_dict[start_time]
- for hostname in lease['hostname']:
- nodes_list.append(hostname)
-
- # nodes = slices.verify_slice_nodes(slice_record,request_nodes, peer)
- logger.debug("IOTLABDRIVER.PY \tallocate nodes_list %s slice_record %s"
- % (nodes_list, slice_record))
-
- # add/remove leases
- rspec_requested_leases = rspec.version.get_leases()
- leases = slices.verify_slice_leases(slice_record,
- requested_xp_dict, peer)
- # JORDAN:
- # leases = already in slice
- # rspec_requested_leases = newly requested
- logger.debug("IOTLABDRIVER.PY \tallocate leases %s \
- rspec_requested_leases %s" % (leases,
- rspec_requested_leases))
- # update sliver allocations
- # JORDAN Here we loop over newly allocated nodes
- for hostname in nodes_list:
- client_id = hostname
- node_urn = xrn_object(self.testbed_shell.root_auth, hostname).urn
- component_id = node_urn
- if 'reg-urn' in current_slice:
- slice_urn = current_slice['reg-urn']
- else:
- slice_urn = current_slice['urn']
-
- # JORDAN: We loop over leases previously in the slice
- for lease in leases:
- if hostname in lease['reserved_nodes']:
- index = lease['reserved_nodes'].index(hostname)
- sliver_hrn = '%s.%s-%s' % (self.hrn, lease['lease_id'],
- lease['resource_ids'][index] )
- sliver_id = Xrn(sliver_hrn, type='sliver').urn
- record = SliverAllocation(sliver_id=sliver_id, client_id=client_id,
- component_id=component_id,
- slice_urn = slice_urn,
- allocation_state='geni_allocated')
- record.sync(self.api.dbsession())
-
- # JORDAN : added describe_options which was not specified at all
- describe_options = {
- 'geni_slice_urn': urn,
- 'list_leases': 'all',
- }
- return aggregate.describe([xrn.get_urn()], version=rspec.version, options=describe_options)
-
- def provision(self, urns, options=None):
- if options is None: options={}
- # update users
- slices = IotlabSlices(self)
- aggregate = IotlabAggregate(self)
- slivers = aggregate.get_slivers(urns)
- current_slice = slivers[0]
- peer = slices.get_peer(current_slice['hrn'])
- sfa_peer = slices.get_sfa_peer(current_slice['hrn'])
- users = options.get('geni_users', [])
- # persons = slices.verify_persons(current_slice['hrn'],
- # current_slice, users, peer, sfa_peer, options=options)
- # slices.handle_peer(None, None, persons, peer)
- # update sliver allocation states and set them to geni_provisioned
- sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- dbsession = self.api.dbsession()
- SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned',
- dbsession)
- version_manager = VersionManager()
- rspec_version = version_manager.get_version(options[
- 'geni_rspec_version'])
- # JORDAN : added describe_options instead of options
- # urns at the begining ???
- describe_options = {
- 'geni_slice_urn': current_slice['urn'],
- 'list_leases': 'all',
- }
- return self.describe(urns, rspec_version, options=describe_options)
+ caller_hrn = options.get('actual_caller_hrn', [])
+ geni_users = options.get('geni_users', [])
+ caller_user = [user for user in geni_users if
+ urn_to_hrn(user['urn'])[0] == caller_hrn][0]
+ logger.warning("iotlabdriver allocate caller : %s" %
+ caller_user['email'])
+
+ login = self._get_user_login(caller_user)
+ # only if we have a user
+ if login:
+ nodes_list, start_time, duration = \
+ self._get_experiment(rspec)
+ logger.warning("iotlabdriver allocate submit OAR job :"
+ " %s %s %s %s" %
+ (xrn.hrn, start_time, duration, nodes_list))
+ # [0-9A-Za-z_] with onelab.inria.test_iotlab
+ exp_name = '_'.join((xrn.hrn).split('.'))
+ # submit OAR job
+ ret = self.shell.reserve_nodes(login,
+ exp_name,
+ nodes_list,
+ start_time,
+ duration)
+
+ # in case of job submission success save slice and lease job
+ # id association in database
+ if 'id' in ret:
+ self._save_db_lease(int(ret['id']),
+ xrn.hrn)
+
+ return aggregate.describe([xrn.get_urn()], version=rspec.version)
--- /dev/null
+# -*- coding:utf-8 -*-
+""" PostGreSQL table management """
+
+from sfa.storage.model import Base
+from sqlalchemy import Column, Integer, String
+
+
+class LeaseTable(Base):
+ """ SQL alchemy class to manipulate the rows of the lease_table table in the
+ SFA database. Table creation is made by the importer (iotlabimporter.py)
+ if it is not in the database yet.
+
+ As we don't have a link between a lease (OAR job submission) and a slice we
+ store this information in database. We matched OAR job id and slice hrn.
+ """
+ # pylint:disable=R0903
+ __tablename__ = 'lease_table'
+
+ job_id = Column(Integer, primary_key=True)
+ slice_hrn = Column(String)
+
+ def __init__(self, job_id, slice_hrn):
+ """
+ Defines a row of the lease_table table
+ """
+ self.job_id = job_id
+ self.slice_hrn = slice_hrn
+
+ def __repr__(self):
+ """Prints the SQLAlchemy record to the format defined
+ by the function.
+ """
+ result = "job_id %s, slice_hrn = %s" % (self.job_id,
+ self.slice_hrn)
+ return result
+++ /dev/null
-"""
-File holding a class to define the table in the iotlab dedicated table.
-The table is the SFA dtabase, therefore all the access mecanism
-(session, engine...) is handled by alchemy.py.
-
-..seealso:: alchemy.py
-"""
-
-from sfa.storage.model import Base
-from sqlalchemy import Column, Integer, String
-
-
-
-class LeaseTableXP (Base):
- """ SQL alchemy class to manipulate the rows of the lease_table table in the
- SFA database. Handles the records representation and creates.
- Table creation is made by the importer if it is not in the database yet.
-
- .. seealso:: init_tables in model.py, run in iotlabimporter.py
-
- """
- __tablename__ = 'lease_table'
-
- slice_hrn = Column(String)
- experiment_id = Column(Integer, primary_key=True)
- end_time = Column(Integer, nullable=False)
-
- def __init__(self, slice_hrn=None, experiment_id=None, end_time=None):
- """
- Defines a row of the lease_table table
- """
- if slice_hrn:
- self.slice_hrn = slice_hrn
- if experiment_id:
- self.experiment_id = experiment_id
- if end_time:
- self.end_time = end_time
-
- def __repr__(self):
- """Prints the SQLAlchemy record to the format defined
- by the function.
- """
- result = "<lease_table : slice_hrn = %s , experiment_id %s \
- end_time = %s" % (self.slice_hrn, self.experiment_id,
- self.end_time)
- result += ">"
- return result
-"""
-File containing the IotlabShell, used to interact with nodes, users,
-slices, leases and keys, as well as the dedicated iotlab database and table,
-holding information about which slice is running which job.
-
-"""
-from datetime import datetime
+# -*- coding:utf-8 -*-
+""" Shell driver management """
from sfa.util.sfalogging import logger
-from sfa.util.sfatime import SFATIME_FORMAT
-
-from sfa.iotlab.OARrestapi import OARrestapi
-from sfa.iotlab.LDAPapi import LDAPapi
-
-
-class IotlabShell():
- """ Class enabled to use LDAP and OAR api calls. """
-
- _MINIMUM_DURATION = 10 # 10 units of granularity 60 s, 10 mins
-
- def __init__(self, config):
- """Creates an instance of OARrestapi and LDAPapi which will be used to
- issue calls to OAR or LDAP methods.
- Set the time format and the testbed granularity used for OAR
- reservation and leases.
-
- :param config: configuration object from sfa.util.config
- :type config: Config object
- """
-
- # self.leases_db = TestbedAdditionalSfaDB(config)
- self.oar = OARrestapi()
- self.ldap = LDAPapi()
- self.time_format = SFATIME_FORMAT
- self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
- self.grain = 60 # 10 mins lease minimum, 60 sec granularity
- #import logging, logging.handlers
- #from sfa.util.sfalogging import _SfaLogger
- #sql_logger = _SfaLogger(loggername = 'sqlalchemy.engine', \
- #level=logging.DEBUG)
- return
-
- @staticmethod
- def GetMinExperimentDurationInGranularity():
- """ Returns the minimum allowed duration for an experiment on the
- testbed. In seconds.
-
- """
- return IotlabShell._MINIMUM_DURATION
-
-
-
+from iotlabcli import auth
+from iotlabcli import rest
+from iotlabcli import helpers
+from iotlabcli import experiment
+from urllib2 import HTTPError
- #TODO : Handling OR request in make_ldap_filters_from_records
- #instead of the for loop
- #over the records' list
- def GetPersons(self, person_filter=None):
- """
- Get the enabled users and their properties from Iotlab LDAP.
- If a filter is specified, looks for the user whose properties match
- the filter, otherwise returns the whole enabled users'list.
-
- :param person_filter: Must be a list of dictionnaries with users
- properties when not set to None.
- :type person_filter: list of dict
-
- :returns: Returns a list of users whose accounts are enabled
- found in ldap.
- :rtype: list of dicts
-
- """
- logger.debug("IOTLAB_API \tGetPersons 1st person_filter %s"
- % (person_filter[0]['hrn']))
- person_list = []
- if person_filter and isinstance(person_filter, list):
- #If we are looking for a list of users (list of dict records)
- #Usually the list contains only one user record
- for searched_attributes in person_filter:
-
- #Get only enabled user accounts in iotlab LDAP :
- #add a filter for make_ldap_filters_from_record
- person = self.ldap.LdapFindUser(searched_attributes,
- is_user_enabled=True)
- #If a person was found, append it to the list
- if person:
- person_list.append(person)
-
- #If the list is empty, return None
- if len(person_list) is 0:
- person_list = None
-
- else:
- #Get only enabled user accounts in iotlab LDAP :
- #add a filter for make_ldap_filters_from_record
- person_list = self.ldap.LdapFindUser(is_user_enabled=True)
- return person_list
+class IotLABShell(object):
+ """
+ A REST client shell to the Iot-LAB testbed API instance
+ """
+ def __init__(self):
+ user, passwd = auth.get_user_credentials()
+ self.api = rest.Api(user, passwd)
- #def GetTimezone(self):
- #""" Returns the OAR server time and timezone.
- #Unused SA 30/05/13"""
- #server_timestamp, server_tz = self.oar.parser.\
- #SendRequest("GET_timezone")
- #return server_timestamp, server_tz
- def DeleteJobs(self, job_id, username):
+ def get_nodes(self):
"""
-
- Deletes the job with the specified job_id and username on OAR by
- posting a delete request to OAR.
-
- :param job_id: job id in OAR.
- :param username: user's iotlab login in LDAP.
- :type job_id: integer
- :type username: string
-
- :returns: dictionary with the job id and if delete has been successful
- (True) or no (False)
- :rtype: dict
-
- """
- logger.debug("IOTLAB_API \tDeleteJobs jobid %s username %s "
- % (job_id, username))
- if not job_id or job_id is -1:
- return
-
- reqdict = {}
- reqdict['method'] = "delete"
- reqdict['strval'] = str(job_id)
-
- answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id',
- reqdict, username)
- if answer['status'] == 'Delete request registered':
- ret = {job_id: True}
- else:
- ret = {job_id: False}
- logger.debug("IOTLAB_API \tDeleteJobs jobid %s \r\n answer %s \
- username %s" % (job_id, answer, username))
- return ret
-
-
-
- ##TODO : Unused GetJobsId ? SA 05/07/12
- #def GetJobsId(self, job_id, username = None ):
- #"""
- #Details about a specific job.
- #Includes details about submission time, jot type, state, events,
- #owner, assigned ressources, walltime etc...
-
- #"""
- #req = "GET_jobs_id"
- #node_list_k = 'assigned_network_address'
- ##Get job info from OAR
- #job_info = self.oar.parser.SendRequest(req, job_id, username)
-
- #logger.debug("IOTLAB_API \t GetJobsId %s " %(job_info))
- #try:
- #if job_info['state'] == 'Terminated':
- #logger.debug("IOTLAB_API \t GetJobsId job %s TERMINATED"\
- #%(job_id))
- #return None
- #if job_info['state'] == 'Error':
- #logger.debug("IOTLAB_API \t GetJobsId ERROR message %s "\
- #%(job_info))
- #return None
-
- #except KeyError:
- #logger.error("IOTLAB_API \tGetJobsId KeyError")
- #return None
-
- #parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
- #node_list_k)
- ##Replaces the previous entry
- ##"assigned_network_address" / "reserved_resources"
- ##with "node_ids"
- #job_info.update({'node_ids':parsed_job_info[node_list_k]})
- #del job_info[node_list_k]
- #logger.debug(" \r\nIOTLAB_API \t GetJobsId job_info %s " %(job_info))
- #return job_info
-
-
- def GetJobsResources(self, job_id, username = None):
- """ Gets the list of nodes associated with the job_id and username
- if provided.
-
- Transforms the iotlab hostnames to the corresponding SFA nodes hrns.
- Returns dict key :'node_ids' , value : hostnames list.
-
- :param username: user's LDAP login
- :paran job_id: job's OAR identifier.
- :type username: string
- :type job_id: integer
-
- :returns: dicionary with nodes' hostnames belonging to the job.
+ Get all OAR nodes
+ :returns: nodes with OAR properties
:rtype: dict
- .. warning:: Unused. SA 16/10/13
- """
-
- req = "GET_jobs_id_resources"
-
-
- #Get job resources list from OAR
- node_id_list = self.oar.parser.SendRequest(req, job_id, username)
- logger.debug("IOTLAB_API \t GetJobsResources %s " %(node_id_list))
- resources = self.GetNodes()
- oar_id_node_dict = {}
- for node in resources:
- oar_id_node_dict[node['oar_id']] = node['hostname']
- hostname_list = \
- self.__get_hostnames_from_oar_node_ids(oar_id_node_dict,
- node_id_list)
-
-
- #Replaces the previous entry "assigned_network_address" /
- #"reserved_resources" with "node_ids"
- job_info = {'node_ids': hostname_list}
-
- return job_info
-
-
- def GetNodesCurrentlyInUse(self):
- """Returns a list of all the nodes already involved in an oar running
- job.
- :rtype: list of nodes hostnames.
- """
- return self.oar.parser.SendRequest("GET_running_jobs")
-
- @staticmethod
- def __get_hostnames_from_oar_node_ids(oar_id_node_dict,
- resource_id_list ):
- """Get the hostnames of the nodes from their OAR identifiers.
- Get the list of nodes dict using GetNodes and find the hostname
- associated with the identifier.
- :param oar_id_node_dict: full node dictionary list keyed by oar node id
- :param resource_id_list: list of nodes identifiers
- :returns: list of node hostnames.
- """
-
- hostname_list = []
- for resource_id in resource_id_list:
- #Because jobs requested "asap" do not have defined resources
- if resource_id is not "Undefined":
- hostname_list.append(\
- oar_id_node_dict[resource_id]['hostname'])
-
- return hostname_list
-
- def GetReservedNodes(self, username=None):
- """ Get list of leases. Get the leases for the username if specified,
- otherwise get all the leases. Finds the nodes hostnames for each
- OAR node identifier.
- :param username: user's LDAP login
- :type username: string
- :returns: list of reservations dict
- :rtype: dict list
- """
-
- #Get the nodes in use and the reserved nodes
- reservation_dict_list = \
- self.oar.parser.SendRequest("GET_reserved_nodes", \
- username = username)
-
- # Get the full node dict list once for all
- # so that we can get the hostnames given their oar node id afterwards
- # when the reservations are checked.
- full_nodes_dict_list = self.GetNodes()
- #Put the full node list into a dictionary keyed by oar node id
- oar_id_node_dict = {}
- for node in full_nodes_dict_list:
- oar_id_node_dict[node['oar_id']] = node
-
- for resa in reservation_dict_list:
- logger.debug ("GetReservedNodes resa %s"%(resa))
- #dict list of hostnames and their site
- resa['reserved_nodes'] = \
- self.__get_hostnames_from_oar_node_ids(oar_id_node_dict,
- resa['resource_ids'])
-
- #del resa['resource_ids']
- return reservation_dict_list
-
- def GetNodes(self, node_filter_dict=None, return_fields_list=None):
- """
-
- Make a list of iotlab nodes and their properties from information
- given by OAR. Search for specific nodes if some filters are
- specified. Nodes properties returned if no return_fields_list given:
- 'hrn','archi','mobile','hostname','site','boot_state','node_id',
- 'radio','posx','posy','oar_id','posz'.
-
- :param node_filter_dict: dictionnary of lists with node properties. For
- instance, if you want to look for a specific node with its hrn,
- the node_filter_dict should be {'hrn': [hrn_of_the_node]}
- :type node_filter_dict: dict
- :param return_fields_list: list of specific fields the user wants to be
- returned.
- :type return_fields_list: list
- :returns: list of dictionaries with node properties
- :rtype: list
-
- """
- node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
- node_dict_list = node_dict_by_id.values()
- logger.debug (" IOTLAB_API GetNodes node_filter_dict %s \
- return_fields_list %s " % (node_filter_dict, return_fields_list))
- #No filtering needed return the list directly
- if not (node_filter_dict or return_fields_list):
- return node_dict_list
-
- return_node_list = []
- if node_filter_dict:
- for filter_key in node_filter_dict:
- try:
- #Filter the node_dict_list by each value contained in the
- #list node_filter_dict[filter_key]
- for value in node_filter_dict[filter_key]:
- for node in node_dict_list:
- if node[filter_key] == value:
- if return_fields_list:
- tmp = {}
- for k in return_fields_list:
- tmp[k] = node[k]
- return_node_list.append(tmp)
- else:
- return_node_list.append(node)
- except KeyError:
- logger.log_exc("GetNodes KeyError")
- return
-
-
- return return_node_list
-
-
-
-
-
- def GetSites(self, site_filter_name_list=None, return_fields_list=None):
- """Returns the list of Iotlab's sites with the associated nodes and
- the sites' properties as dictionaries.
-
- Site properties:
- ['address_ids', 'slice_ids', 'name', 'node_ids', 'url', 'person_ids',
- 'site_tag_ids', 'enabled', 'site', 'longitude', 'pcu_ids',
- 'max_slivers', 'max_slices', 'ext_consortium_id', 'date_created',
- 'latitude', 'is_public', 'peer_site_id', 'peer_id', 'abbreviated_name']
- Uses the OAR request GET_sites to find the Iotlab's sites.
-
- :param site_filter_name_list: used to specify specific sites
- :param return_fields_list: field that has to be returned
- :type site_filter_name_list: list
- :type return_fields_list: list
-
-
- """
- site_dict = self.oar.parser.SendRequest("GET_sites")
- #site_dict : dict where the key is the sit ename
- return_site_list = []
- if not (site_filter_name_list or return_fields_list):
- return_site_list = site_dict.values()
- return return_site_list
-
- for site_filter_name in site_filter_name_list:
- if site_filter_name in site_dict:
- if return_fields_list:
- for field in return_fields_list:
- tmp = {}
- try:
- tmp[field] = site_dict[site_filter_name][field]
- except KeyError:
- logger.error("GetSites KeyError %s " % (field))
- return None
- return_site_list.append(tmp)
- else:
- return_site_list.append(site_dict[site_filter_name])
-
- return return_site_list
-
-
- #TODO : Check rights to delete person
- def DeletePerson(self, person_record):
- """Disable an existing account in iotlab LDAP.
-
- Users and techs can only delete themselves. PIs can only
- delete themselves and other non-PIs at their sites.
- ins can delete anyone.
-
- :param person_record: user's record
- :type person_record: dict
- :returns: True if successful, False otherwise.
- :rtype: boolean
-
- .. todo:: CHECK THAT ONLY THE USER OR ADMIN CAN DEL HIMSELF.
- """
- #Disable user account in iotlab LDAP
- ret = self.ldap.LdapMarkUserAsDeleted(person_record)
- logger.warning("IOTLAB_API DeletePerson %s " % (person_record))
- return ret['bool']
-
- def DeleteSlice(self, slice_record):
- """Deletes the specified slice and kills the jobs associated with
- the slice if any, using DeleteSliceFromNodes.
-
- :param slice_record: record of the slice, must contain oar_job_id, user
- :type slice_record: dict
- :returns: True if all the jobs in the slice have been deleted,
- or the list of jobs that could not be deleted otherwise.
- :rtype: list or boolean
-
- .. seealso:: DeleteSliceFromNodes
-
- """
- ret = self.DeleteSliceFromNodes(slice_record)
- delete_failed = None
- for job_id in ret:
- if False in ret[job_id]:
- if delete_failed is None:
- delete_failed = []
- delete_failed.append(job_id)
-
- logger.info("IOTLAB_API DeleteSlice %s answer %s"%(slice_record, \
- delete_failed))
- return delete_failed or True
-
-
-
-
-
-
-
-
-
-
-
- #TODO AddPersonKey 04/07/2012 SA
- def AddPersonKey(self, person_uid, old_attributes_dict, new_key_dict):
- """Adds a new key to the specified account. Adds the key to the
- iotlab ldap, provided that the person_uid is valid.
-
- Non-admins can only modify their own keys.
-
- :param person_uid: user's iotlab login in LDAP
- :param old_attributes_dict: dict with the user's old sshPublicKey
- :param new_key_dict: dict with the user's new sshPublicKey
- :type person_uid: string
-
-
- :rtype: Boolean
- :returns: True if the key has been modified, False otherwise.
-
- """
- ret = self.ldap.LdapModify(person_uid, old_attributes_dict, \
- new_key_dict)
- logger.warning("IOTLAB_API AddPersonKey EMPTY - DO NOTHING \r\n ")
- return ret['bool']
-
- def DeleteLeases(self, leases_id_list, slice_hrn):
- """
-
- Deletes several leases, based on their job ids and the slice
- they are associated with. Uses DeleteJobs to delete the jobs
- on OAR. Note that one slice can contain multiple jobs, and in this
- case all the jobs in the leases_id_list MUST belong to ONE slice,
- since there is only one slice hrn provided here.
-
- :param leases_id_list: list of job ids that belong to the slice whose
- slice hrn is provided.
- :param slice_hrn: the slice hrn.
- :type slice_hrn: string
-
- .. warning:: Does not have a return value since there was no easy
- way to handle failure when dealing with multiple job delete. Plus,
- there was no easy way to report it to the user.
-
- """
- logger.debug("IOTLAB_API DeleteLeases leases_id_list %s slice_hrn %s \
- \r\n " %(leases_id_list, slice_hrn))
- for job_id in leases_id_list:
- self.DeleteJobs(job_id, slice_hrn)
-
- return
-
- @staticmethod
- def _process_walltime(duration):
- """ Calculates the walltime in seconds from the duration in H:M:S
- specified in the RSpec.
-
- """
- if duration:
- # Fixing the walltime by adding a few delays.
- # First put the walltime in seconds oarAdditionalDelay = 20;
- # additional delay for /bin/sleep command to
- # take in account prologue and epilogue scripts execution
- # int walltimeAdditionalDelay = 240; additional delay
- #for prologue/epilogue execution = $SERVER_PROLOGUE_EPILOGUE_TIMEOUT
- #in oar.conf
- # Put the duration in seconds first
- #desired_walltime = duration * 60
- desired_walltime = duration
- # JORDAN : removed the 4 minutes added by default in iotlab
- # XXX total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
- total_walltime = desired_walltime # Needed to have slots aligned in MySlice (temp fix) # JA 11/07/2014
- sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
- walltime = []
- #Put the walltime back in str form
- #First get the hours
- walltime.append(str(total_walltime / 3600))
- total_walltime = total_walltime - 3600 * int(walltime[0])
- #Get the remaining minutes
- walltime.append(str(total_walltime / 60))
- total_walltime = total_walltime - 60 * int(walltime[1])
- #Get the seconds
- walltime.append(str(total_walltime))
-
- else:
- logger.log_exc(" __process_walltime duration null")
-
- return walltime, sleep_walltime
-
- @staticmethod
- def _create_job_structure_request_for_OAR(lease_dict):
- """ Creates the structure needed for a correct POST on OAR.
- Makes the timestamp transformation into the appropriate format.
- Sends the POST request to create the job with the resources in
- added_nodes.
-
- """
-
- nodeid_list = []
- reqdict = {}
-
-
- reqdict['workdir'] = '/tmp'
- reqdict['resource'] = "{network_address in ("
-
- for node in lease_dict['added_nodes']:
- logger.debug("\r\n \r\n OARrestapi \t \
- __create_job_structure_request_for_OAR node %s" %(node))
-
- # Get the ID of the node
- nodeid = node
- reqdict['resource'] += "'" + nodeid + "', "
- nodeid_list.append(nodeid)
-
- custom_length = len(reqdict['resource'])- 2
- reqdict['resource'] = reqdict['resource'][0:custom_length] + \
- ")}/nodes=" + str(len(nodeid_list))
-
-
- walltime, sleep_walltime = \
- IotlabShell._process_walltime(\
- int(lease_dict['lease_duration']))
-
-
- reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
- ":" + str(walltime[1]) + ":" + str(walltime[2])
- reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
-
- #In case of a scheduled experiment (not immediate)
- #To run an XP immediately, don't specify date and time in RSpec
- #They will be set to None.
- if lease_dict['lease_start_time'] is not '0':
- #Readable time accepted by OAR
- # converting timestamp to date in the local timezone tz = None
- start_time = datetime.fromtimestamp( \
- int(lease_dict['lease_start_time']), tz=None).\
- strftime(lease_dict['time_format'])
-
- reqdict['reservation'] = str(start_time)
- #If there is not start time, Immediate XP. No need to add special
- # OAR parameters
-
-
- reqdict['type'] = "deploy"
- reqdict['directory'] = ""
- reqdict['name'] = "SFA_" + lease_dict['slice_user']
-
- return reqdict
-
-
- def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
- lease_start_time, lease_duration, slice_user=None):
-
- """
- Create a job request structure based on the information provided
- and post the job on OAR.
- :param added_nodes: list of nodes that belong to the described lease.
- :param slice_name: the slice hrn associated to the lease.
- :param lease_start_time: timestamp of the lease startting time.
- :param lease_duration: lease durationin minutes
-
- """
- lease_dict = {}
- lease_dict['lease_start_time'] = lease_start_time
- lease_dict['lease_duration'] = lease_duration
- lease_dict['added_nodes'] = added_nodes
- lease_dict['slice_name'] = slice_name
- lease_dict['slice_user'] = slice_user
- lease_dict['grain'] = self.GetLeaseGranularity()
- # I don't know why the SFATIME_FORMAT has changed...
- # from sfa.util.sfatime import SFATIME_FORMAT
- # Let's use a fixed format %Y-%m-%d %H:%M:%S
- #lease_dict['time_format'] = self.time_format
- lease_dict['time_format'] = '%Y-%m-%d %H:%M:%S'
-
-
- logger.debug("IOTLAB_API.PY \tLaunchExperimentOnOAR slice_user %s\
- \r\n " %(slice_user))
- #Create the request for OAR
- reqdict = self._create_job_structure_request_for_OAR(lease_dict)
- # first step : start the OAR job and update the job
- logger.debug("IOTLAB_API.PY \tLaunchExperimentOnOAR reqdict %s\
- \r\n " %(reqdict))
-
- answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
- reqdict, slice_user)
- logger.debug("IOTLAB_API \tLaunchExperimentOnOAR jobid %s " %(answer))
+ :Example:
+ {"items": [
+ {"archi": "a8:at86rf231",
+ "mobile": 0,
+ "mobility_type": " ",
+ "network_address": "a8-53.grenoble.iot-lab.info",
+ "site": "paris",
+ "state": "Alive",
+ "uid": "9856",
+ "x": "0.37",
+ "y": "5.44",
+ "z": "2.33"
+ },
+             {"archi": ...}
+         ]
+        }
+ """
+        logger.warning("iotlabshell get_nodes")
+ nodes_dict = {}
try:
- jobid = answer['id']
- except KeyError:
- logger.log_exc("IOTLAB_API \tLaunchExperimentOnOAR \
- Impossible to create job %s " %(answer))
- return None
-
-
-
-
- if jobid :
- logger.debug("IOTLAB_API \tLaunchExperimentOnOAR jobid %s \
- added_nodes %s slice_user %s" %(jobid, added_nodes, \
- slice_user))
+ nodes = experiment.info_experiment(self.api)
+ except HTTPError as err:
+            logger.warning("iotlabshell get_nodes error %s" % err.reason)
+ return {'error' : err.reason}
+ for node in nodes['items']:
+ nodes_dict[node['network_address']] = node
+ return nodes_dict
- return jobid
-
-
-
-
-
- #Delete the jobs from job_iotlab table
- def DeleteSliceFromNodes(self, slice_record):
+ def get_users(self):
"""
-
- Deletes all the running or scheduled jobs of a given slice
- given its record.
-
- :param slice_record: record of the slice, must contain oar_job_id, user
- :type slice_record: dict
-
- :returns: dict of the jobs'deletion status. Success= True, Failure=
- False, for each job id.
+ Get all LDAP users
+ :returns: users with LDAP attributes
:rtype: dict
- """
- logger.debug("IOTLAB_API \t DeleteSliceFromNodes %s "
- % (slice_record))
-
- if isinstance(slice_record['oar_job_id'], list):
- oar_bool_answer = {}
- for job_id in slice_record['oar_job_id']:
- ret = self.DeleteJobs(job_id, slice_record['user'])
-
- oar_bool_answer.update(ret)
-
- else:
- oar_bool_answer = self.DeleteJobs(slice_record['oar_job_id'],
- slice_record['user'])
-
- return oar_bool_answer
-
-
-
- def GetLeaseGranularity(self):
- """ Returns the granularity of an experiment in the Iotlab testbed.
- OAR uses seconds for experiments duration , the granulaity is also
- defined in seconds.
- Experiments which last less than 10 min (600 sec) are invalid"""
- return self.grain
-
-
-
- @staticmethod
- def filter_lease(reservation_list, filter_type, filter_value ):
- """Filters the lease reservation list by removing each lease whose
- filter_type is not equal to the filter_value provided. Returns the list
- of leases in one slice, defined by the slice_hrn if filter_type
- is 'slice_hrn'. Otherwise, returns all leases scheduled starting from
- the filter_value if filter_type is 't_from'.
-
- :param reservation_list: leases list
- :type reservation_list: list of dictionary
- :param filter_type: can be either 't_from' or 'slice hrn'
- :type filter_type: string
- :param filter_value: depending on the filter_type, can be the slice_hrn
- or can be defining a timespan.
- :type filter_value: if filter_type is 't_from', filter_value is int.
- if filter_type is 'slice_hrn', filter_value is a string.
-
-
- :returns: filtered_reservation_list, contains only leases running or
- scheduled in the given slice (wanted_slice).Dict keys are
- 'lease_id','reserved_nodes','slice_id', 'state', 'user',
- 'component_id_list','slice_hrn', 'resource_ids', 't_from', 't_until'
- :rtype: list of dict
-
- """
- filtered_reservation_list = list(reservation_list)
- logger.debug("IOTLAB_API \t filter_lease_name reservation_list %s" \
- % (reservation_list))
+ :Example:
+ [{"firstName":"Frederic",
+ "lastName":"Saint-marcel",
+ "email":"frederic.saint-marcel@inria.fr",
+ "structure":"INRIA",
+ "city":"Grenoble",
+ "country":"France",
+ "login":"saintmar",
+           "sshPublicKeys":["ssh-rsa AAAAB3..."],
+ "motivations":"test SFA",
+ "validate":true,
+ "admin":true,
+ "createTimeStamp":"20120911115247Z"},
+ {"firstName":"Julien",
+ ...
+ }
+ ]
+ """
+        logger.warning("iotlabshell get_users")
+ users_dict = {}
try:
- for reservation in reservation_list:
- if \
- (filter_type is 'slice_hrn' and \
- reservation['slice_hrn'] != filter_value) or \
- (filter_type is 't_from' and \
- reservation['t_from'] > filter_value):
- filtered_reservation_list.remove(reservation)
- except TypeError:
- logger.log_exc("Iotlabshell filter_lease : filter_type %s \
- filter_value %s not in lease" %(filter_type,
- filter_value))
-
- return filtered_reservation_list
-
- # @staticmethod
- # def filter_lease_start_time(reservation_list, timespan):
- # """Filters the lease reservation list by removing each lease whose
- # slice_hrn is not the wanted_slice provided. Returns the list of leases
- # in one slice (wanted_slice).
-
- # """
- # filtered_reservation_list = list(reservation_list)
-
- # for reservation in reservation_list:
- # if 't_from' in reservation and \
- # reservation['t_from'] > timespan:
- # filtered_reservation_list.remove(reservation)
-
- # return filtered_reservation_list
-
-
-
-
-
-
-#TODO FUNCTIONS SECTION 04/07/2012 SA
-
-
- ##TODO UpdateSlice 04/07/2012 SA || Commented out 28/05/13 SA
- ##Funciton should delete and create another job since oin iotlab slice=job
- #def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
- #"""Updates the parameters of an existing slice with the values in
- #slice_fields.
- #Users may only update slices of which they are members.
- #PIs may update any of the slices at their sites, or any slices of
- #which they are members. Admins may update any slice.
- #Only PIs and admins may update max_nodes. Slices cannot be renewed
- #(by updating the expires parameter) more than 8 weeks into the future.
- #Returns 1 if successful, faults otherwise.
- #FROM PLC API DOC
-
- #"""
- #logger.warning("IOTLAB_API UpdateSlice EMPTY - DO NOTHING \r\n ")
- #return
-
- #Unused SA 30/05/13, we only update the user's key or we delete it.
- ##TODO UpdatePerson 04/07/2012 SA
- #def UpdatePerson(self, iotlab_hrn, federated_hrn, person_fields=None):
- #"""Updates a person. Only the fields specified in person_fields
- #are updated, all other fields are left untouched.
- #Users and techs can only update themselves. PIs can only update
- #themselves and other non-PIs at their sites.
- #Returns 1 if successful, faults otherwise.
- #FROM PLC API DOC
-
- #"""
- ##new_row = FederatedToIotlab(iotlab_hrn, federated_hrn)
- ##self.leases_db.testbed_session.add(new_row)
- ##self.leases_db.testbed_session.commit()
-
- #logger.debug("IOTLAB_API UpdatePerson EMPTY - DO NOTHING \r\n ")
- #return
-
-
-
-
- #TODO : test
- def DeleteKey(self, user_record, key_string):
- """Deletes a key in the LDAP entry of the specified user.
-
- Removes the key_string from the user's key list and updates the LDAP
- user's entry with the new key attributes.
-
- :param key_string: The ssh key to remove
- :param user_record: User's record
- :type key_string: string
- :type user_record: dict
- :returns: True if sucessful, False if not.
- :rtype: Boolean
-
- """
-
- all_user_keys = user_record['keys']
- all_user_keys.remove(key_string)
- new_attributes = {'sshPublicKey':all_user_keys}
- ret = self.ldap.LdapModifyUser(user_record, new_attributes)
- logger.debug("IOTLAB_API DeleteKey %s- " % (ret))
- return ret['bool']
-
-
-
-
-
-
-
-
- #Update slice unused, therefore sfa_fields_to_iotlab_fields unused
- #SA 30/05/13
- #@staticmethod
- #def sfa_fields_to_iotlab_fields(sfa_type, hrn, record):
- #"""
- #"""
-
- #iotlab_record = {}
- ##for field in record:
- ## iotlab_record[field] = record[field]
-
- #if sfa_type == "slice":
- ##instantion used in get_slivers ?
- #if not "instantiation" in iotlab_record:
- #iotlab_record["instantiation"] = "iotlab-instantiated"
- ##iotlab_record["hrn"] = hrn_to_pl_slicename(hrn)
- ##Unused hrn_to_pl_slicename because Iotlab's hrn already
- ##in the appropriate form SA 23/07/12
- #iotlab_record["hrn"] = hrn
- #logger.debug("IOTLAB_API.PY sfa_fields_to_iotlab_fields \
- #iotlab_record %s " %(iotlab_record['hrn']))
- #if "url" in record:
- #iotlab_record["url"] = record["url"]
- #if "description" in record:
- #iotlab_record["description"] = record["description"]
- #if "expires" in record:
- #iotlab_record["expires"] = int(record["expires"])
-
- ##nodes added by OAR only and then imported to SFA
- ##elif type == "node":
- ##if not "hostname" in iotlab_record:
- ##if not "hostname" in record:
- ##raise MissingSfaInfo("hostname")
- ##iotlab_record["hostname"] = record["hostname"]
- ##if not "model" in iotlab_record:
- ##iotlab_record["model"] = "geni"
-
- ##One authority only
- ##elif type == "authority":
- ##iotlab_record["login_base"] = hrn_to_iotlab_login_base(hrn)
-
- ##if not "name" in iotlab_record:
- ##iotlab_record["name"] = hrn
-
- ##if not "abbreviated_name" in iotlab_record:
- ##iotlab_record["abbreviated_name"] = hrn
-
- ##if not "enabled" in iotlab_record:
- ##iotlab_record["enabled"] = True
-
- ##if not "is_public" in iotlab_record:
- ##iotlab_record["is_public"] = True
-
- #return iotlab_record
-
-
-
-
-
-
-
-
-
-
+ users = self.api.method('admin/users')
+ except HTTPError as err:
+ logger.warning("iotlashell get_users error %s" % err.reason)
+ return {'error' : err.reason}
+ for user in users:
+ users_dict[user['email']] = user
+ return users_dict
+
+
+ def reserve_nodes(self, login, exp_name,
+ nodes_list, start_time, duration):
+ """
+ Submit a physical experiment (nodes list) and reservation date.
+ """
+ # pylint:disable=W0212,R0913,E1123
+ logger.warning("iotlashell reserve_nodes")
+ exp_file = helpers.FilesDict()
+ _experiment = experiment._Experiment(exp_name, duration, start_time)
+ _experiment.type = 'physical'
+ _experiment.nodes = nodes_list
+ exp_file['new_exp.json'] = helpers.json_dumps(_experiment)
+ try:
+ return self.api.method('admin/experiments?user=%s' % login,
+ 'post',
+ files=exp_file)
+ except HTTPError as err:
+ logger.warning("iotlashell reserve_nodes error %s" % err.reason)
+ return {'error' : err.reason}
+
+
+ def get_reserved_nodes(self):
+ """
+ Get all OAR jobs with state Waiting or Running.
+
+ :Example:
+ {"total":"1907",
+ "items":[
+ {"id":9960,
+ "resources": ["m3-16.devgrenoble.iot-lab.info",...],
+ "duration":"36000",
+ "name":"test_sniffer",
+ "state":"Running",
+ "owner":"saintmar",
+ "nb_resources":10,
+ "date":1427966468},
+ {"id": ...}
+ ]
+ }
+ """
+ logger.warning("iotlashell get_reserved_nodes")
+ reserved_nodes_dict = {}
+ request = 'admin/experiments?state=Running,Waiting'
+ try:
+ experiments = self.api.method(request)
+ except HTTPError as err:
+ logger.warning("iotlashell get_reserved_nodes error %s" %
+ err.reason)
+ return {'error' : err.reason}
+ for exp in experiments['items']:
+ # BUG IN OAR REST API : job with reservation didn't return
+ # resources attribute list
+ # we use another request for finding job resources
+ exp_nodes = self.api.method('admin/experiments/%d' % exp['id'])
+ exp['resources'] = exp_nodes['nodes']
+ reserved_nodes_dict[exp['id']] = exp
+ return reserved_nodes_dict
+
+
+ def add_user(self, slice_user):
+ """
+ Add LDAP user
+ """
+ # pylint:disable=E1123
+ logger.warning("iotlashell add_user")
+ user = {"type" : "SA", # single account creation
+ "city" : "To be defined",
+ "country" : "To be defined",
+ "motivations" : "SFA federation"}
+ email = slice_user['email']
+ user['email'] = email
+ user['sshPublicKey'] = slice_user['keys'][0]
+ # ex : onelab.inria
+ user['structure'] = slice_user['slice_record']['authority']
+ email = (email.split('@'))[0]
+ user['firstName'] = email.split('.')[0]
+ try:
+ user['lastName'] = email.split('.')[1]
+ except IndexError:
+ user['lastName'] = email.split('.')[0]
+ try:
+ self.api.method('admin/users', 'post',
+ json=user)
+ except HTTPError as err:
+ logger.warning("iotlashell add_user error %s" % err.reason)
+
\ No newline at end of file
+++ /dev/null
-"""
-This file defines the IotlabSlices class by which all the slice checkings
-upon lease creation are done.
-"""
-from sfa.util.xrn import get_authority, urn_to_hrn, hrn_to_urn
-from sfa.util.sfalogging import logger
-
-MAXINT = 2L**31-1
-
-
-class IotlabSlices:
- """
- This class is responsible for checking the slice when creating a
- lease or a sliver. Those checks include verifying that the user is valid,
- that the slice is known from the testbed or from our peers, that the list
- of nodes involved has not changed (in this case the lease is modified
- accordingly).
- """
- rspec_to_slice_tag = {'max_rate': 'net_max_rate'}
-
- def __init__(self, driver):
- """
- Get the reference to the driver here.
- """
- self.driver = driver
-
- def get_peer(self, xrn):
- """
- Finds the authority of a resource based on its xrn.
- If the authority is Iotlab (local) return None,
- Otherwise, look up in the DB if Iotlab is federated with this site
- authority and returns its DB record if it is the case.
-
- :param xrn: resource's xrn
- :type xrn: string
- :returns: peer record
- :rtype: dict
-
- """
- hrn, hrn_type = urn_to_hrn(xrn)
- #Does this slice belong to a local site or a peer iotlab site?
- peer = None
-
- # get this slice's authority (site)
- slice_authority = get_authority(hrn)
- #Iotlab stuff
- #This slice belongs to the current site
- if slice_authority == self.driver.testbed_shell.root_auth:
- site_authority = slice_authority
- return None
-
- site_authority = get_authority(slice_authority).lower()
- # get this site's authority (sfa root authority or sub authority)
-
- logger.debug("IOTLABSLICES \t get_peer slice_authority %s \
- site_authority %s hrn %s"
- % (slice_authority, site_authority, hrn))
-
- # check if we are already peered with this site_authority
- #if so find the peer record
- peers = self.driver.GetPeers(peer_filter=site_authority)
- for peer_record in peers:
- if site_authority == peer_record.hrn:
- peer = peer_record
- logger.debug(" IOTLABSLICES \tget_peer peer %s " % (peer))
- return peer
-
- def get_sfa_peer(self, xrn):
- """Returns the authority name for the xrn or None if the local site
- is the authority.
-
- :param xrn: the xrn of the resource we are looking the authority for.
- :type xrn: string
- :returns: the resources's authority name.
- :rtype: string
-
- """
- hrn, hrn_type = urn_to_hrn(xrn)
-
- # return the authority for this hrn or None if we are the authority
- sfa_peer = None
- slice_authority = get_authority(hrn)
- site_authority = get_authority(slice_authority)
-
- if site_authority != self.driver.hrn:
- sfa_peer = site_authority
-
- return sfa_peer
-
- def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
- """
- Compare requested leases with the leases already scheduled/
- running in OAR. If necessary, delete and recreate modified leases,
- and delete no longer requested ones.
-
- :param sfa_slice: sfa slice record
- :param requested_jobs_dict: dictionary of requested leases
- :param peer: sfa peer record
-
- :type sfa_slice: dict
- :type requested_jobs_dict: dict
- :type peer: dict
- :returns: leases list of dictionary
- :rtype: list
-
- """
-
- logger.debug("IOTLABSLICES verify_slice_leases sfa_slice %s "
- % (sfa_slice))
- #First get the list of current leases from OAR
- leases = self.driver.GetLeases({'slice_hrn': sfa_slice['hrn']})
- logger.debug("IOTLABSLICES verify_slice_leases requested_jobs_dict %s \
- leases %s " % (requested_jobs_dict, leases))
-
- current_nodes_reserved_by_start_time = {}
- requested_nodes_by_start_time = {}
- leases_by_start_time = {}
- reschedule_jobs_dict = {}
-
- #Create reduced dictionary with key start_time and value
- # the list of nodes
- #-for the leases already registered by OAR first
- # then for the new leases requested by the user
-
- #Leases already scheduled/running in OAR
- for lease in leases:
- current_nodes_reserved_by_start_time[lease['t_from']] = \
- lease['reserved_nodes']
- leases_by_start_time[lease['t_from']] = lease
-
- #First remove job whose duration is too short
- for job in requested_jobs_dict.values():
- job['duration'] = \
- str(int(job['duration']) \
- * self.driver.testbed_shell.GetLeaseGranularity())
- if job['duration'] < \
- self.driver.testbed_shell.GetLeaseGranularity():
- del requested_jobs_dict[job['start_time']]
-
- #Requested jobs
- for start_time in requested_jobs_dict:
- requested_nodes_by_start_time[int(start_time)] = \
- requested_jobs_dict[start_time]['hostname']
- #Check if there is any difference between the leases already
- #registered in OAR and the requested jobs.
- #Difference could be:
- #-Lease deleted in the requested jobs
- #-Added/removed nodes
- #-Newly added lease
-
- logger.debug("IOTLABSLICES verify_slice_leases \
- requested_nodes_by_start_time %s \
- "% (requested_nodes_by_start_time))
- #Find all deleted leases
- start_time_list = \
- list(set(leases_by_start_time.keys()).\
- difference(requested_nodes_by_start_time.keys()))
- deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
- for start_time in start_time_list]
-
-
- #Find added or removed nodes in exisiting leases
- for start_time in requested_nodes_by_start_time:
- logger.debug("IOTLABSLICES verify_slice_leases start_time %s \
- "%( start_time))
- if start_time in current_nodes_reserved_by_start_time:
-
- # JORDAN : if we request the same nodes: do nothing
- if requested_nodes_by_start_time[start_time] == \
- current_nodes_reserved_by_start_time[start_time]:
- continue
-
- else:
- update_node_set = \
- set(requested_nodes_by_start_time[start_time])
- added_nodes = \
- update_node_set.difference(\
- current_nodes_reserved_by_start_time[start_time])
- shared_nodes = \
- update_node_set.intersection(\
- current_nodes_reserved_by_start_time[start_time])
- old_nodes_set = \
- set(\
- current_nodes_reserved_by_start_time[start_time])
- removed_nodes = \
- old_nodes_set.difference(\
- requested_nodes_by_start_time[start_time])
- logger.debug("IOTLABSLICES verify_slice_leases \
- shared_nodes %s added_nodes %s removed_nodes %s"\
- %(shared_nodes, added_nodes,removed_nodes ))
- #If the lease is modified, delete it before
- #creating it again.
- #Add the deleted lease job id in the list
- #WARNING :rescheduling does not work if there is already
- # 2 running/scheduled jobs because deleting a job
- #takes time SA 18/10/2012
- if added_nodes or removed_nodes:
- deleted_leases.append(\
- leases_by_start_time[start_time]['lease_id'])
- #Reschedule the job
- if added_nodes or shared_nodes:
- reschedule_jobs_dict[str(start_time)] = \
- requested_jobs_dict[str(start_time)]
-
- else:
- #New lease
-
- job = requested_jobs_dict[str(start_time)]
- logger.debug("IOTLABSLICES \
- NEWLEASE slice %s job %s"
- % (sfa_slice, job))
- job_id = self.driver.AddLeases(
- job['hostname'],
- sfa_slice, int(job['start_time']),
- int(job['duration']))
-
- # Removed by jordan
- #if job_id is not None:
- # new_leases = self.driver.GetLeases(login=
- # sfa_slice['login'])
- # for new_lease in new_leases:
- # leases.append(new_lease)
-
- #Deleted leases are the ones with lease id not declared in the Rspec
- if deleted_leases:
- self.driver.testbed_shell.DeleteLeases(deleted_leases,
- sfa_slice['login'])
- #self.driver.testbed_shell.DeleteLeases(deleted_leases,
- # sfa_slice['user']['uid'])
- logger.debug("IOTLABSLICES \
- verify_slice_leases slice %s deleted_leases %s"
- % (sfa_slice, deleted_leases))
-
- if reschedule_jobs_dict:
- for start_time in reschedule_jobs_dict:
- job = reschedule_jobs_dict[start_time]
- self.driver.AddLeases(
- job['hostname'],
- sfa_slice, int(job['start_time']),
- int(job['duration']))
-
- # Added by Jordan: until we find a better solution, always update the list of leases
- return self.driver.GetLeases(login= sfa_slice['login'])
- #return leases
-
- def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
- """Check for wanted and unwanted nodes in the slice.
-
- Removes nodes and associated leases that the user does not want anymore
- by deleteing the associated job in OAR (DeleteSliceFromNodes).
- Returns the nodes' hostnames that are going to be in the slice.
-
- :param sfa_slice: slice record. Must contain node_ids and list_node_ids.
-
- :param requested_slivers: list of requested nodes' hostnames.
- :param peer: unused so far.
-
- :type sfa_slice: dict
- :type requested_slivers: list
- :type peer: string
-
- :returns: list requested nodes hostnames
- :rtype: list
-
- .. warning:: UNUSED SQA 24/07/13
- .. seealso:: DeleteSliceFromNodes
- .. todo:: check what to do with the peer? Can not remove peer nodes from
- slice here. Anyway, in this case, the peer should have gotten the
- remove request too.
-
- """
- current_slivers = []
- deleted_nodes = []
-
- if 'node_ids' in sfa_slice:
- nodes = self.driver.testbed_shell.GetNodes(
- sfa_slice['list_node_ids'],
- ['hostname'])
- current_slivers = [node['hostname'] for node in nodes]
-
- # remove nodes not in rspec
- deleted_nodes = list(set(current_slivers).
- difference(requested_slivers))
-
- logger.debug("IOTLABSLICES \tverify_slice_nodes slice %s\
- \r\n \r\n deleted_nodes %s"
- % (sfa_slice, deleted_nodes))
-
- if deleted_nodes:
- #Delete the entire experience
- self.driver.testbed_shell.DeleteSliceFromNodes(sfa_slice)
- return nodes
-
- def verify_slice(self, slice_hrn, slice_record, sfa_peer):
- """Ensures slice record exists.
-
- The slice record must exist either in Iotlab or in the other
- federated testbed (sfa_peer). If the slice does not belong to Iotlab,
- check if the user already exists in LDAP. In this case, adds the slice
- to the sfa DB and associates its LDAP user.
-
- :param slice_hrn: slice's name
- :param slice_record: sfa record of the slice
- :param sfa_peer: name of the peer authority if any.(not Iotlab).
-
- :type slice_hrn: string
- :type slice_record: dictionary
- :type sfa_peer: string
-
- .. seealso:: AddSlice
-
-
- """
-
- slicename = slice_hrn
- sfa_slice = None
-
- # check if slice belongs to Iotlab
- if slicename.startswith("iotlab"):
- slices_list = self.driver.GetSlices(slice_filter=slicename,
- slice_filter_type='slice_hrn')
-
- if slices_list:
- for sl in slices_list:
-
- logger.debug("IOTLABSLICES \t verify_slice slicename %s \
- slices_list %s sl %s \r slice_record %s"
- % (slicename, slices_list, sl, slice_record))
- sfa_slice = sl
- sfa_slice.update(slice_record)
-
- else:
- #Search for user in ldap based on email SA 14/11/12
- ldap_user = self.driver.testbed_shell.ldap.LdapFindUser(\
- slice_record['user'])
- logger.debug(" IOTLABSLICES \tverify_slice Oups \
- slice_record %s sfa_peer %s ldap_user %s"
- % (slice_record, sfa_peer, ldap_user))
- #User already registered in ldap, meaning user should be in SFA db
- #and hrn = sfa_auth+ uid
- sfa_slice = {'hrn': slicename,
- 'node_list': [],
- 'authority': slice_record['authority'],
- 'gid': slice_record['gid'],
- #'slice_id': slice_record['record_id'],
- 'reg-researchers': slice_record['reg-researchers'],
- 'urn': hrn_to_urn(slicename,'slice'),
- #'peer_authority': str(sfa_peer)
- }
-
- if ldap_user:
-# hrn = self.driver.testbed_shell.root_auth + '.' \
-# + ldap_user['uid']
- for hrn in slice_record['reg-researchers']:
- user = self.driver.get_user_record(hrn)
- if user:
- break
-
- logger.debug(" IOTLABSLICES \tverify_slice hrn %s USER %s"
- % (hrn, user))
-
- # add the external slice to the local SFA iotlab DB
- if sfa_slice:
- self.driver.AddSlice(sfa_slice, user)
-
- logger.debug("IOTLABSLICES \tverify_slice ADDSLICE OK")
- return sfa_slice
-
-
- def verify_persons(self, slice_hrn, slice_record, users, options=None):
- """Ensures the users in users list exist and are enabled in LDAP. Adds
- person if needed (AddPerson).
-
- Checking that a user exist is based on the user's email. If the user is
- still not found in the LDAP, it means that the user comes from another
- federated testbed. In this case an account has to be created in LDAP
- so as to enable the user to use the testbed, since we trust the testbed
- he comes from. This is done by calling AddPerson.
-
- :param slice_hrn: slice name
- :param slice_record: record of the slice_hrn
- :param users: users is a record list. Records can either be
- local records or users records from known and trusted federated
- sites.If the user is from another site that iotlab doesn't trust
- yet, then Resolve will raise an error before getting to allocate.
-
- :type slice_hrn: string
- :type slice_record: string
- :type users: list
-
- .. seealso:: AddPerson
- .. note:: Removed unused peer and sfa_peer parameters. SA 18/07/13.
- """
- if options is None: options={}
- user = slice_record['user']
- logger.debug("IOTLABSLICES \tverify_persons \tuser %s " % user)
- person = {
- 'peer_person_id': None,
- 'mail' : user['email'],
- 'email' : user['email'],
- 'key_ids' : user.get('key_ids', []),
- 'hrn' : user['hrn'],
- }
- if 'first_name' in user:
- person['first_name'] = user['first_name']
- if 'last_name' in user:
- person['last_name'] = user['last_name']
- if 'person_id' in user:
- person['person_id'] = user['person_id']
- if user['keys']:
- # Only one key is kept for IoTLAB
- person['pkey'] = user['keys'][0]
- # SFA DB (if user already exist we do nothing)
- self.driver.add_person_to_db(person)
- # Iot-LAB LDAP (if user already exist we do nothing)
- ret = self.driver.AddPerson(person)
- # user uid information is only in LDAP
- # Be carreful : global scope of dict slice_record in driver
- slice_record['login'] = ret['uid']
- return person
-
-
-
- def verify_keys(self, persons, users, peer, options=None):
- """
- .. warning:: unused
- """
- if options is None: options={}
- # existing keys
- key_ids = []
- for person in persons:
- key_ids.extend(person['key_ids'])
- keylist = self.driver.GetKeys(key_ids, ['key_id', 'key'])
-
- keydict = {}
- for key in keylist:
- keydict[key['key']] = key['key_id']
- existing_keys = keydict.keys()
-
- persondict = {}
- for person in persons:
- persondict[person['email']] = person
-
- # add new keys
- requested_keys = []
- updated_persons = []
- users_by_key_string = {}
- for user in users:
- user_keys = user.get('keys', [])
- updated_persons.append(user)
- for key_string in user_keys:
- users_by_key_string[key_string] = user
- requested_keys.append(key_string)
- if key_string not in existing_keys:
- key = {'key': key_string, 'key_type': 'ssh'}
- #try:
- ##if peer:
- #person = persondict[user['email']]
- #self.driver.testbed_shell.UnBindObjectFromPeer(
- # 'person',person['person_id'],
- # peer['shortname'])
- ret = self.driver.testbed_shell.AddPersonKey(
- user['email'], key)
- #if peer:
- #key_index = user_keys.index(key['key'])
- #remote_key_id = user['key_ids'][key_index]
- #self.driver.testbed_shell.BindObjectToPeer('key', \
- #key['key_id'], peer['shortname'], \
- #remote_key_id)
-
- # remove old keys (only if we are not appending)
- append = options.get('append', True)
- if append is False:
- removed_keys = set(existing_keys).difference(requested_keys)
- for key in removed_keys:
- #if peer:
- #self.driver.testbed_shell.UnBindObjectFromPeer('key', \
- #key, peer['shortname'])
-
- user = users_by_key_string[key]
- self.driver.testbed_shell.DeleteKey(user, key)
-
- return
+++ /dev/null
-""" specialized Xrn class for Iotlab. SA
-"""
-import re
-from sfa.util.xrn import Xrn
-
-def xrn_to_hostname(xrn):
- """Returns a node's hostname from its xrn.
- :param xrn: The nodes xrn identifier.
- :type xrn: Xrn (from sfa.util.xrn)
-
- :returns: node's hostname.
- :rtype: string
-
- """
- return Xrn.unescape(Xrn(xrn=xrn, type='node').get_leaf())
-
-
-def xrn_object(root_auth, hostname):
- """Creates a valid xrn object from the node's hostname and the authority
- of the SFA server.
-
- :param hostname: the node's hostname.
- :param root_auth: the SFA root authority.
- :type hostname: string
- :type root_auth: string
-
- :returns: the iotlab node's xrn
- :rtype: Xrn
-
- """
- return Xrn('.'.join([root_auth, Xrn.escape(hostname)]), type='node')
-
-# temporary helper functions to use this module instead of namespace
-def hostname_to_hrn (auth, hostname):
- """Turns node hostname into hrn.
- :param auth: Site authority.
- :type auth: string
- :param hostname: Node hostname.
- :type hostname: string.
-
- :returns: Node's hrn.
- :rtype: string
- """
- return IotlabXrn(auth=auth, hostname=hostname).get_hrn()
-
-def hostname_to_urn(auth, hostname):
- """Turns node hostname into urn.
- :param auth: Site authority.
- :type auth: string
- :param hostname: Node hostname.
- :type hostname: string.
-
- :returns: Node's urn.
- :rtype: string
- """
- return IotlabXrn(auth=auth, hostname=hostname).get_urn()
-
-# def slicename_to_hrn (auth_hrn, slicename):
- # return IotlabXrn(auth=auth_hrn, slicename=slicename).get_hrn()
-
-# def hrn_to_iotlab_slicename (hrn):
-# return IotlabXrn(xrn=hrn, type='slice').iotlab_slicename()
-
-# def hrn_to_iotlab_authname (hrn):
-# return IotlabXrn(xrn=hrn, type='any').iotlab_authname()
-
-
-class IotlabXrn (Xrn):
- """
- Defines methods to turn a hrn/urn into a urn/hrn, or to get the name
- of the slice/user from the hrn.
- """
- @staticmethod
- def site_hrn (auth):
- """Returns the site hrn, which is also the testbed authority in
- iotlab/cortexlab.
- """
- return auth
-
- def __init__ (self, auth=None, hostname=None, login=None,
- slicename=None, **kwargs):
- #def hostname_to_hrn(auth_hrn, login_base, hostname):
- if hostname is not None:
- self.type = 'node'
- # keep only the first part of the DNS name
- # escape the '.' in the hostname
- self.hrn = '.'.join( [auth, Xrn.escape(hostname)] )
- self.hrn_to_urn()
-
- elif login is not None:
- self.type = 'person'
- self.hrn = '.'.join([auth, login])
- self.hrn_to_urn()
- #def slicename_to_hrn(auth_hrn, slicename):
- elif slicename is not None:
- self.type = 'slice'
- slicename = '_'.join([login, "slice"])
- self.hrn = '.'.join([auth, slicename])
- self.hrn_to_urn()
- # split at the first _
-
- else:
- Xrn.__init__ (self, **kwargs)
-
-
- def iotlab_slicename (self):
- """Returns the slice name from an iotlab slice hrn.
-
- :rtype: string
- :returns: slice name.
- """
-
- self._normalize()
- leaf = self.leaf
- sliver_id_parts = leaf.split(':')
- name = sliver_id_parts[0]
- name = re.sub('[^a-zA-Z0-9_]', '', name)
- return name
-
- #def hrn_to_pl_authname(hrn):
- # def iotlab_authname (self):
- # self._normalize()
- # return self.authority[-1]
-
- # def iotlab_login_base (self):
- # self._normalize()
- # if self.type and self.type.startswith('authority'):
- # base = self.leaf
- # else:
- # base = self.authority[-1]
-
- # # Fix up names of GENI Federates
- # base = base.lower()
- # base = re.sub('\\\[^a-zA-Z0-9]', '', base)
-
- # if len(base) > 20:
- # base = base[len(base)-20:]
-
- # return base
class IotlabPosition(Element):
- fields = ['posx', 'posy','posz']
+ fields = ['x', 'y','z']
class IotlabLocation(Location):
fields = list(Location.fields)
+++ /dev/null
-This location is a placeholder for any specifics about
-e.g. deployments or test scripts that do not belong in sfa/ because we
-do not want them to be packaged.
+++ /dev/null
-python-apt
-python-dateutil
-python-debian
-python-debianbts
-python-ldap
-SOAPpy
-SQLAlchemy
-Tempita
-argparse
-chardet
-decorator
-passlib
-psycopg2
-pyOpenSSL
-elementtree
-lxml
-nose
-simplejson
-sqlalchemy-migrate
-wsgiref
-
+++ /dev/null
-###########################################################################
-# Copyright (C) 2012 by
-# <savakian@sfa2.grenoble.iotlab.info>
-#
-# Copyright: See COPYING file that comes with this distribution
-#
-###########################################################################
-#LDAP import
-from sfa.iotlab.LDAPapi import LDAPapi
-import ldap.modlist as modlist
-
-#logger sfa
-from sfa.util.sfalogging import logger
-
-#OAR imports
-from datetime import datetime
-from sfa.util.sfatime import SFATIME_FORMAT
-from sfa.iotlab.OARrestapi import OARrestapi
-
-#Test iotlabdriver
-from sfa.iotlab.iotlabdriver import IotlabDriver
-from sfa.iotlab.iotlabshell import IotlabShell
-from sfa.util.config import Config
-
-from sfa.generic import Generic
-import os
-import sys
-
-
-def message_and_wait(message):
- print message
- raw_input("Press Enter to continue...")
-
-def parse_options():
-
- #arguments supplied
- if len(sys.argv) > 1 :
- options_list = sys.argv[1:]
- #For each valid option, execute the associated function
- #(defined in the dictionnary supported_options)
- job_id = 1
- valid_options_dict = {}
- value_list = []
- #Passing options to the script should be done like this :
- #-10 OAR -2 IotlabDriver
- for option in options_list:
- if option in supported_options:
- #update the values used for the fonctions associated
- #with the options
-
- valid_options_dict[option] = value_list
- #empty the values list for next option
- value_list = []
- print valid_options_dict
- else:
- if option[0] == '-':
- value_list.append(option[1:])
- print "value_list", value_list
-
-
- return valid_options_dict
-
-def TestLdap(uid = None):
- logger.setLevelDebug()
-
- ldap_server = LDAPapi()
- ret = ldap_server.conn.connect(bind=True)
- ldap_server.conn.close()
- print "TEST ldap_server.conn.connect(bind=True)" , ret
-
- ret = ldap_server.conn.connect(bind=False)
- ldap_server.conn.close()
- print "TEST ldap_server.conn.connect(bind=False)", ret
-
- message_and_wait("\r\n \tLdapSeach : Get all users")
- ret = ldap_server.LdapSearch()
- print "\r\n", ret
-
- message_and_wait("\r\n \tLdapSeach : Get user with uid avakian")
- ret = ldap_server.LdapSearch('(uid=avakian)', [])
- print "\r\n", ret
-
- message_and_wait("\r\n generate ...")
- password = ldap_server.login_pwd.generate_password()
- print "\r\n TEST generate_password ", password
-
- data = {}
- data['last_name'] = "Drake"
- data['first_name'] = "Tim"
- data['givenName'] = data['first_name']
- data['mail'] = "robin@arkham.fr"
-
- record = {}
- record['hrn'] = 'iotlab.drake'
- record['last_name'] = "Drake"
- record['first_name'] = "Tim"
- record['mail'] = "robin@arkham.fr"
-
- login = ldap_server.LdapGenerateUniqueLogin(data)
- print "\r\n Robin \tgenerate_login ", login
-
- message_and_wait("\r\n find_max_uidNumber")
- maxi = ldap_server.find_max_uidNumber()
- print maxi
-
-
-
- ret = ldap_server.LdapAddUser(data)
- print "\r\n Robin \tLdapAddUser ", ret
-
- req_ldap = '(uid=' + login + ')'
- ret = ldap_server.LdapSearch(req_ldap, [])
- print "\r\n Robin \tldap_server.LdapSearch ids = %s %s" % (login, ret)
-
- message_and_wait("Password methods")
- password = "Thridrobin"
- enc = ldap_server.login_pwd.encrypt_password(password)
- print "\r\n Robin \tencrypt_password ", enc
-
- ret = ldap_server.LdapModifyUser(record, {'userPassword':enc})
- print "\r\n Robin \tChange password LdapModifyUser ", ret
-
-
-
- datanight = {}
- datanight['last_name'] = "Grayson"
- datanight['first_name'] = "Dick"
- datanight['givenName'] = datanight['first_name']
- datanight['mail'] = "nightwing@arkham.fr"
-
-
- record_night = {}
- record_night['hrn'] = 'iotlab.grayson'
- record_night['last_name'] = datanight['last_name']
- record_night['first_name'] = datanight['first_name']
- record_night['mail'] = datanight['mail']
-
- message_and_wait("\r\n LdapFindUser")
- ret = ldap_server.LdapFindUser(record_night)
- print "\r\n Nightwing \tldap_server.LdapFindUser %s : %s" % (record_night,
- ret)
-
- #ret = ldap_server.LdapSearch('(uid=grayson)', [])
- #print "\r\n Nightwing \tldap_server.LdapSearch ids = %s %s" %('grayson',ret )
- message_and_wait("Add user then delete user")
- ret = ldap_server.LdapAddUser(datanight)
- print "\r\n Nightwing \tLdapAddUser ", ret
-
- #ret = ldap_server.LdapResetPassword(record_night)
- #print "\r\n Nightwing \tLdapResetPassword de %s : %s" % (record_night, ret)
-
- ret = ldap_server.LdapDeleteUser(record_night)
- print "\r\n Nightwing \tLdapDeleteUser ", ret
-
-
- #record_myslice = {}
- #record_myslice['hrn']= 'iotlab.myslice'
- #record_myslice['last_name'] = 'myslice'
- #record_myslice['first_name'] = 'myslice'
- #record_myslice['mail'] = 'nturro@inria.fr'
- #pubkeymyslice = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuyRPwn8PZxjdhu+ciRuPyM0eVBn7XS7i3tym9F30UVhaCd09a/UEmGn7WJZdfsxV3hXqG1Wc766FEst97NuzHzELSuvy/rT96J0UHG4wae4pnzOLd6NwFdZh7pkPsgHMHxK9ALVE68Puu+EDSOB5bBZ9Q624wCIGxEpmuS/+X+dDBTKgG5Hi0WA1uKJwhLSbbXb38auh4FlYgXPsdpljTIJatt+zGL0Zsy6fdrsVRc5W8kr3/SmE4OMNyabKBNyxioSEuYhRSjoQAHnYoevEjZniP8IzscKK7qwelzGUfnJEzexikhsQamhAFti2ReiFfoHBRZxnSc49ioH7Kaci5w== root@rhoecos3.ipv6.lip6.fr"
-
- #pubkeytestuser = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYS8tzufciTm6GdNUGHQc64OfTxFebMYUwh/Jl04IPTvjjr26uakbM0M2v33HxZ5Q7PnmPN9pB/w+a+f7a7J4cNs/tApOMg2hb6UrLaOrdnDMOs4KZlfElyDsF3Zx5QwxPYvzsKADAbDVoX4NF9PttuDLdm2l3nLSvm89jfla00GBg+K8grdOCHyYZVX/Wt7kxhXDK3AidQhKJgn+iD5GxvtWMBE+7S5kJGdRW1W10lSLBW3+VNsCrKJB2s8L55Xz/l2HNBScU7T0VcMQJrFxEXKzLPagZsMz0lfLzHESoGHIZ3Tz85DfECbTtMxLts/4KoAEc3EE+PYr2VDeAggDx testuser@myslice"
-
-
-
-
- return
-
-
-def get_stuff(oar, uri):
- import httplib
- import json
- headers = {}
- data = json.dumps({})
-
- headers['X-REMOTE_IDENT'] = 'avakian'
- headers['content-length'] = '0' #seems that it does not work if we don't add this
-
-
- conn = httplib.HTTPConnection(oar.oarserver['ip'], oar.oarserver['port'])
- conn.request("GET", uri, data , headers )
- resp = (conn.getresponse()).read()
-
- conn.close()
-
-
- js = json.loads(resp)
- return js
-
-
-def TestOAR(job_id = None):
- print "JOB_ID", job_id
- if isinstance(job_id, list) :
- if len(job_id) >= 1:
- job_id = job_id[0]
- else:
- job_id = '1'
- else:
- job_id = '1'
- print "JOB_ID", job_id
- oar = OARrestapi()
- print "============USING OAR CLASS PARSING METHODS ================"
-
- message_and_wait("\r\nGET_reserved_nodes")
- nodes = oar.parser.SendRequest("GET_reserved_nodes", username = 'avakian')
- print "\r\n OAR GET_reserved_nodes ", nodes
-
- message_and_wait("GET_jobs")
- jobs = oar.parser.SendRequest("GET_jobs")
- print "\r\n OAR GET_jobs ", jobs
-
- message_and_wait( "\r\n GET_jobs_id")
- jobs = oar.parser.SendRequest("GET_jobs_id", job_id, 'avakian')
- print "\r\n OAR GET_jobs_id ", jobs
-
- # Check that the OAR requests are valid
-
- print "============RAW JSON FROM OAR ================"
- message_and_wait("\r\n Get all the jobs in the state Running,Waiting, \
- Launching of the user ")
- uri = '/oarapi/jobs/details.json?state=Running,Waiting,Launching&user=avakian'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
-
- message_and_wait("\r\nGet information on the job identified by its job_id")
- uri = '/oarapi/jobs/' + job_id +'.json'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
-
- message_and_wait(" \r\nGet all the job's resources, \
- job defined by its job id %s"%(job_id))
- uri = '/oarapi/jobs/' + job_id + '/resources.json'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
-
- message_and_wait("\r\n Get server's date and timezone")
- server_timestamp, server_tz = oar.parser.SendRequest("GET_timezone")
- print "\r\n OAR GetTimezone ", server_timestamp, server_tz
- print(datetime.fromtimestamp(int(server_timestamp)).strftime(SFATIME_FORMAT))
-
- message_and_wait("\r\n Get all the resources with details from OAR")
- uri = '/oarapi/resources/full.json'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
- message_and_wait("\r\n Get all the jobs scheduled by the user")
- uri = '/oarapi/jobs.json?user=avakian'
- raw_json = get_stuff(oar, uri)
- print "\r\n OAR uri %s \r\n \t raw_json %s \r\n raw_json_keys %s " %(uri,
- raw_json, raw_json.keys())
-
- return
-
-
-
-def TestIotlabshell(param = None):
-
- config = Config()
- shell = IotlabShell(config)
-
- message_and_wait("\r\n \r\n GetReservedNodes")
- nodes = shell.GetReservedNodes()
- print nodes
-
- message_and_wait("\r\n GetPersons")
- persons = shell.GetPersons()
- print "\r\n \r\n GetPersons", persons
-
-
- message_and_wait("\r\n GetLeases for the login avakian")
- leases = shell.GetLeases(login='avakian')
- print leases
-
- message_and_wait("\r\n GetLeases for slice iotlab.avakian_slice")
- leases = shell.GetLeases(lease_filter_dict=
- {'slice_hrn':'iotlab.avakian_slice'})
- print leases
-
- message_and_wait("\r\n GetLeases t_from 1405070000 ")
- leases = shell.GetLeases(lease_filter_dict={'t_from':1405070000})
- print leases
-
-def TestIotlabDriver(job_id = None):
- if job_id is None:
- job_id = 1
-
- if isinstance(job_id, list) and len(job_id) == 1:
- job_id = job_id[0]
-
- api = Generic.the_flavour().make_api(interface='registry')
- iotlabdriver = IotlabDriver(api)
-
- # Iotlabdriver methods
- slice_hrn = 'iotlab.avakian_slice'
- message_and_wait(("\r\n GetSlices slice_hrn %s "%(slice_hrn)))
- sl = iotlabdriver.GetSlices(
- slice_filter= slice_hrn, slice_filter_type='slice_hrn')
- print sl
-
- message_and_wait("\r\n GetSlices slice filter 20 (record_id_user) ")
- sl = iotlabdriver.GetSlices(slice_filter='20',
- slice_filter_type='record_id_user')
- print sl
-
- message_and_wait("\r\n GetSlices :all slice")
- sl = iotlabdriver.GetSlices()
- print sl
-
-
-
-
-
-
-def TestSQL(arg = None):
- from sfa.storage.model import make_record, RegSlice, RegRecord
- from sfa.storage.alchemy import global_dbsession
-
-
- from sqlalchemy.orm import joinedload
-
- slice_hrn = 'iotlab.avakian_slice'
- request = global_dbsession.query(RegSlice).options(joinedload('reg_researchers'))
- solo_query_slice_list = request.filter_by(hrn=slice_hrn).first()
-
- print "\r\n \r\n =========== solo_query_slice_list RegSlice \
- joinedload('reg_researchers') slice_hrn %s first %s \r\n \t "\
- %(slice_hrn, solo_query_slice_list.__dict__)
-
- query_slice_list = request.all()
- print "\r\n \r\n =========== query_slice_list RegSlice \
- joinedload('reg_researchers') ALL \r\n \t", \
- query_slice_list[0].__dict__
-
- return_slicerec_dictlist = []
- record = query_slice_list[0]
- print "\r\n \r\n =========== \r\n \t", record
-
- tmp = record.__dict__
- print "\r\n \r\n =========== \r\n \t", tmp
- tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
- print "\r\n \r\n =========== \r\n \t", tmp
- #del tmp['reg_researchers']['_sa_instance_state']
- return_slicerec_dictlist.append(tmp)
-
- print "\r\n \r\n =========== \r\n \t", return_slicerec_dictlist
-
- all_records = global_dbsession.query(RegRecord).all()
-
-
-
-def RunAll( arg ):
- TestLdap()
- TestOAR()
- TestIotlabDriver()
- TestSfi()
-
-
-supported_options = {
- 'OAR' : TestOAR,
- 'LDAP': TestLdap,
- 'driver': TestIotlabDriver,
- 'shell': TestIotlabshell,
- 'sql':TestSQL,
- 'all' : RunAll, }
-
-def main():
- opts = parse_options()
- print opts
- for opt in opts:
- supported_options[opt](opts[opt])
-
-
-if __name__ == "__main__":
- main()
+++ /dev/null
-#!/usr/bin/env python
-import sys
-import os
-from sfa.iotlab.LDAPapi import LDAPapi
-from difflib import SequenceMatcher
-
-def parse_options():
-
- #arguments supplied
- if len(sys.argv) > 1 :
- options_list = sys.argv[1:]
- print options_list
- rspec_rep = options_list[0]
- return rspec_rep
- else:
- print "Must supply Rspecs directory ", sys.argv[1:]
- return
-
-
-rspec_dir = parse_options()
-print "DIRECTORY SUPPLIED" , rspec_dir
-rspec_filename_list = ['firexp_avakian_slice_iotlab.rspec',
-'firexp_iotlab_slice_iotlab.rspec',
-'iotlab_avakian_slice_iotlab2.rspec',
-'iotlab_avakian_slice_plab.rspec',
-'firexp_iotlab_slice_all.rspec',
-'iotlab_avakian_slice_all.rspec',
-'iotlab_avakian_slice_iotlab.rspec',
-'iotlab_user_slice_iotlab.rspec',
-'test_delete_all_leases.rspec']
-
-rspec_filename_dict = {
- ('iotlab_avakian', 'iotlab', 'allocate' ):
- "sfi.py allocate iotlab.avakian_slice " + rspec_dir + \
- 'iotlab_avakian_slice_iotlab.rspec',
-
- ('iotlab_avakian', 'iotlab2', 'allocate'):
- "sfi.py allocate iotlab.avakian_slice " + rspec_dir + \
- 'iotlab_avakian_slice_iotlab2.rspec',
-
- ('firexp_user','iotlab', 'allocate'):
- "sfi.py allocate firexp.flab.iotlab_slice " + rspec_dir + \
- 'firexp_iotlab_slice_iotlab.rspec',
-
- ('firexp_user', 'all', 'allocate'):
- "sfi.py allocate firexp.flab.iotlab_slice "+ rspec_dir + \
- 'firexp_iotlab_slice_all.rspec',
-
- ('iotlab_user', 'iotlab', 'allocate'):
- "sfi.py allocate iotlab.user_slice "+ rspec_dir + \
- 'iotlab_user_slice_iotlab.rspec',
-
- ('firexp_avakian','iotlab', 'allocate'):
- "sfi.py allocate firexp.flab.avakian_slice " + rspec_dir + \
- 'firexp_avakian_slice_iotlab.rspec',
-
- ('iotlab_avakian', 'plab', 'allocate') :
- "sfi.py allocate iotlab.avakian_slice " + rspec_dir + \
- 'iotlab_avakian_slice_plab.rspec',
-
- ('iotlab_avakian', 'all', 'allocate') :
- "sfi.py allocate iotlab.avakian_slice " + rspec_dir + \
- 'iotlab_avakian_slice_all.rspec',
-
- ('iotlab_avakian', 'iotlab', 'provision' ):
- "sfi.py provision iotlab.avakian_slice",
-
- ('iotlab_avakian', 'iotlab2', 'provision'):
- "sfi.py provision iotlab.avakian_slice",
-
- ('firexp_user','iotlab', 'provision'):
- "sfi.py provision firexp.flab.iotlab_slice",
-
- ('firexp_user', 'all', 'provision'):
- "sfi.py provision firexp.flab.iotlab_slice",
-
- ('iotlab_user', 'iotlab', 'provision'):
- "sfi.py provision iotlab.user_slice",
-
- ('firexp_avakian','iotlab', 'provision'):
- "sfi.py provision firexp.flab.avakian_slice",
-
- ('iotlab_avakian', 'plab', 'provision') :
- "sfi.py provision iotlab.avakian_slice",
-
- ('iotlab_avakian', 'all', 'provision') :
- "sfi.py provision iotlab.avakian_slice",
-
- ('iotlab_avakian', 'iotlab', 'describe' ):
- "sfi.py describe iotlab.avakian_slice iotlab_avakian_slice_iotlab.rspec",
-
- ('iotlab_avakian', 'iotlab2', 'describe'):
- "sfi.py describe iotlab.avakian_slice iotlab_avakian_slice_iotlab2.rspec",
-
- ('firexp_user','iotlab', 'describe'):
- "sfi.py describe firexp.flab.iotlab_slice firexp_iotlab_slice_iotlab.rspec",
-
- ('firexp_user', 'all', 'describe'):
- "sfi.py describe firexp.flab.iotlab_slice firexp_iotlab_slice_all.rspec",
-
- ('iotlab_user', 'iotlab', 'describe'):
- "sfi.py describe iotlab.user_slice iotlab_user_slice_iotlab.rspec",
-
- ('firexp_avakian','iotlab', 'describe'):
- "sfi.py describe firexp.flab.avakian_slice firexp_avakian_slice_iotlab.rspec",
-
- ('iotlab_avakian', 'plab', 'describe') :
- "sfi.py describe iotlab.avakian_slice iotlab_avakian_slice_plab.rspec",
-
- ('iotlab_avakian', 'all', 'describe') :
- "sfi.py describe iotlab.avakian_slice iotlab_avakian_slice_all.rspec"
- }
-
-print rspec_filename_dict
-# check if the firexp user (uid user) is already in LDAP
-# in this is the case, delete it :
-ldap_server = LDAPapi()
-dn = 'uid=' + 'user' + ',' + ldap_server.baseDN
-result = ldap_server.LdapSearch('(uid=user)', [])
-
-if result != []:
- retval = ldap_server.LdapDelete(dn)
- print "deleting firexp user : ", retval
-
-# Change the sfi config file to be able to start the experiment on the federated
-# testbed with another identity and another slice
-print "config sfi"
-with open ("/root/.sfi/sfi_config", "r") as sfi_config:
- sfi_config_txt = [line for line in sfi_config]
-
-with open("/root/.sfi/sfi_config_iotlab", "r") as sfi_config_iotlab:
- sfi_config_iotlab_txt = [line for line in sfi_config_iotlab]
-
-with open("/root/.sfi/sfi_config_firexp", "r") as sfi_config_firexp:
- sfi_config_firexp_txt = [line for line in sfi_config_firexp]
-# check that we are using the iotlab sfi configuration
-result1 = SequenceMatcher(None, sfi_config_txt, sfi_config_iotlab_txt)
-
-result2 = SequenceMatcher(None, sfi_config_txt, sfi_config_firexp_txt)
-
-if result1.ratio() != 1.0:
- os.system('cp /root/.sfi/sfi_config_iotlab /root/.sfi/sfi_config')
-
-os.system('cat /root/.sfi/sfi_config')
-os.system('rm /root/tests_rspecs/iotlab_devlille_OUTPUT.rspec')
-
-print " ================= SFI.PY LIST IOTLAB ============="
-os.system('sfi.py list iotlab')
-
-
-print " ================= SFI.PY RESOURCES ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources')
-
-
-print " ================= SFI.PY RESOURCES -R IOTLAB ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -r iotlab')
-
-
-print " ================= SFI.PY RESOURCES -L ALL ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l all')
-
-print " ================= SFI.PY RESOURCES -R IOTLAB -L ALL ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -r iotlab -l all')
-
-# print " ================= SFI.PY RESOURCES -O output rspec ==========="
-# os.system('sfi.py resources -o /root/tests_rspecs/iotlab_devlille_OUTPUT.rspec')
-
-print " ================= SFI.PY RESOURCES -L LEASES ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l leases')
-
-
-print " ================= SFI.PY SHOW USER ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py show iotlab.avakian')
-
-print " ================= SFI.PY SHOW NODE ============="
-os.system('sfi.py show iotlab.m3-3.devgrenoble.iot-lab.info')
-
-
-
-print " ================= SFI.PY STATUS SLICE ============="
-os.system('sfi.py status iotlab.avakian_slice')
-
-print " ================= SFI.PY ALLOCATE SLICE on iotlab only ============="
-raw_input("Press Enter to continue...")
-os.system( rspec_filename_dict[('iotlab_avakian','iotlab' , 'allocate')])
-
-
-print " ================= SFI.PY PROVISION SLICE on iotlab only ============="
-raw_input("Press Enter to continue...")
-os.system( rspec_filename_dict[('iotlab_avakian','iotlab' , 'provision')])
-
-
-print " ================= SFI.PY DESCRIBE SLICE on iotlab only ============="
-raw_input("Press Enter to continue...")
-os.system( rspec_filename_dict[('iotlab_avakian','iotlab' , 'describe')])
-
-
-print " ================= SFI.PY RESOURCES -l all iotlab.avakian_slice ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l all iotlab.avakian_slice')
-
-
-print " ================= SFI.PY DELETE SLICE ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py delete iotlab.avakian_slice')
-
-
-print " ================= SFI.PY ALLOCATE SLICE on iotlab and firexp ============="
-raw_input("Press Enter to continue...")
-os.system(rspec_filename_dict[('iotlab_avakian','all', 'allocate')])
-
-
-print " ================= SFI.PY RESOURCES -l all -r iotlab iotlab.avakian_slice ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l all -r iotlab iotlab.avakian_slice')
-
-
-print " =================SFI.PY RESOURCES -L LEASES -R IOTLAB ============== "
-os.system('sfi.py resources -r iotlab -l leases')
-
-
-print " ================= SFI.PY DELETE SLICE ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py delete iotlab.avakian_slice')
-
-print "\r\n \r\n"
-
-print " *********changing to firexp sfi config ***************"
-os.system('cp /root/.sfi/sfi_config_firexp /root/.sfi/sfi_config')
-
-
-
-print " ================= SFI.PY ALLOCATE SLICE on iotlab and firexp ============="
-raw_input("Press Enter to continue...")
-os.system(rspec_filename_dict[('firexp_user','all', 'allocate')])
-
-print " ================= SFI.PY DESCRIBE SLICE on iotlab and firexp ============="
-raw_input("Press Enter to continue...")
-os.system(rspec_filename_dict[('firexp_user','all', 'describe')])
-
-print " ================= SFI.PY PROVISION SLICE on iotlab and firexp ============="
-raw_input("Press Enter to continue...")
-os.system(rspec_filename_dict[('firexp_user','all', 'provision')])
-
-
-print " ================= SFI.PY SHOW SLICE ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py show firexp.flab.iotlab_slice')
-
-
-print " ================= SFI.PY RESOURCES -l leases firexp.flab.iotlab_slice ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources -l leases firexp.flab.iotlab_slice')
-
-
-print " ================= SFI.PY RESOURCES firexp.flab.iotlab_slice ============="
-raw_input("Press Enter to continue...")
-os.system('sfi.py resources firexp.flab.iotlab_slice')
-
-
-
-
+++ /dev/null
-Rspec file names
-======================
-Rspec file names are constructed as follows :
- slice name used in this rspec + network in which the reserved nodes are
-
-Networks can be : iotlab, plab, all (iotlab + plab)
-
-Slices and users
-=================
-user:
-login iotlab : user
-hrn iotlab: iotlab.user
-hrn firexp: firexp.flab.iotlab_user
-slice iotlab: iotlab.user_slice
-slice firexp : firexp.flab.iotlab_slice
-
-
-This special test user comes from Firexp and is considered as an
-external user coming from a federated testbedd for Iotlab.
-
-user:
-login iotlab: avakian
-slice iotlab: iotlab.avakian_slice
-hrn firexp : firexp.flab.avakian (?)
-slice firexp : firexp.flab.avakian_slice (?)
-
-This user comes from iotlab.
-
-
-Leases
-======
-
-The starting time of the leases in those RSpec files are
-usually set to be in 2014, so that we don't have to keep the
-date in mind and check that we are not scheduling a lease
-in the past.
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+firexp:flab+slice+avakian_slice" start_time="1386765700" duration="10">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-09-05T13:30:10Z" generated="2013-09-05T12:30:10Z">
- <network name="plab" >
- <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
- <hostname>effet.pl.sophia.inria.fr</hostname>
- <exclusive>TRUE</exclusive>
- <granularity grain="3600"/>
- <interface component_id="urn:publicid:IDN+plab+interface+node2:eth0" ipv4="138.96.116.135"/>
- <hrn>planetlab.test.plab.effet</hrn>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+firexp:flab+slice+iotlab_slice" start_time="1412938800" duration="1">
- <node component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr"/>
- </lease>
- </network>
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+firexp:flab+slice+iotlab_slice" start_time="1412938800" duration="60">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
- </network>
-</RSpec>
-
-
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="node5.devlille.iotlab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="1"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="1"/>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+firexp:flab+slice+iotlab_slice" start_time="1405078900" duration="600">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-09-05T13:30:10Z" generated="2013-09-05T12:30:10Z">
- <network name="plab" >
- <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
- <hostname>effet.pl.sophia.inria.fr</hostname>
- <exclusive>TRUE</exclusive>
- <granularity grain="3600"/>
- <interface component_id="urn:publicid:IDN+plab+interface+node2:eth0" ipv4="138.96.116.135"/>
- <hrn>planetlab.test.plab.effet</hrn>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1410346800" duration="1">
- <node component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr"/>
- </lease>
- </network>
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1410346800" duration="60">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
- </network>
-</RSpec>
-
-
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2014-52-27T15:14:10Z" generated="2013-05-19T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+senslab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="60"/>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+ple:upmc+slice+myslicedemo" start_time="1400604923" duration="20">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-8.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-8.devlille.iot-lab.info.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-8.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="600"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-5.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-5.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-5.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="600"/>
- <sliver/>
- </node>
- </network>
- <network name="plab">
- <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
- <hostname>effet.pl.sophia.inria.fr</hostname>
- <exclusive>FALSE</exclusive>
- <interface component_id="urn:publicid:IDN+plab+interface+node1:eth0" ipv4="138.96.116.135"/>
- <arch>x86_64</arch>
- <fcdistro>f14</fcdistro>
- <pldistro>onelab</pldistro>
- <hrn>planetlab.test.plab.effet</hrn>
- <sliver/>
- </node>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1405078900" duration="10">
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-5.devlille.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-8.devlille.iot-lab.info"/>
- </lease>
- </network>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2014-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="plab">
- <node component_manager_id="urn:publicid:IDN+plab+authority+cm" component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr" boot_state="boot" component_name="effet.pl.sophia.inria.fr" site_id="urn:publicid:IDN+plab:plab+authority+sa">
- <hostname>effet.pl.sophia.inria.fr</hostname>
- <exclusive>TRUE</exclusive>
- <granularity grain="3600"/>
- <interface component_id="urn:publicid:IDN+plab+interface+node2:eth0" ipv4="138.96.116.135"/>
- <hrn>planetlab.test.plab.effet</hrn>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+avakian_slice" start_time="1405080000" duration="2">
- <node component_id="urn:publicid:IDN+plab:plab+node+effet.pl.sophia.inria.fr"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-02-27T15:14:10Z" generated="2013-02-27T14:14:10Z">
- <network name="iotlab">
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info" boot_state="Alive" component_name="wsn430-12.devlille.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>wsn430-12.devlille.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="600"/>
- <sliver/>
- </node>
- <node component_manager_id="urn:publicid:IDN+iotlab+authority+sa" component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info" boot_state="Alive" component_name="a8-11.devgrenoble.iot-lab.info" site_id="urn:publicid:IDN+iotlab+authority+sa">
- <hostname>a8-11.devgrenoble.iot-lab.info</hostname>
- <location country="France"/>
- <exclusive>TRUE</exclusive>
- <granularity grain="600"/>
- <sliver/>
- </node>
- </network>
- <lease slice_id="urn:publicid:IDN+iotlab+slice+user_slice" start_time="1405078900" duration="601">
- <node component_id="urn:publicid:IDN+iotlab+node+a8-11.devgrenoble.iot-lab.info"/>
- <node component_id="urn:publicid:IDN+iotlab+node+wsn430-12.devlille.iot-lab.info"/>
- </lease>
-</RSpec>
+++ /dev/null
-<?xml version="1.0"?>
-<RSpec type="SFA" expires="2013-09-05T13:30:10Z" generated="2013-09-05T12:30:10Z">
- <network name="plab" >
- </network>
- <network name="iotlab">
- </network>
-</RSpec>
-
-