-Copyright (c) 2008 Board of Trustees, Princeton University
+Copyright (c) 2008-2013 Board of Trustees, Princeton University
+Copyright (c) 2010-2013 INRIA, Institut National de Recherche en Informatique et en Automatique
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and/or hardware specification (the “Work”) to
#
# overwritten by the specfile
DESTDIR="/"
-
+PREFIX=/usr
##########
all: python wsdl
sed -e "s,@VERSIONTAG@,$(VERSIONTAG),g" -e "s,@SCMURL@,$(SCMURL),g" sfa/util/version.py.in > $@
xmlbuilder-install:
- cd xmlbuilder-0.9 && python setup.py install --root=$(DESTDIR) && cd -
+ cd xmlbuilder-0.9 && python setup.py install --prefix=$(PREFIX) --root=$(DESTDIR) && cd -
rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/*egg-info
# postinstall steps - various cleanups and tweaks for a nicer rpm
python-install:
- python setup.py install --root=$(DESTDIR)
+ python setup.py install --prefix=$(PREFIX) --root=$(DESTDIR)
chmod 444 $(DESTDIR)/etc/sfa/default_config.xml
rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/*egg-info
rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/sfa/storage/migrations
.PHONY: wsdl wsdl-install wsdl-clean
+##########
+debian: version
+ $(MAKE) -f Makefile.debian debian
+debian.clean:
+ $(MAKE) -f Makefile.debian clean
+
##########
tests-install:
mkdir -p $(DESTDIR)/usr/share/sfa/tests
synclib: synccheck
+$(RSYNC) --relative ./sfa/ --exclude migrations $(SSHURL)/usr/lib\*/python2.\*/site-packages/
+synclibdeb: synccheck
+ +$(RSYNC) --relative ./sfa/ --exclude migrations $(SSHURL)/usr/share/pyshared/
syncbin: synccheck
+$(RSYNC) $(BINS) $(SSHURL)/usr/bin/
syncinit: synccheck
# full-fledged
sync: synclib syncbin syncinit syncconfig syncrestart
+syncdeb: synclibdeb syncbin syncinit syncconfig syncrestart
# 99% of the time this is enough
syncfast: synclib syncrestart
--- /dev/null
+PROJECT=sfa
+VERSION=$(shell python -c "from sfa.util.version import version_tag; print version_tag" | sed -e s,-,.,)
+DATE=$(shell date -u +"%a, %d %b %Y %T")
+DEBIAN_TARBALL=../$(PROJECT)_$(VERSION).orig.tar.bz2
+
+debian: debian/changelog debian.source debian.package
+
+force:
+
+debian/changelog: debian/changelog.in
+ sed -e "s|@VERSION@|$(VERSION)|" -e "s|@DATE@|$(DATE)|" debian/changelog.in > debian/changelog
+
+# TARBALL is passed from the main build (/build/Makefile) to the 'make debian' call
+debian.source: force
+ rsync -a $(TARBALL) $(DEBIAN_TARBALL)
+
+debian.package:
+ debuild -uc -us -b
+
+debian.clean:
+ $(MAKE) -f debian/rules clean
+ rm -rf build/ MANIFEST ../*.tar.gz ../*.dsc ../*.build
+ find . -name '*.pyc' -delete
+
all: $(ALL)
-ple: auto-ple-reg auto-ple-sa-lr.out
+ple: auto-ple-reg auto-ple-sa-lr
####################
define bundle_scan_target
elif kind=="credential":
cred = Credential(filename = filename)
print '--------------------',filename,'IS A',kind
- cred.dump(dump_parents = options.dump_parents)
+ cred.dump(dump_parents = options.dump_parents, show_xml=options.show_xml)
if options.extract_gids:
print '--------------------',filename,'embedded GIDS'
extract_gids(cred, extract_parents = options.dump_parents)
parser.add_option("-g", "--extract-gids", action="store_true", dest="extract_gids", default=False, help="Extract GIDs from credentials")
parser.add_option("-p", "--dump-parents", action="store_true", dest="dump_parents", default=False, help="Show parents")
parser.add_option("-e", "--extensions", action="store_true", dest="show_extensions", default="False", help="Show certificate extensions")
- parser.add_option("-v", "--verbose", action='count', dest='verbose', default=0)
+ parser.add_option("-v", "--verbose", action='count', dest='verbose', default=0, help="More and more verbose")
+ parser.add_option("-x", "--xml", action='store_true', dest='show_xml', default=False, help="dumps xml tree (cred. only)")
(options, args) = parser.parse_args()
logger.setLevelFromOptVerbose(options.verbose)
</variablelist>
</category>
+ <!-- ======================================== -->
+ <category id="sfa_nitos">
+ <name></name>
+ <description>The settings that tell this SFA instance how to interact with the NITOS testbed.</description>
+
+ <variablelist>
+ <variable id="url" type="string">
+ <name>XMLRPC URL</name>
+ <value>http://195.251.17.239:8080/RPC2</value>
+ <description>URL for the NITOS Scheduler xmlrpc API</description>
+ </variable>
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+ <category id="sfa_dummy">
+ <name></name>
+    <description>The settings for using SFA with a dummy testbed.</description>
+
+ <variablelist>
+ <variable id="url" type="string">
+ <name>XMLRPC URL</name>
+ <value>http://127.0.0.1:8080</value>
+ <description>URL for the Dummy Testbed xmlrpc API</description>
+ </variable>
+ </variablelist>
+ </category>
+
+ <!-- ======================================== -->
+
</variables>
<comps>
"SFA_AGGREGATE_HOST",
"SFA_SM_HOST",
"SFA_DB_HOST",
- "SFA_PLC_URL",
- "SFA_PLC_USER",
- "SFA_PLC_PASSWORD",
]
+flavour_xml_section_hash = { \
+ 'pl':'sfa_plc',
+ 'openstack':'sfa_nova',
+ 'fd':'sfa_federica',
+ 'nitos':'sfa_nitos',
+ 'dummy':'sfa_dummy',
+ }
configuration={ \
'name':'sfa',
'service':"sfa",
(service,service))
elif command in "uU":
global usual_variables
+ global flavour_xml_section_hash
try:
for varname in usual_variables:
(category,variable) = cdef.locate_varname(varname)
if not (category is None and variable is None):
prompt_variable(cdef, cread, cwrite, category, variable, False)
+
+ # set the driver variable according to the already set flavour
+ generic_flavour = cwrite.items('sfa')[0][1]
+ for section in cdef.sections():
+ if generic_flavour in flavour_xml_section_hash and flavour_xml_section_hash[generic_flavour] == section:
+ for item in cdef.items(section):
+ category = section
+ variable = item[0]
+ prompt_variable(cdef, cread, cwrite, category, variable, False)
+ break
+
except Exception, inst:
if (str(inst) != 'BailOut'):
raise
--- /dev/null
+sfa (@VERSION@) UNRELEASED; urgency=low
+
+ * Initial release.
+
+ -- Thierry Parmentelat <thierry.parmentelat@inria.fr> @DATE@ +0000
--- /dev/null
+Source: sfa
+Maintainer: Thierry Parmentelat <Thierry.Parmentelat@inria.fr>
+Section: misc
+Priority: optional
+Standards-Version: 3.9.2
+Build-Depends: devscripts, debhelper (>=7.0.50~), debconf, dpatch, python-setuptools, make
+
+Package: sfa
+Architecture: any
+Depends: postgresql (>= 8.2), python-psycopg2, python-sqlalchemy, python-migrate, uuid-runtime
+Description: Server-side for SFA, generic implementation derived from PlanetLab
+
+Package: sfa-common
+Architecture: any
+Depends: python (>= 2.7), python-openssl (>= 0.7), python-m2crypto, python-dateutil, python-lxml, python-libxslt1, python-zsi, xmlsec1
+Description: Python libraries for SFA, generic implementation derived from PlanetLab
+
+Package: sfa-flashpolicy
+Architecture: any
+Depends: sfa-common
+Description: SFA support for flash clients
+
+Package: sfa-client
+Architecture: any
+Depends: sfa-common
+Description: sfi, the SFA experimenter-side CLI
+
+Package: sfa-plc
+Architecture: any
+Depends: sfa-common
+Description: the SFA layer around MyPLC
+
+Package: sfa-federica
+Architecture: any
+Depends: sfa-common
+Description: the SFA layer around Federica
+
+Package: sfa-nitos
+Architecture: any
+Depends: sfa-common
+Description: the SFA layer around NITOS
+
+Package: sfa-senslab
+Architecture: any
+Depends: sfa-common, python-passlib, python-ldap
+Description: the SFA layer around SensLab
+
+Package: sfa-dummy
+Architecture: any
+Depends: sfa-common
+Description: the SFA layer around a Dummy Testbed
+
+Package: sfa-sfatables
+Architecture: any
+Depends: sfa-common
+Description: sfatables policy tool for SFA
+
+Package: sfa-xmlbuilder
+Architecture: any
+Provides: python-xmlbuilder
+Description: third-party xmlbuilder tool
+
+Package: sfa-tests
+Architecture: any
+Depends: sfa-common
+Description: unit tests suite for SFA
+
--- /dev/null
+#!/usr/bin/make -f
+# -*- makefile -*-
+
+%:
+ dh $@
--- /dev/null
+etc/sfa/sfi_config
+usr/bin/sfi*.py*
+usr/bin/sfi
+usr/bin/get*.py*
+usr/bin/setRecord.py*
+usr/bin/sfascan.py*
+usr/bin/sfascan
+usr/bin/sfadump.py*
--- /dev/null
+usr/lib*/python*/site-packages/sfa/__init__.py*
+usr/lib*/python*/site-packages/sfa/trust
+usr/lib*/python*/site-packages/sfa/storage
+usr/lib*/python*/site-packages/sfa/util
+usr/lib*/python*/site-packages/sfa/server
+usr/lib*/python*/site-packages/sfa/methods
+usr/lib*/python*/site-packages/sfa/generic
+usr/lib*/python*/site-packages/sfa/managers
+usr/lib*/python*/site-packages/sfa/importer
+usr/lib*/python*/site-packages/sfa/rspecs
+usr/lib*/python*/site-packages/sfa/client
--- /dev/null
+usr/lib*/python*/site-packages/sfa/dummy
--- /dev/null
+usr/lib*/python*/site-packages/sfa/federica
--- /dev/null
+usr/bin/sfa_flashpolicy.py*
+etc/sfa/sfa_flashpolicy_config.xml
--- /dev/null
+usr/lib*/python*/site-packages/sfa/nitos
--- /dev/null
+usr/lib*/python*/site-packages/sfa/planetlab
+usr/lib*/python*/site-packages/sfa/openstack
+etc/sfa/pl.rng
+etc/sfa/credential.xsd
+etc/sfa/top.xsd
+etc/sfa/sig.xsd
+etc/sfa/xml.xsd
+etc/sfa/protogeni-rspec-common.xsd
+etc/sfa/topology
--- /dev/null
+usr/lib*/python*/site-packages/sfa/senslab
--- /dev/null
+etc/sfatables/*
+usr/bin/sfatables
+usr/lib*/python*/site-packages/sfatables
--- /dev/null
+usr/share/sfa/tests
--- /dev/null
+usr/lib*/python*/site-packages/xmlbuilder
--- /dev/null
+usr/bin/sfa-start.py*
+usr/bin/sfaadmin.py*
+usr/bin/sfaadmin
+usr/bin/keyconvert.py*
+usr/bin/sfa-config-tty
+usr/bin/sfa-config
+etc/sfa/default_config.xml
+etc/sfa/aggregates.xml
+etc/sfa/registries.xml
+etc/init.d/sfa
+etc/init.d/functions.sfa
+usr/share/sfa/migrations
+usr/share/sfa/examples
+var/www/html/wsdl/*.wsdl
--- /dev/null
+#!/bin/bash
+# this file is not used yet
+# I take it the idea was to implement
+# something like chkconfig sfa on
+
+update-rc.d sfa defaults
--- /dev/null
+3.0 (quilt)
--- /dev/null
+# -*-Shell-script-*-
+#
+# Thierry, jan 17 2013
+# this file was put together by Jordan to provide the same interface as
+# /etc/init.d/functions on fedora systems
+# (probably extracted as-is from one of the Fedora releases, but not sure about that)
+#
+# we unconditionally ship this as /etc/init.d/functions.sfa,
+# and then our own initscript (init.d/sfa) does source that
+# conditionally, i.e. when run on debian systems
+####################
+#
+# functions This file contains functions to be used by most or all
+# shell scripts in the /etc/init.d directory.
+#
+
+TEXTDOMAIN=initscripts
+
+# Make sure umask is sane
+umask 022
+
+# Set up a default search path.
+PATH="/sbin:/usr/sbin:/bin:/usr/bin"
+export PATH
+
+if [ $PPID -ne 1 -a -z "$SYSTEMCTL_SKIP_REDIRECT" ] && \
+ ( /bin/mountpoint -q /cgroup/systemd || /bin/mountpoint -q /sys/fs/cgroup/systemd ) ; then
+ case "$0" in
+ /etc/init.d/*|/etc/rc.d/init.d/*)
+ _use_systemctl=1
+ ;;
+ esac
+fi
+
+systemctl_redirect () {
+ local s
+ local prog=${1##*/}
+ local command=$2
+
+ case "$command" in
+ start)
+ s=$"Starting $prog (via systemctl): "
+ ;;
+ stop)
+ s=$"Stopping $prog (via systemctl): "
+ ;;
+ reload|try-reload)
+ s=$"Reloading $prog configuration (via systemctl): "
+ ;;
+ restart|try-restart|condrestart)
+ s=$"Restarting $prog (via systemctl): "
+ ;;
+ esac
+
+ action "$s" /bin/systemctl $command "$prog.service"
+}
+
+# Get a sane screen width
+[ -z "${COLUMNS:-}" ] && COLUMNS=80
+
+#if [ -z "${CONSOLETYPE:-}" ]; then
+# if [ -r "/dev/stderr" ]; then
+# CONSOLETYPE="$(/sbin/consoletype < /dev/stderr)"
+# else
+# CONSOLETYPE="$(/sbin/consoletype)"
+# fi
+#fi
+
+if [ -z "${NOLOCALE:-}" ] && [ -z "${LANGSH_SOURCED:-}" ] && [ -f /etc/sysconfig/i18n ] ; then
+ . /etc/profile.d/lang.sh 2>/dev/null
+ # avoid propagating LANGSH_SOURCED any further
+ unset LANGSH_SOURCED
+fi
+
+# Read in our configuration
+if [ -z "${BOOTUP:-}" ]; then
+ if [ -f /etc/sysconfig/init ]; then
+ . /etc/sysconfig/init
+ else
+ # This all seem confusing? Look in /etc/sysconfig/init,
+ # or in /usr/doc/initscripts-*/sysconfig.txt
+ BOOTUP=color
+ RES_COL=60
+ MOVE_TO_COL="echo -en \\033[${RES_COL}G"
+ SETCOLOR_SUCCESS="echo -en \\033[1;32m"
+ SETCOLOR_FAILURE="echo -en \\033[1;31m"
+ SETCOLOR_WARNING="echo -en \\033[1;33m"
+ SETCOLOR_NORMAL="echo -en \\033[0;39m"
+ LOGLEVEL=1
+ fi
+ if [ "$CONSOLETYPE" = "serial" ]; then
+ BOOTUP=serial
+ MOVE_TO_COL=
+ SETCOLOR_SUCCESS=
+ SETCOLOR_FAILURE=
+ SETCOLOR_WARNING=
+ SETCOLOR_NORMAL=
+ fi
+fi
+
+# Interpret escape sequences in an fstab entry
+fstab_decode_str() {
+ fstab-decode echo "$1"
+}
+
+# Check if any of $pid (could be plural) are running
+checkpid() {
+ local i
+
+ for i in $* ; do
+ [ -d "/proc/$i" ] && return 0
+ done
+ return 1
+}
+
+__readlink() {
+ ls -bl "$@" 2>/dev/null| awk '{ print $NF }'
+}
+
+__fgrep() {
+ s=$1
+ f=$2
+ while read line; do
+ if strstr "$line" "$s"; then
+ echo $line
+ return 0
+ fi
+ done < $f
+ return 1
+}
+
+# __umount_loop awk_program fstab_file first_msg retry_msg umount_args
+# awk_program should process fstab_file and return a list of fstab-encoded
+# paths; it doesn't have to handle comments in fstab_file.
+__umount_loop() {
+ local remaining sig=
+ local retry=3 count
+
+ remaining=$(LC_ALL=C awk "/^#/ {next} $1" "$2" | sort -r)
+ while [ -n "$remaining" -a "$retry" -gt 0 ]; do
+ if [ "$retry" -eq 3 ]; then
+ action "$3" fstab-decode umount $5 $remaining
+ else
+ action "$4" fstab-decode umount $5 $remaining
+ fi
+ count=4
+ remaining=$(LC_ALL=C awk "/^#/ {next} $1" "$2" | sort -r)
+ while [ "$count" -gt 0 ]; do
+ [ -z "$remaining" ] && break
+ count=$(($count-1))
+ # jordan # usleep 500000
+ sleep 0.5
+ remaining=$(LC_ALL=C awk "/^#/ {next} $1" "$2" | sort -r)
+ done
+ [ -z "$remaining" ] && break
+ fstab-decode /sbin/fuser -k -m $sig $remaining >/dev/null
+ sleep 3
+ retry=$(($retry -1))
+ sig=-9
+ done
+}
+
+# Similar to __umount loop above, specialized for loopback devices
+__umount_loopback_loop() {
+ local remaining devremaining sig=
+ local retry=3
+
+ remaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $2}' /proc/mounts)
+ devremaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $1}' /proc/mounts)
+ while [ -n "$remaining" -a "$retry" -gt 0 ]; do
+ if [ "$retry" -eq 3 ]; then
+ action $"Unmounting loopback filesystems: " \
+ fstab-decode umount $remaining
+ else
+ action $"Unmounting loopback filesystems (retry):" \
+ fstab-decode umount $remaining
+ fi
+ for dev in $devremaining ; do
+ losetup $dev > /dev/null 2>&1 && \
+ action $"Detaching loopback device $dev: " \
+ losetup -d $dev
+ done
+ remaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $2}' /proc/mounts)
+ devremaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $1}' /proc/mounts)
+ [ -z "$remaining" ] && break
+ fstab-decode /sbin/fuser -k -m $sig $remaining >/dev/null
+ sleep 3
+ retry=$(($retry -1))
+ sig=-9
+ done
+}
+
+# __proc_pids {program} [pidfile]
+# Set $pid to pids from /var/run* for {program}. $pid should be declared
+# local in the caller.
+# Returns LSB exit code for the 'status' action.
+__pids_var_run() {
+ local base=${1##*/}
+ local pid_file=${2:-/var/run/$base.pid}
+
+ pid=
+ if [ -f "$pid_file" ] ; then
+ local line p
+
+ [ ! -r "$pid_file" ] && return 4 # "user had insufficient privilege"
+ while : ; do
+ read line
+ [ -z "$line" ] && break
+ for p in $line ; do
+ [ -z "${p//[0-9]/}" ] && [ -d "/proc/$p" ] && pid="$pid $p"
+ done
+ done < "$pid_file"
+
+ if [ -n "$pid" ]; then
+ return 0
+ fi
+ return 1 # "Program is dead and /var/run pid file exists"
+ fi
+ return 3 # "Program is not running"
+}
+
+# Output PIDs of matching processes, found using pidof
+__pids_pidof() {
+ pidof -c -o $$ -o $PPID -o %PPID -x "$1" || \
+ pidof -c -o $$ -o $PPID -o %PPID -x "${1##*/}"
+# jordan # pidof -c -m -o $$ -o $PPID -o %PPID -x "$1" || \
+# jordan # pidof -c -m -o $$ -o $PPID -o %PPID -x "${1##*/}"
+}
+
+
+# A function to start a program.
+daemon() {
+ # Test syntax.
+ local gotbase= force= nicelevel corelimit
+ local pid base= user= nice= bg= pid_file=
+ local cgroup=
+ nicelevel=0
+ while [ "$1" != "${1##[-+]}" ]; do
+ case $1 in
+ '') echo $"$0: Usage: daemon [+/-nicelevel] {program}"
+ return 1;;
+ --check)
+ base=$2
+ gotbase="yes"
+ shift 2
+ ;;
+ --check=?*)
+ base=${1#--check=}
+ gotbase="yes"
+ shift
+ ;;
+ --user)
+ user=$2
+ shift 2
+ ;;
+ --user=?*)
+ user=${1#--user=}
+ shift
+ ;;
+ --pidfile)
+ pid_file=$2
+ shift 2
+ ;;
+ --pidfile=?*)
+ pid_file=${1#--pidfile=}
+ shift
+ ;;
+ --force)
+ force="force"
+ shift
+ ;;
+ [-+][0-9]*)
+ nice="nice -n $1"
+ shift
+ ;;
+ *) echo $"$0: Usage: daemon [+/-nicelevel] {program}"
+ return 1;;
+ esac
+ done
+
+ # Save basename.
+ [ -z "$gotbase" ] && base=${1##*/}
+
+ # See if it's already running. Look *only* at the pid file.
+ __pids_var_run "$base" "$pid_file"
+
+ [ -n "$pid" -a -z "$force" ] && return
+
+ # make sure it doesn't core dump anywhere unless requested
+ corelimit="ulimit -S -c ${DAEMON_COREFILE_LIMIT:-0}"
+
+ # if they set NICELEVEL in /etc/sysconfig/foo, honor it
+ [ -n "${NICELEVEL:-}" ] && nice="nice -n $NICELEVEL"
+
+ # if they set CGROUP_DAEMON in /etc/sysconfig/foo, honor it
+ if [ -n "${CGROUP_DAEMON}" ]; then
+ if [ ! -x /bin/cgexec ]; then
+ echo -n "Cgroups not installed"; warning
+ echo
+ else
+ cgroup="/bin/cgexec";
+ for i in $CGROUP_DAEMON; do
+ cgroup="$cgroup -g $i";
+ done
+ fi
+ fi
+
+ # Echo daemon
+ [ "${BOOTUP:-}" = "verbose" -a -z "${LSB:-}" ] && echo -n " $base"
+
+ # And start it up.
+ if [ -z "$user" ]; then
+ $cgroup $nice /bin/bash -c "$corelimit >/dev/null 2>&1 ; $*"
+ else
+ $cgroup $nice runuser -s /bin/bash $user -c "$corelimit >/dev/null 2>&1 ; $*"
+ fi
+
+ [ "$?" -eq 0 ] && success $"$base startup" || failure $"$base startup"
+}
+
+# A function to stop a program.
+killproc() {
+ local RC killlevel= base pid pid_file= delay
+
+ RC=0; delay=3
+ # Test syntax.
+ if [ "$#" -eq 0 ]; then
+ echo $"Usage: killproc [-p pidfile] [ -d delay] {program} [-signal]"
+ return 1
+ fi
+ if [ "$1" = "-p" ]; then
+ pid_file=$2
+ shift 2
+ fi
+ if [ "$1" = "-d" ]; then
+ delay=$2
+ shift 2
+ fi
+
+
+ # check for second arg to be kill level
+ [ -n "${2:-}" ] && killlevel=$2
+
+ # Save basename.
+ base=${1##*/}
+
+ # Find pid.
+ __pids_var_run "$1" "$pid_file"
+ RC=$?
+ if [ -z "$pid" ]; then
+ if [ -z "$pid_file" ]; then
+ pid="$(__pids_pidof "$1")"
+ else
+ [ "$RC" = "4" ] && { failure $"$base shutdown" ; return $RC ;}
+ fi
+ fi
+
+ # Kill it.
+ if [ -n "$pid" ] ; then
+ [ "$BOOTUP" = "verbose" -a -z "${LSB:-}" ] && echo -n "$base "
+ if [ -z "$killlevel" ] ; then
+ if checkpid $pid 2>&1; then
+ # TERM first, then KILL if not dead
+ kill -TERM $pid >/dev/null 2>&1
+ sleep 0.1
+ # jordan # usleep 100000
+ if checkpid $pid && sleep 1 &&
+ checkpid $pid && sleep $delay &&
+ checkpid $pid ; then
+ kill -KILL $pid >/dev/null 2>&1
+ sleep 0.1
+ # jordan # usleep 100000
+ fi
+ fi
+ checkpid $pid
+ RC=$?
+ [ "$RC" -eq 0 ] && failure $"$base shutdown" || success $"$base shutdown"
+ RC=$((! $RC))
+ # use specified level only
+ else
+ if checkpid $pid; then
+ kill $killlevel $pid >/dev/null 2>&1
+ RC=$?
+ [ "$RC" -eq 0 ] && success $"$base $killlevel" || failure $"$base $killlevel"
+ elif [ -n "${LSB:-}" ]; then
+ RC=7 # Program is not running
+ fi
+ fi
+ else
+ if [ -n "${LSB:-}" -a -n "$killlevel" ]; then
+ RC=7 # Program is not running
+ else
+ failure $"$base shutdown"
+ RC=0
+ fi
+ fi
+
+ # Remove pid file if any.
+ if [ -z "$killlevel" ]; then
+ rm -f "${pid_file:-/var/run/$base.pid}"
+ fi
+ return $RC
+}
+
+# A function to find the pid of a program. Looks *only* at the pidfile
+pidfileofproc() {
+ local pid
+
+ # Test syntax.
+ if [ "$#" = 0 ] ; then
+ echo $"Usage: pidfileofproc {program}"
+ return 1
+ fi
+
+ __pids_var_run "$1"
+ [ -n "$pid" ] && echo $pid
+ return 0
+}
+
+# A function to find the pid of a program.
+pidofproc() {
+ local RC pid pid_file=
+
+ # Test syntax.
+ if [ "$#" = 0 ]; then
+ echo $"Usage: pidofproc [-p pidfile] {program}"
+ return 1
+ fi
+ if [ "$1" = "-p" ]; then
+ pid_file=$2
+ shift 2
+ fi
+ fail_code=3 # "Program is not running"
+
+ # First try "/var/run/*.pid" files
+ __pids_var_run "$1" "$pid_file"
+ RC=$?
+ if [ -n "$pid" ]; then
+ echo $pid
+ return 0
+ fi
+
+ [ -n "$pid_file" ] && return $RC
+ __pids_pidof "$1" || return $RC
+}
+
+status() {
+ local base pid lock_file= pid_file=
+
+ # Test syntax.
+ if [ "$#" = 0 ] ; then
+ echo $"Usage: status [-p pidfile] {program}"
+ return 1
+ fi
+ if [ "$1" = "-p" ]; then
+ pid_file=$2
+ shift 2
+ fi
+ if [ "$1" = "-l" ]; then
+ lock_file=$2
+ shift 2
+ fi
+ base=${1##*/}
+
+ if [ "$_use_systemctl" = "1" ]; then
+ systemctl status ${0##*/}.service
+ return $?
+ fi
+
+ # First try "pidof"
+ __pids_var_run "$1" "$pid_file"
+ RC=$?
+ if [ -z "$pid_file" -a -z "$pid" ]; then
+ pid="$(__pids_pidof "$1")"
+ fi
+ if [ -n "$pid" ]; then
+ echo $"${base} (pid $pid) is running..."
+ return 0
+ fi
+
+ case "$RC" in
+ 0)
+ echo $"${base} (pid $pid) is running..."
+ return 0
+ ;;
+ 1)
+ echo $"${base} dead but pid file exists"
+ return 1
+ ;;
+ 4)
+ echo $"${base} status unknown due to insufficient privileges."
+ return 4
+ ;;
+ esac
+ if [ -z "${lock_file}" ]; then
+ lock_file=${base}
+ fi
+ # See if /var/lock/subsys/${lock_file} exists
+ if [ -f /var/lock/subsys/${lock_file} ]; then
+ echo $"${base} dead but subsys locked"
+ return 2
+ fi
+ echo $"${base} is stopped"
+ return 3
+}
+
+echo_success() {
+ [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+ echo -n "["
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_SUCCESS
+ echo -n $" OK "
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+ echo -n "]"
+ echo -ne "\r"
+ return 0
+}
+
+echo_failure() {
+ [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+ echo -n "["
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_FAILURE
+ echo -n $"FAILED"
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+ echo -n "]"
+ echo -ne "\r"
+ return 1
+}
+
+echo_passed() {
+ [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+ echo -n "["
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_WARNING
+ echo -n $"PASSED"
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+ echo -n "]"
+ echo -ne "\r"
+ return 1
+}
+
+echo_warning() {
+ [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+ echo -n "["
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_WARNING
+ echo -n $"WARNING"
+ [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+ echo -n "]"
+ echo -ne "\r"
+ return 1
+}
+
+# Inform the graphical boot of our current state
+update_boot_stage() {
+ if [ -x /usr/bin/plymouth ]; then
+ /usr/bin/plymouth --update="$1"
+ fi
+ return 0
+}
+
+# Log that something succeeded
+success() {
+ [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_success
+ return 0
+}
+
+# Log that something failed
+failure() {
+ local rc=$?
+ [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_failure
+ [ -x /usr/bin/plymouth ] && /usr/bin/plymouth --details
+ return $rc
+}
+
+# Log that something passed, but may have had errors. Useful for fsck
+passed() {
+ local rc=$?
+ [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_passed
+ return $rc
+}
+
+# Log a warning
+warning() {
+ local rc=$?
+ [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_warning
+ return $rc
+}
+
+# Run some action. Log its output.
+action() {
+ local STRING rc
+
+ STRING=$1
+ echo -n "$STRING "
+ shift
+ "$@" && success $"$STRING" || failure $"$STRING"
+ rc=$?
+ echo
+ return $rc
+}
+
+# returns OK if $1 contains $2
+strstr() {
+ [ "${1#*$2*}" = "$1" ] && return 1
+ return 0
+}
+
+# Confirm whether we really want to run this service
+confirm() {
+ [ -x /usr/bin/plymouth ] && /usr/bin/plymouth --hide-splash
+ while : ; do
+ echo -n $"Start service $1 (Y)es/(N)o/(C)ontinue? [Y] "
+ read answer
+ if strstr $"yY" "$answer" || [ "$answer" = "" ] ; then
+ return 0
+ elif strstr $"cC" "$answer" ; then
+ rm -f /var/run/confirm
+ [ -x /usr/bin/plymouth ] && /usr/bin/plymouth --show-splash
+ return 2
+ elif strstr $"nN" "$answer" ; then
+ return 1
+ fi
+ done
+}
+
+# resolve a device node to its major:minor numbers in decimal or hex
+get_numeric_dev() {
+(
+ fmt="%d:%d"
+ if [ "$1" = "hex" ]; then
+ fmt="%x:%x"
+ fi
+ ls -lH "$2" | awk '{ sub(/,/, "", $5); printf("'"$fmt"'", $5, $6); }'
+) 2>/dev/null
+}
+
+# Check whether file $1 is a backup or rpm-generated file and should be ignored
+is_ignored_file() {
+ case "$1" in
+ *~ | *.bak | *.orig | *.rpmnew | *.rpmorig | *.rpmsave)
+ return 0
+ ;;
+ esac
+ return 1
+}
+
+# Evaluate shvar-style booleans
+is_true() {
+ case "$1" in
+ [tT] | [yY] | [yY][eE][sS] | [tT][rR][uU][eE])
+ return 0
+ ;;
+ esac
+ return 1
+}
+
+# Evaluate shvar-style booleans
+is_false() {
+ case "$1" in
+ [fF] | [nN] | [nN][oO] | [fF][aA][lL][sS][eE])
+ return 0
+ ;;
+ esac
+ return 1
+}
+
+key_is_random() {
+ [ "$1" = "/dev/urandom" -o "$1" = "/dev/hw_random" \
+ -o "$1" = "/dev/random" ]
+}
+
+find_crypto_mount_point() {
+ local fs_spec fs_file fs_vfstype remaining_fields
+ local fs
+ while read fs_spec fs_file remaining_fields; do
+ if [ "$fs_spec" = "/dev/mapper/$1" ]; then
+ echo $fs_file
+ break;
+ fi
+ done < /etc/fstab
+}
+
+# Because of a chicken/egg problem, init_crypto must be run twice. /var may be
+# encrypted but /var/lib/random-seed is needed to initialize swap.
+init_crypto() {
+ local have_random dst src key opt mode owner params makeswap skip arg opt
+ local param value rc ret mke2fs mdir prompt mount_point
+
+ ret=0
+ have_random=$1
+ while read dst src key opt; do
+ [ -z "$dst" -o "${dst#\#}" != "$dst" ] && continue
+ [ -b "/dev/mapper/$dst" ] && continue;
+ if [ "$have_random" = 0 ] && key_is_random "$key"; then
+ continue
+ fi
+ if [ -n "$key" -a "x$key" != "xnone" ]; then
+ if test -e "$key" ; then
+ owner=$(ls -l $key | (read a b owner rest; echo $owner))
+ if ! key_is_random "$key"; then
+ mode=$(ls -l "$key" | cut -c 5-10)
+ if [ "$mode" != "------" ]; then
+ echo $"INSECURE MODE FOR $key"
+ fi
+ fi
+ if [ "$owner" != root ]; then
+ echo $"INSECURE OWNER FOR $key"
+ fi
+ else
+ echo $"Key file for $dst not found, skipping"
+ ret=1
+ continue
+ fi
+ else
+ key=""
+ fi
+ params=""
+ makeswap=""
+ mke2fs=""
+ skip=""
+ # Parse the src field for UUID= and convert to real device names
+ if [ "${src%%=*}" == "UUID" ]; then
+ src=$(/sbin/blkid -t "$src" -l -o device)
+ elif [ "${src/^\/dev\/disk\/by-uuid\/}" != "$src" ]; then
+ src=$(__readlink $src)
+ fi
+ # Is it a block device?
+ [ -b "$src" ] || continue
+ # Is it already a device mapper slave? (this is gross)
+ devesc=${src##/dev/}
+ devesc=${devesc//\//!}
+ for d in /sys/block/dm-*/slaves ; do
+ [ -e $d/$devesc ] && continue 2
+ done
+ # Parse the options field, convert to cryptsetup parameters and
+		# construct the command line
+ while [ -n "$opt" ]; do
+ arg=${opt%%,*}
+ opt=${opt##$arg}
+ opt=${opt##,}
+ param=${arg%%=*}
+ value=${arg##$param=}
+
+ case "$param" in
+ cipher)
+ params="$params -c $value"
+ if [ -z "$value" ]; then
+ echo $"$dst: no value for cipher option, skipping"
+ skip="yes"
+ fi
+ ;;
+ size)
+ params="$params -s $value"
+ if [ -z "$value" ]; then
+ echo $"$dst: no value for size option, skipping"
+ skip="yes"
+ fi
+ ;;
+ hash)
+ params="$params -h $value"
+ if [ -z "$value" ]; then
+ echo $"$dst: no value for hash option, skipping"
+ skip="yes"
+ fi
+ ;;
+ verify)
+ params="$params -y"
+ ;;
+ swap)
+ makeswap=yes
+ ;;
+ tmp)
+ mke2fs=yes
+ esac
+ done
+ if [ "$skip" = "yes" ]; then
+ ret=1
+ continue
+ fi
+ if [ -z "$makeswap" ] && cryptsetup isLuks "$src" 2>/dev/null ; then
+ if key_is_random "$key"; then
+ echo $"$dst: LUKS requires non-random key, skipping"
+ ret=1
+ continue
+ fi
+ if [ -n "$params" ]; then
+ echo "$dst: options are invalid for LUKS partitions," \
+ "ignoring them"
+ fi
+ if [ -n "$key" ]; then
+ /sbin/cryptsetup -d $key luksOpen "$src" "$dst" <&1 2>/dev/null && success || failure
+ rc=$?
+ else
+ mount_point="$(find_crypto_mount_point $dst)"
+ [ -n "$mount_point" ] || mount_point=${src##*/}
+ prompt=$(printf $"%s is password protected" "$mount_point")
+ plymouth ask-for-password --prompt "$prompt" --command="/sbin/cryptsetup luksOpen -T1 $src $dst" <&1
+ rc=$?
+ fi
+ else
+ [ -z "$key" ] && plymouth --hide-splash
+ /sbin/cryptsetup $params ${key:+-d $key} create "$dst" "$src" <&1 2>/dev/null && success || failure
+ rc=$?
+ [ -z "$key" ] && plymouth --show-splash
+ fi
+ if [ $rc -ne 0 ]; then
+ ret=1
+ continue
+ fi
+ if [ -b "/dev/mapper/$dst" ]; then
+ if [ "$makeswap" = "yes" ]; then
+ mkswap "/dev/mapper/$dst" 2>/dev/null >/dev/null
+ fi
+ if [ "$mke2fs" = "yes" ]; then
+ if mke2fs "/dev/mapper/$dst" 2>/dev/null >/dev/null \
+ && mdir=$(mktemp -d /tmp/mountXXXXXX); then
+ mount "/dev/mapper/$dst" "$mdir" && chmod 1777 "$mdir"
+ umount "$mdir"
+ rmdir "$mdir"
+ fi
+ fi
+ fi
+ done < /etc/crypttab
+ return $ret
+}
+
+# A sed expression to filter out the files that is_ignored_file recognizes
+__sed_discard_ignored_files='/\(~\|\.bak\|\.orig\|\.rpmnew\|\.rpmorig\|\.rpmsave\)$/d'
+
+if [ "$_use_systemctl" = "1" ]; then
+ if [ "x$1" = xstart -o \
+ "x$1" = xstop -o \
+ "x$1" = xrestart -o \
+ "x$1" = xreload -o \
+ "x$1" = xtry-restart -o \
+ "x$1" = xforce-reload -o \
+ "x$1" = xcondrestart ] ; then
+
+ systemctl_redirect $0 $1
+ exit $?
+ fi
+fi
#!/bin/bash
#
-# sfa Wraps PLCAPI into the SFA compliant API
+# sfa
+# Provides a generic SFA wrapper based on the initial PlanetLab Implementation
#
# hopefully right after plc
# chkconfig: 2345 61 39
#
-# description: Wraps PLCAPI into the SFA compliant API
-#
+### BEGIN INIT INFO
+# Provides: sfa
+# Required-Start: postgresql
+# Required-Stop: postgresql
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: An implementation of the SFA Architecture
+### END INIT INFO
+
+####################
+# borrowed from postgresql
+function debian_get_postgresql_versions () {
+ versions=()
+ for v in `ls /usr/lib/postgresql/ 2>/dev/null`; do
+ if [ -x /usr/lib/postgresql/$v/bin/pg_ctl ] && [ ! -x /etc/init.d/postgresql-$v ]; then
+ versions+=($v)
+ fi
+ done
+ if [[ ${#versions[*]} == "0" ]]; then
+ echo "E: Missing postgresql installation. Aborting."
+ exit
+ fi
+ if [[ ${#versions[*]} != "1" ]]; then
+ echo "E: Too many postgresql versions installed. Aborting."
+ exit
+ fi
+ pgver=${versions[0]}
+}
+
+####################
+if [ -f /etc/redhat-release ] ; then
+ # source function library
+ . /etc/init.d/functions
+ PGDATA=/var/lib/pgsql/data/
+ PGWATCH=postmaster
+ PGLOCK=/var/lock/subsys/postgresql
+ SFALOCK=/var/lock/subsys/sfa-start.pid
+elif [ -f /etc/debian_version ] ; then
+ . /etc/init.d/functions.sfa
+ debian_get_postgresql_versions
+ PGDATA=/etc/postgresql/$pgver/main/
+ PGWATCH=postgres
+ PGLOCK=/var/run/postgresql/$pgver-main.pid
+ SFALOCK=/var/run/sfa-start.pid
+else
+ echo "initscript can only handle redhat/fedora or debian/ubuntu systems"
+ exit 1
+fi
+
-# source function library
-. /etc/init.d/functions
-# Default locations
-PGDATA=/var/lib/pgsql/data
postgresql_conf=$PGDATA/postgresql.conf
-pghba_conf=$PGDATA/pg_hba.conf
+pg_hba_conf=$PGDATA/pg_hba.conf
postgresql_sysconfig=/etc/sysconfig/pgsql
# SFA consolidated (merged) config file
function postgresql_check () {
# wait until postmaster is up and running - or 10s max
- if status postmaster >& /dev/null && [ -f /var/lock/subsys/postgresql ] ; then
+ if status $PGWATCH >& /dev/null && [ -f $PGLOCK ] ; then
# The only way we can be sure is if we can access it
for i in $(seq 1 10) ; do
# Must do this as the postgres user initially (before we
# Regenerate the main configuration file from default values
# overlaid with site-specific and current values.
- # Thierry -- 2007-07-05 : values in plc_config.xml are *not* taken into account here
files=( $sfa_default_config $sfa_local_config )
for file in "${files[@]}" ; do
if [ -n "$force" -o $file -nt $sfa_whole_config ] ; then
if [ -n "$force" -o $sfa_whole_config -nt /etc/sfa/sfa_config.sh ] ; then
sfa-config --shell $sfa_default_config $sfa_local_config > /etc/sfa/sfa_config.sh
fi
-# if [ -n "$force" -o $sfa_whole_config -nt /etc/sfa/php/sfa_config.php ] ; then
-# mkdir -p /etc/sfa/php
-# plc-config --php $sfa_whole_config >/etc/sfa/php/sfa_config.php
-# fi
# [re]generate the sfa_component_config
# this is a server-side thing but produces a file that somehow needs to be pushed
######## Start up the server
# not too nice, but.. when co-located with myplc we'll let it start/stop postgresql
- if ! rpm -q myplc >& /dev/null ; then
+ if [ ! -f /etc/myplc-release ] ; then
echo STARTING...
service postgresql start >& /dev/null
fi
check
fi
check
- sfaadmin reg sync_db
+ # mention sfaadmin.py instead of just sfaadmin for safety
+ sfaadmin.py reg sync_db
MESSAGE=$"SFA: Checking for PostgreSQL server"
echo -n "$MESSAGE"
[ "$SFA_DB_ENABLED" == 1 -o "$SFA_DB_ENABLED" == True ] || return
# not too nice, but.. when co-located with myplc we'll let it start/stop postgresql
- if ! rpm -q myplc >& /dev/null ; then
+ if [ ! -f /etc/myplc-release ] ; then
service postgresql stop >& /dev/null
check
MESSAGE=$"Stopping PostgreSQL server"
[ "$SFA_FLASHPOLICY_ENABLED" == 1 ] && \
action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
- touch /var/lock/subsys/sfa-start.py
+ touch $SFALOCK
}
function stop() {
action $"Shutting down SFA" killproc sfa-start.py
+# a possible alternative reads; esp. as we remove lock manually below
+# echo $"Shutting down SFA" ; pkill '^sfa-start'
db_stop
- rm -f /var/lock/subsys/sfa-start.py
+ rm -f $SFALOCK
}
reload) reload force ;;
restart) stop; start ;;
condrestart)
- if [ -f /var/lock/subsys/sfa-start.py ]; then
+ if [ -f $SFALOCK ]; then
stop
start
fi
;;
status)
status sfa-start.py
+# possible alternative for debian
+# pids=$(pgrep '^sfa-start'); [ -n "$pids" ] && ps $pids
+
RETVAL=$?
;;
dbdump)
# description: Wraps PLCAPI into the SFA compliant API
#
+echo "sfa-cm is no longer supported"
+echo "you should consider rpm -e sfa-cm"
+exit 1
+
# Source config
-. /etc/sfa/sfa_config
+[ -f /etc/sfa/sfa_config.sh ] && . /etc/sfa/sfa_config.sh
# source function library
. /etc/init.d/functions
import shutil
from distutils.core import setup
+from sfa.util.version import version_tag
+
scripts = glob("clientbin/*.py") + \
[
'config/sfa-config-tty',
'config/sfa-config',
- 'config/gen-sfa-cm-config.py',
+# 'config/gen-sfa-cm-config.py',
'sfa/server/sfa-start.py',
- 'sfa/server/sfa_component_setup.py',
+# 'sfa/server/sfa_component_setup.py',
'sfatables/sfatables',
'keyconvert/keyconvert.py',
'flashpolicy/sfa_flashpolicy.py',
'sfa/rspecs/versions',
'sfa/client',
'sfa/planetlab',
+ 'sfa/nitos',
+ 'sfa/dummy',
'sfa/openstack',
'sfa/federica',
+ 'sfa/senslab',
'sfatables',
'sfatables/commands',
'sfatables/processors',
]
-initscripts = [ 'sfa', 'sfa-cm' ]
+initscripts = [ 'sfa' ]
+if not os.path.isfile('/etc/redhat-release'): initscripts.append('functions.sfa')
data_files = [ ('/etc/sfa/', [ 'config/aggregates.xml',
'config/registries.xml',
setup(name='sfa',
packages = packages,
data_files = data_files,
- scripts = scripts)
+ scripts = scripts,
+ url="http://svn.planet-lab.org/wiki/SFATutorial",
+ author="Thierry Parmentelat, Tony Mack, Scott Baker",
+ author_email="thierry.parmentelat@inria.fr, tmack@princeton.cs.edu, smbaker@gmail.com",
+ version=version_tag)
%define name sfa
%define version 2.1
-%define taglevel 13
+%define taglevel 25
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
Distribution: PlanetLab %{plrelease}
URL: %{SCMURL}
-Summary: the SFA python libraries
+Summary: Server-side for SFA, generic implementation derived from PlanetLab
Group: Applications/System
BuildRequires: make
+BuildRequires: python-setuptools
-Requires: python >= 2.5
-Requires: pyOpenSSL >= 0.7
-Requires: m2crypto
-Requires: xmlsec1-openssl-devel
-Requires: libxslt-python
-Requires: python-ZSI
-# for uuidgen - used in db password generation
-# on f8 this actually comes with e2fsprogs, go figure
-Requires: util-linux-ng
-# xmlbuilder depends on lxml
-Requires: python-lxml
-Requires: python-setuptools
-Requires: python-dateutil
# for the registry
Requires: postgresql >= 8.2, postgresql-server >= 8.2
Requires: postgresql-python
Requires: python-migrate
# the eucalyptus aggregate uses this module
Requires: python-xmlbuilder
+# for uuidgen - used in db password generation
+# on f8 this actually comes with e2fsprogs, go figure
+Requires: util-linux-ng
+# and the SFA libraries of course
+Requires: sfa-common
-# python 2.5 has uuid module added, for python 2.4 we still need it.
-# we can't really check for if we can load uuid as a python module,
-# it'll be installed by "devel.pkgs". we have the epel repository so
-# python-uuid will be provided. but we can test for the python
-# version.
-# %define has_py24 %( python -c "import sys;sys.exit(sys.version_info[0:2] == (2,4))" 2> /dev/null; echo $? )
-# %if %has_py24
-#
-# this also didn't work very well. I'll just check for distroname - baris
-#%if %{distroname} == "centos5"
-#Requires: python-uuid
-#%endif
-
-%package flashpolicy
-Summary: SFA support for flash clients
+%package common
+Summary: Python libraries for SFA, generic implementation derived from PlanetLab
Group: Applications/System
-Requires: sfa
+Requires: python >= 2.7
+Requires: pyOpenSSL >= 0.7
+Requires: m2crypto
+Requires: python-dateutil
+Requires: python-lxml
+Requires: libxslt-python
+Requires: python-ZSI
+Requires: xmlsec1-openssl-devel
%package client
-Summary: the SFA experimenter-side CLI
+Summary: sfi, the SFA experimenter-side CLI
Group: Applications/System
-Requires: sfa
+Requires: sfa-common
Requires: pyOpenSSL >= 0.7
%package plc
Group: Applications/System
Requires: sfa
-%package cm
-Summary: the SFA layer around MyPLC NodeManager
+%package flashpolicy
+Summary: SFA support for flash clients
Group: Applications/System
Requires: sfa
-Requires: pyOpenSSL >= 0.6
%package federica
Summary: the SFA layer around Federica
Group: Applications/System
Requires: sfa
+%package nitos
+Summary: the SFA layer around NITOS
+Group: Applications/System
+Requires: sfa
+
+%package senslab
+Summary: the SFA layer around SensLab
+Group: Applications/System
+Requires: sfa
+
+%package dummy
+Summary: the SFA layer around a Dummy Testbed
+Group: Applications/System
+Requires: sfa
+
%package sfatables
Summary: sfatables policy tool for SFA
Group: Applications/System
%package tests
Summary: unit tests suite for SFA
Group: Applications/System
-Requires: sfa
+Requires: sfa-common
-%description
-This package provides the python libraries for the PlanetLab implementation of SFA
+%description
+This package provides the registry, aggregate manager and slice
+managers for SFA. In most cases it is advisable to install additional
+package for a given testbed, like e.g. sfa-plc for a PlanetLab testbed.
-%description flashpolicy
-This package provides support for adobe flash client applications.
+%description common
+This package contains the python libraries for SFA both client and server-side.
%description client
This package provides the client side of the SFA API, in particular
This package implements the SFA interface which serves as a layer
between the existing PlanetLab interfaces and the SFA API.
-%description cm
-This package implements the SFA interface which serves as a layer
-between the existing PlanetLab NodeManager interfaces and the SFA API.
+%description flashpolicy
+This package provides support for adobe flash client applications.
%description federica
The SFA driver for FEDERICA.
+%description nitos
+The SFA driver for NITOS.
+
+%description senslab
+The SFA driver for SensLab.
+
+%description dummy
+The SFA driver for a Dummy Testbed.
+
%description sfatables
sfatables is a tool for defining access and admission control policies
in an SFA network, in much the same way as iptables is for ip
rm -rf $RPM_BUILD_ROOT
%files
-%{python_sitelib}/sfa/__init__.py*
-%{python_sitelib}/sfa/trust
-%{python_sitelib}/sfa/storage
-%{python_sitelib}/sfa/util
-%{python_sitelib}/sfa/server
-%{python_sitelib}/sfa/methods
-%{python_sitelib}/sfa/generic
-%{python_sitelib}/sfa/managers
-%{python_sitelib}/sfa/importer
-%{python_sitelib}/sfa/rspecs
-%{python_sitelib}/sfa/client
/etc/init.d/sfa
%{_bindir}/sfa-start.py*
%{_bindir}/sfaadmin.py*
/usr/share/sfa/examples
/var/www/html/wsdl/*.wsdl
-%files flashpolicy
-%{_bindir}/sfa_flashpolicy.py*
-/etc/sfa/sfa_flashpolicy_config.xml
+%files common
+%{python_sitelib}/sfa/__init__.py*
+%{python_sitelib}/sfa/trust
+%{python_sitelib}/sfa/storage
+%{python_sitelib}/sfa/util
+%{python_sitelib}/sfa/server
+%{python_sitelib}/sfa/methods
+%{python_sitelib}/sfa/generic
+%{python_sitelib}/sfa/managers
+%{python_sitelib}/sfa/importer
+%{python_sitelib}/sfa/rspecs
+%{python_sitelib}/sfa/client
%files client
%config (noreplace) /etc/sfa/sfi_config
/etc/sfa/xml.xsd
/etc/sfa/protogeni-rspec-common.xsd
/etc/sfa/topology
-%{_bindir}/gen-sfa-cm-config.py*
-%files cm
-/etc/init.d/sfa-cm
-%{_bindir}/sfa_component_setup.py*
-# cron jobs here
+%files flashpolicy
+%{_bindir}/sfa_flashpolicy.py*
+/etc/sfa/sfa_flashpolicy_config.xml
%files federica
%{python_sitelib}/sfa/federica
+%files nitos
+%{python_sitelib}/sfa/nitos
+
+%files senslab
+%{python_sitelib}/sfa/senslab
+
+%files dummy
+%{python_sitelib}/sfa/dummy
+
%files sfatables
/etc/sfatables/*
%{_bindir}/sfatables
%postun
[ "$1" -ge "1" ] && { service sfa dbdump ; service sfa restart ; }
-### sfa-cm installs the 'sfa-cm' service
-%post cm
-chkconfig --add sfa-cm
-
-%preun cm
-if [ "$1" = 0 ] ; then
- /sbin/service sfa-cm stop || :
- /sbin/chkconfig --del sfa-cm || :
-fi
-
-%postun cm
-[ "$1" -ge "1" ] && service sfa-cm restart || :
+#### sfa-cm installs the 'sfa-cm' service
+#%post cm
+#chkconfig --add sfa-cm
+#
+#%preun cm
+#if [ "$1" = 0 ] ; then
+# /sbin/service sfa-cm stop || :
+# /sbin/chkconfig --del sfa-cm || :
+#fi
+#
+#%postun cm
+#[ "$1" -ge "1" ] && service sfa-cm restart || :
%changelog
+* Tue Feb 26 2013 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-25
+- sfi and sfaadmin list now share the same display code for related objs
+- support for advertising alternate api urls - for other API versions - api_versions.xml
+- cleaned up GID class
+- senslab: improved importer
+- senslab: add site to SlabLocation from Location
+- senslab: new class JsonPage
+- senslab: fix debian packaging
+- federica: fix list_slices
+
+* Tue Jan 29 2013 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-24
+- merged the senslab driver from git.f-lab.fr
+- merged the teagle flavour
+- debian packaging should work much better
+- added debug messages for when db connection fails
+
+* Sun Jan 20 2013 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-23
+- minor fix in registry
+- fix for sfi gid, use clientbootstrap
+- support for debians and ubuntus (packaging and initscript)
+- deprecated cm package altogether
+- pl flavour, minor fix for tags
+- various fixes for the dummy flavour
+
+* Sun Dec 16 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-22
+- suited (and required) to run with plcapi-5.1-5 b/c of changes to AddPerson
+- tweaks in nitos importer
+- improvements to sfaadmin check-gid
+
+* Tue Dec 11 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-21
+- PL importer: minor fixes for corner cases
+- PL importer: also handles last_updated more accurately
+- sfi update can be used to select a key among several in PL
+- sfi add/update usage message fixes (no more record)
+- new feature sfaadmin registry check_gid [-a]
+
+* Mon Dec 03 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-20
+- fix 2 major bugs in PL importer
+- esp. wrt GID management against PLC key
+
+* Wed Nov 28 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-19
+- nicer sfi delegate, can handle multiple delegations and for authorities(pi) as well
+
+* Wed Nov 28 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-18
+- support for delegation in sfaclientlib
+- sfi delegate fixed
+- other delegation-related sfi option trashed
+- new config (based on ini format)
+- new dummy driver and related package
+- pl importer has more explicit error messages
+- credential dump shows expiration
+
+* Tue Oct 16 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-17
+- bugfix in forwarding Resolve requests
+- various fixes in the nitos driver wrt keys and users
+
+* Mon Oct 01 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-16
+- various tweaks for the nitos driver
+
+* Wed Sep 26 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-15
+- first stab at a driver for the NITOS/OMF testbed (sep. pkg)
+- deeper cleanup of the data-dependencies between SFA and the testbed
+- in particular, sfi create issues Resolve(details=False)
+- for that purpose, Resolve exposes reg-* keys for SFA builtins
+- which in turn allows sfi list to show PIs, slice members and keys
+- NOTE: sfa-config-tty is known to be broken w/ less frequently used func's
+- Shows stacktrace when startup fails (DB conn, wrong flavour, etc..)
+
+* Mon Sep 17 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-14
+- configurable data-dir (/var/lib/sfa)
+- no more dependent on myplc-config
+- some support for hrns with _ instead of \.
+- fix for PL importing in presence of gpg keys
+- DeleteSliver returns True instead of 1 in case of success
+- Various improvements on the openstack/nova side
+- new package sfa-nitos
+
* Wed Jul 11 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-13
- bugfix that prevented to call 'sfi create' - (was broken in sfa-2.1-12)
- sfi to remove expired credentials
-
+###
+#
+# Thierry - 2012 sept 21
+#
+# it seems terribly wrong that the client should decide to use PG- or PL- related code
+# esp. in a context where we're trying to have more and more kinds of testbeds involved
+#
+# also, the 'users' field that CreateSliver is expecting (the key point here is to get this right)
+# is specified to have at least a urn and a list of keys, both of these being supported natively
+# in the sfa db
+# So long story short, it seems to me that we should have a common code that fills in 'urn' and 'keys'
+# and then code that tentatively tries to add as much extra info that we can get on these users
+#
+# the fact e.g. that PlanetLab insists on getting a first_name and last_name is not
+# exactly consistent with the GENI spec. of CreateSliver
+#
def pg_users_arg(records):
users = []
for record in records:
users.append(user)
return users
-def sfa_users_arg(records, slice_record):
+def sfa_users_arg (records, slice_record):
users = []
for record in records:
if record['type'] != 'user':
continue
- user = {'urn': record['geni_urn'], #
- 'keys': record['keys'],
- 'email': record['email'], # needed for MyPLC
- 'person_id': record['person_id'], # needed for MyPLC
- 'first_name': record['first_name'], # needed for MyPLC
- 'last_name': record['last_name'], # needed for MyPLC
- 'slice_record': slice_record, # needed for legacy refresh peer
- 'key_ids': record['key_ids'] # needed for legacy refresh peer
- }
+ user = {'urn': record['reg-urn'],
+ 'keys': record['reg-keys'],
+ 'slice_record': slice_record,
+ }
+ # fill as much stuff as possible from planetlab or similar
+ # note that reg-email is not yet available
+ pl_fields = ['email', 'person_id', 'first_name', 'last_name', 'key_ids']
+ nitos_fields = [ 'email', 'user_id' ]
+ extra_fields = list ( set(pl_fields).union(set(nitos_fields)))
+ # try to fill all these in
+ for field in extra_fields:
+ if record.has_key(field): user[field]=record[field]
users.append(user)
- return users
+
+ return users
def sfa_to_pg_users_arg(users):
--- /dev/null
+# a few utilities common to sfi and sfaadmin
+
+def optparse_listvalue_callback(option, opt, value, parser):
+ former=getattr(parser.values,option.dest)
+ if not former: former=[]
+ setattr(parser.values, option.dest, former+value.split(','))
+
+def optparse_dictvalue_callback (option, option_string, value, parser):
+ try:
+ (k,v)=value.split('=',1)
+ d=getattr(parser.values, option.dest)
+ d[k]=v
+ except:
+ parser.print_help()
+ sys.exit(1)
+
+# a code fragment that could be helpful for argparse which unfortunately is
+# available with 2.7 only, so this feels like too strong a requirement for the client side
+#class ExtraArgAction (argparse.Action):
+# def __call__ (self, parser, namespace, values, option_string=None):
+# would need a try/except of course
+# (k,v)=values.split('=')
+# d=getattr(namespace,self.dest)
+# d[k]=v
+#####
+#parser.add_argument ("-X","--extra",dest='extras', default={}, action=ExtraArgAction,
+# help="set extra flags, testbed dependent, e.g. --extra enabled=true")
+
+##############################
+# these are not needed from the outside
+def terminal_render_plural (how_many, name,names=None):
+ if not names: names="%ss"%name
+ if how_many<=0: return "No %s"%name
+ elif how_many==1: return "1 %s"%name
+ else: return "%d %s"%(how_many,names)
+
+def terminal_render_default (record,options):
+ print "%s (%s)" % (record['hrn'], record['type'])
+def terminal_render_user (record, options):
+ print "%s (User)"%record['hrn'],
+ if record.get('reg-pi-authorities',None): print " [PI at %s]"%(" and ".join(record['reg-pi-authorities'])),
+ if record.get('reg-slices',None): print " [IN slices %s]"%(" and ".join(record['reg-slices'])),
+ user_keys=record.get('reg-keys',[])
+ if not options.verbose:
+ print " [has %s]"%(terminal_render_plural(len(user_keys),"key"))
+ else:
+ print ""
+ for key in user_keys: print 8*' ',key.strip("\n")
+
+def terminal_render_slice (record, options):
+ print "%s (Slice)"%record['hrn'],
+ if record.get('reg-researchers',None): print " [USERS %s]"%(" and ".join(record['reg-researchers'])),
+# print record.keys()
+ print ""
+def terminal_render_authority (record, options):
+ print "%s (Authority)"%record['hrn'],
+ if record.get('reg-pis',None): print " [PIS %s]"%(" and ".join(record['reg-pis'])),
+ print ""
+def terminal_render_node (record, options):
+ print "%s (Node)"%record['hrn']
+
+
+### used in sfi list
+def terminal_render (records,options):
+ # sort records by type
+ grouped_by_type={}
+ for record in records:
+ type=record['type']
+ if type not in grouped_by_type: grouped_by_type[type]=[]
+ grouped_by_type[type].append(record)
+ group_types=grouped_by_type.keys()
+ group_types.sort()
+ for type in group_types:
+ group=grouped_by_type[type]
+# print 20 * '-', type
+ try: renderer=eval('terminal_render_'+type)
+ except: renderer=terminal_render_default
+ for record in group: renderer(record,options)
+
+
+####################
+def filter_records(type, records):
+ filtered_records = []
+ for record in records:
+ if (record['type'] == type) or (type == "all"):
+ filtered_records.append(record)
+ return filtered_records
+
+
from sfa.client.sfi import save_records_to_file
from sfa.trust.hierarchy import Hierarchy
from sfa.trust.gid import GID
+from sfa.trust.certificate import convert_public_key
from sfa.client.candidates import Candidates
+from sfa.client.common import optparse_listvalue_callback, terminal_render, filter_records
+
pprinter = PrettyPrinter(indent=4)
try:
except:
help_basedir='*unable to locate Hierarchy().basedir'
-def optparse_listvalue_callback(option, opt, value, parser):
- setattr(parser.values, option.dest, value.split(','))
-
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('options', []).insert(0, (args, kwargs))
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='authority to list (hrn/urn - mandatory)')
@args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
@args('-r', '--recursive', dest='recursive', metavar='<recursive>', help='list all child records',
- action='store_true', default=False)
- def list(self, xrn, type=None, recursive=False):
+ action='store_true', default=False)
+ @args('-v', '--verbose', dest='verbose', action='store_true', default=False)
+ def list(self, xrn, type=None, recursive=False, verbose=False):
"""List names registered at a given authority - possibly filtered by type"""
xrn = Xrn(xrn, type)
- options = {'recursive': recursive}
- records = self.api.manager.List(self.api, xrn.get_hrn(), options=options)
- for record in records:
- if not type or record['type'] == type:
- print "%s (%s)" % (record['hrn'], record['type'])
+ options_dict = {'recursive': recursive}
+ records = self.api.manager.List(self.api, xrn.get_hrn(), options=options_dict)
+ list = filter_records(type, records)
+ # terminal_render expects an options object
+ class Options: pass
+ options=Options()
+ options.verbose=verbose
+ terminal_render (list, options)
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
choices=('text', 'xml', 'simple'), help='display record in different formats')
def show(self, xrn, type=None, format=None, outfile=None):
"""Display details for a registered object"""
- records = self.api.manager.Resolve(self.api, xrn, type, True)
+ records = self.api.manager.Resolve(self.api, xrn, type, details=True)
for record in records:
sfa_record = Record(dict=record)
sfa_record.dump(format)
record_dict['pi'] = pis
return record_dict
+
+ @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
+ @args('-t', '--type', dest='type', metavar='<type>', help='object type (mandatory)',)
+ @args('-a', '--all', dest='all', metavar='<all>', action='store_true', default=False, help='check all users GID')
+ @args('-v', '--verbose', dest='verbose', metavar='<verbose>', action='store_true', default=False, help='verbose mode: display user\'s hrn ')
+ def check_gid(self, xrn=None, type=None, all=None, verbose=None):
+        """Check the correspondence between the GID and the PubKey"""
+
+ # db records
+ from sfa.storage.alchemy import dbsession
+ from sfa.storage.model import RegRecord
+ db_query = dbsession.query(RegRecord).filter_by(type=type)
+ if xrn and not all:
+ hrn = Xrn(xrn).get_hrn()
+ db_query = db_query.filter_by(hrn=hrn)
+ elif all and xrn:
+ print "Use either -a or -x <xrn>, not both !!!"
+ sys.exit(1)
+ elif not all and not xrn:
+ print "Use either -a or -x <xrn>, one of them is mandatory !!!"
+ sys.exit(1)
+
+ records = db_query.all()
+ if not records:
+ print "No Record found"
+ sys.exit(1)
+
+ OK = []
+ NOK = []
+ ERROR = []
+ NOKEY = []
+ for record in records:
+ # get the pubkey stored in SFA DB
+ if record.reg_keys:
+ db_pubkey_str = record.reg_keys[0].key
+ try:
+ db_pubkey_obj = convert_public_key(db_pubkey_str)
+ except:
+ ERROR.append(record.hrn)
+ continue
+ else:
+ NOKEY.append(record.hrn)
+ continue
+
+ # get the pubkey from the gid
+ gid_str = record.gid
+ gid_obj = GID(string = gid_str)
+ gid_pubkey_obj = gid_obj.get_pubkey()
+
+ # Check if gid_pubkey_obj and db_pubkey_obj are the same
+ check = gid_pubkey_obj.is_same(db_pubkey_obj)
+ if check :
+ OK.append(record.hrn)
+ else:
+ NOK.append(record.hrn)
+
+ if not verbose:
+ print "Users NOT having a PubKey: %s\n\
+Users having a non RSA PubKey: %s\n\
+Users having a GID/PubKey correpondence OK: %s\n\
+Users having a GID/PubKey correpondence Not OK: %s\n"%(len(NOKEY), len(ERROR), len(OK), len(NOK))
+ else:
+ print "Users NOT having a PubKey: %s and are: \n%s\n\n\
+Users having a non RSA PubKey: %s and are: \n%s\n\n\
+Users having a GID/PubKey correpondence OK: %s and are: \n%s\n\n\
+Users having a GID/PubKey correpondence NOT OK: %s and are: \n%s\n\n"%(len(NOKEY),NOKEY, len(ERROR), ERROR, len(OK), OK, len(NOK), NOK)
+
+
+
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
@args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
@args('-e', '--email', dest='email', default="",
help='Description, useful for slices', default=None)
@args('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
default=None)
- @args('-s', '--slices', dest='slices', metavar='<slices>', help='slice xrns',
+ @args('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
- @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='slice researchers',
+ @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@args('-p', '--pis', dest='pis', metavar='<PIs>',
- help='Principal Investigators/Project Managers ',
+ help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
def register(self, xrn, type=None, url=None, description=None, key=None, slices='',
pis='', researchers='',email=''):
help='Description', default=None)
@args('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
default=None)
- @args('-s', '--slices', dest='slices', metavar='<slices>', help='slice xrns',
+ @args('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
- @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='slice researchers',
+ @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
@args('-p', '--pis', dest='pis', metavar='<PIs>',
- help='Principal Investigators/Project Managers ',
+ help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
def update(self, xrn, type=None, url=None, description=None, key=None, slices='',
pis='', researchers=''):
"""Update an existing Registry record"""
+ print 'incoming PIS',pis
record_dict = self._record_dict(xrn=xrn, type=type, url=url, description=description,
key=key, slices=slices, researchers=researchers, pis=pis)
self.api.manager.Update(self.api, record_dict)
importer.run()
def sync_db(self):
- """Initiailize or upgrade the db"""
+ """Initialize or upgrade the db"""
from sfa.storage.dbschema import DBSchema
dbschema=DBSchema()
dbschema.init_or_upgrade
# see optimizing dependencies below
from sfa.trust.certificate import Keypair, Certificate
from sfa.trust.credential import Credential
+from sfa.trust.gid import GID
##########
# a helper class to implement the bootstrapping of crypto. material
# assuming we are starting from scratch on the client side
# the usage model is to reuse an existing keypair)
#
# there might be a more portable, i.e. less language-dependant way, to
-# implement this step by exec'ing the openssl command a known
-# successful attempt at this approach that worked for Java is
-# documented below
+# implement this step by exec'ing the openssl command.
+# a known successful attempt at this approach that worked
+# for Java is documented below
# http://nam.ece.upatras.gr/fstoolkit/trac/wiki/JavaSFAClient
#
####################
def private_key (self):
self.assert_private_key()
return self.private_key_filename()
+
+ def delegate_credential_string (self, original_credential, to_hrn, to_type='authority'):
+ """
+ sign a delegation credential to someone else
+
+        original_credential : typically one's user- or slice- credential to be delegated to somebody else
+ to_hrn : the hrn of the person that will be allowed to do stuff on our behalf
+ to_type : goes with to_hrn, usually 'user' or 'authority'
+
+ returns a string with the delegated credential
+
+ this internally uses self.my_gid()
+ it also retrieves the gid for to_hrn/to_type
+ and uses Credential.delegate()"""
+
+ # the gid and hrn of the object we are delegating
+ if isinstance (original_credential, str):
+ original_credential = Credential (string=original_credential)
+ original_gid = original_credential.get_gid_object()
+ original_hrn = original_gid.get_hrn()
+
+ if not original_credential.get_privileges().get_all_delegate():
+ self.logger.error("delegate_credential_string: original credential %s does not have delegate bit set"%original_hrn)
+ return
+
+ # the delegating user's gid
+ my_gid = self.my_gid()
+
+ # retrieve the GID for the entity that we're delegating to
+ to_gidfile = self.gid (to_hrn,to_type)
+# to_gid = GID ( to_gidfile )
+# to_hrn = delegee_gid.get_hrn()
+# print 'to_hrn',to_hrn
+ delegated_credential = original_credential.delegate(to_gidfile, self.private_key(), my_gid)
+ return delegated_credential.save_to_string(save_parents=True)
CM_PORT=12346
-# utility methods here
-def optparse_listvalue_callback(option, option_string, value, parser):
- setattr(parser.values, option.dest, value.split(','))
-
-# a code fragment that could be helpful for argparse which unfortunately is
-# available with 2.7 only, so this feels like too strong a requirement for the client side
-#class ExtraArgAction (argparse.Action):
-# def __call__ (self, parser, namespace, values, option_string=None):
-# would need a try/except of course
-# (k,v)=values.split('=')
-# d=getattr(namespace,self.dest)
-# d[k]=v
-#####
-#parser.add_argument ("-X","--extra",dest='extras', default={}, action=ExtraArgAction,
-# help="set extra flags, testbed dependent, e.g. --extra enabled=true")
-
-def optparse_dictvalue_callback (option, option_string, value, parser):
- try:
- (k,v)=value.split('=',1)
- d=getattr(parser.values, option.dest)
- d[k]=v
- except:
- parser.print_help()
- sys.exit(1)
+from sfa.client.common import optparse_listvalue_callback, optparse_dictvalue_callback, \
+ terminal_render, filter_records
# display methods
def display_rspec(rspec, format='rspec'):
("version", ""),
("list", "authority"),
("show", "name"),
- ("add", "record"),
- ("update", "record"),
+ ("add", "[record]"),
+ ("update", "[record]"),
("remove", "name"),
("resources", ""),
("describe", "slice_hrn"),
("shutdown", "slice_hrn"),
("get_ticket", "slice_hrn rspec"),
("redeem_ticket", "ticket"),
- ("delegate", "name"),
+ ("delegate", "to_hrn"),
("gid", "[name]"),
("trusted", "cred"),
("config", ""),
parser.add_option('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
parser.add_option('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
parser.add_option('-e', '--email', dest='email', default="", help="email (mandatory for users)")
-# use --extra instead
-# parser.add_option('-u', '--url', dest='url', metavar='<url>', default=None, help="URL, useful for slices")
-# parser.add_option('-d', '--description', dest='description', metavar='<description>',
-# help='Description, useful for slices', default=None)
parser.add_option('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
default=None)
- parser.add_option('-s', '--slices', dest='slices', metavar='<slices>', help='slice xrns',
+ parser.add_option('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
parser.add_option('-r', '--researchers', dest='researchers', metavar='<researchers>',
- help='slice researchers', default='', type="str", action='callback',
+ help='Set/replace slice researchers', default='', type="str", action='callback',
callback=optparse_listvalue_callback)
- parser.add_option('-p', '--pis', dest='pis', metavar='<PIs>', help='Principal Investigators/Project Managers',
+ parser.add_option('-p', '--pis', dest='pis', metavar='<PIs>', help='Set/replace Principal Investigators/Project Managers',
default='', type="str", action='callback', callback=optparse_listvalue_callback)
-# use --extra instead
-# parser.add_option('-f', '--firstname', dest='firstname', metavar='<firstname>', help='user first name')
-# parser.add_option('-l', '--lastname', dest='lastname', metavar='<lastname>', help='user last name')
parser.add_option ('-X','--extra',dest='extras',default={},type='str',metavar="<EXTRA_ASSIGNS>",
action="callback", callback=optparse_dictvalue_callback, nargs=1,
help="set extra/testbed-dependent flags, e.g. --extra enabled=true")
if command == 'list':
parser.add_option("-r", "--recursive", dest="recursive", action='store_true',
help="list all child records", default=False)
+ parser.add_option("-v", "--verbose", dest="verbose", action='store_true',
+ help="gives details, like user keys", default=False)
if command in ("delegate"):
parser.add_option("-u", "--user",
- action="store_true", dest="delegate_user", default=False,
- help="delegate user credential")
- parser.add_option("-s", "--slice", dest="delegate_slice",
- help="delegate slice credential", metavar="HRN", default=None)
+ action="store_true", dest="delegate_user", default=False,
+ help="delegate your own credentials; default if no other option is provided")
+ parser.add_option("-s", "--slice", dest="delegate_slices",action='append',default=[],
+ metavar="slice_hrn", help="delegate cred. for slice HRN")
+ parser.add_option("-a", "--auths", dest='delegate_auths',action='append',default=[],
+ metavar='auth_hrn', help="delegate cred for auth HRN")
+            # this is primarily a shorthand for -a my_hrn^
+ parser.add_option("-p", "--pi", dest='delegate_pi', default=None, action='store_true',
+ help="delegate your PI credentials, so s.t. like -a your_hrn^")
+ parser.add_option("-A","--to-authority",dest='delegate_to_authority',action='store_true',default=False,
+ help="""by default the mandatory argument is expected to be a user,
+use this if you mean an authority instead""")
if command in ("version"):
parser.add_option("-R","--registry-version",
# Main: parse arguments and dispatch to command
#
def dispatch(self, command, command_options, command_args):
- return getattr(self, command)(command_options, command_args)
+ method=getattr(self, command,None)
+ if not method:
+ print "Unknown command %s"%command
+ return
+ return method(command_options, command_args)
def main(self):
self.sfi_parser = self.create_parser()
try:
self.dispatch(command, command_options, command_args)
- except KeyError:
- self.logger.critical ("Unknown command %s"%command)
+ except:
+ self.logger.log_exc ("sfi command %s failed"%command)
sys.exit(1)
return
sys.exit(-1)
return self.client_bootstrap.authority_credential_string (self.authority)
+ def authority_credential_string(self, auth_hrn):
+ return self.client_bootstrap.authority_credential_string (auth_hrn)
+
def slice_credential_string(self, name):
return self.client_bootstrap.slice_credential_string (name)
raise Exception, "Not enough parameters for the 'list' command"
# filter on person, slice, site, node, etc.
- # THis really should be in the self.filter_records funct def comment...
+ # This really should be in the self.filter_records funct def comment...
list = filter_records(options.type, list)
- for record in list:
- print "%s (%s)" % (record['hrn'], record['type'])
+ terminal_render (list, options)
if options.file:
save_records_to_file(options.file, list, options.fileformat)
return
self.print_help()
sys.exit(1)
hrn = args[0]
- record_dicts = self.registry().Resolve(hrn, self.my_credential_string)
+ # explicitly require Resolve to run in details mode
+ record_dicts = self.registry().Resolve(hrn, self.my_credential_string, {'details':True})
record_dicts = filter_records(options.type, record_dicts)
if not record_dicts:
self.logger.error("No record of type %s"% options.type)
return
def add(self, options, args):
- "add record into registry from xml file (Register)"
+ "add record into registry by using the command options (Recommended) or from xml file (Register)"
auth_cred = self.my_authority_credential_string()
if options.show_credential:
show_credentials(auth_cred)
record_dict = {}
- if len(args) > 0:
- record_filepath = args[0]
- rec_file = self.get_record_file(record_filepath)
- record_dict.update(load_record_from_file(rec_file).todict())
+ if len(args) > 1:
+ self.print_help()
+ sys.exit(1)
+ if len(args)==1:
+ try:
+ record_filepath = args[0]
+ rec_file = self.get_record_file(record_filepath)
+ record_dict.update(load_record_from_file(rec_file).todict())
+ except:
+ print "Cannot load record file %s"%record_filepath
+ sys.exit(1)
if options:
record_dict.update(load_record_from_opts(options).todict())
# we should have a type by now
return self.registry().Register(record_dict, auth_cred)
def update(self, options, args):
- "update record into registry from xml file (Update)"
+ "update record into registry by using the command options (Recommended) or from xml file (Update)"
record_dict = {}
if len(args) > 0:
record_filepath = args[0]
server = self.sliceapi()
# creds
creds = [self.my_credential_string]
- if options.delegate:
- delegated_cred = self.delegate_cred(self.my_credential_string, get_authority(self.authority))
- creds.append(delegated_cred)
# options and call_id when supported
api_options = {}
api_options['call_id']=unique_call_id()
# keys: [<ssh key A>, <ssh key B>]
# }]
users = []
+ # xxx Thierry 2012 sept. 21
+ # contrary to what I was first thinking, calling Resolve with details=False does not yet work properly here
+ # I am turning details=True on again on a - hopefully - temporary basis, just to get this whole thing to work again
slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string])
- if slice_records and 'researcher' in slice_records[0] and slice_records[0]['researcher']!=[]:
+ # slice_records = self.registry().Resolve(slice_urn, [self.my_credential_string], {'details':True})
+ if slice_records and 'reg-researchers' in slice_records[0] and slice_records[0]['reg-researchers']:
slice_record = slice_records[0]
- user_hrns = slice_record['researcher']
+ user_hrns = slice_record['reg-researchers']
user_urns = [hrn_to_urn(hrn, 'user') for hrn in user_hrns]
user_records = self.registry().Resolve(user_urns, [self.my_credential_string])
# creds
slice_cred = self.slice_credential(slice_hrn)
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# options and call_id when supported
api_options = {}
# creds
slice_cred = self.slice_credential(slice_hrn)
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# options and call_id when supported
api_options = {}
# cred
slice_cred = self.slice_credential_string(args[0])
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# xxx Thierry - does this not need an api_options as well ?
result = server.Start(slice_urn, creds)
value = ReturnValue.get_value(result)
# cred
slice_cred = self.slice_credential_string(args[0])
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
result = server.Stop(slice_urn, creds)
value = ReturnValue.get_value(result)
if self.options.raw:
# creds
slice_cred = self.slice_credential(args[0])
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# options and call_id when supported
api_options = {}
api_options['call_id']=unique_call_id()
# creds
slice_cred = self.slice_credential(slice_hrn)
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
result = server.Shutdown(slice_urn, creds)
value = ReturnValue.get_value(result)
if self.options.raw:
# creds
slice_cred = self.slice_credential_string(slice_hrn)
creds = [slice_cred]
- if options.delegate:
- delegated_cred = self.delegate_cred(slice_cred, get_authority(self.authority))
- creds.append(delegated_cred)
# rspec
rspec_file = self.get_rspec_file(rspec_path)
rspec = open(rspec_file).read()
self.print_help()
sys.exit(1)
target_hrn = args[0]
- gid = self.registry().CreateGid(self.my_credential_string, target_hrn, self.client_bootstrap.my_gid_string())
+ my_gid_string = open(self.client_bootstrap.my_gid()).read()
+ gid = self.registry().CreateGid(self.my_credential_string, target_hrn, my_gid_string)
if options.file:
filename = options.file
else:
GID(string=gid).save_to_file(filename)
- def delegate(self, options, args):
+ def delegate (self, options, args):
"""
(locally) create delegate credential for use by given hrn
"""
- delegee_hrn = args[0]
- if options.delegate_user:
- cred = self.delegate_cred(self.my_credential_string, delegee_hrn, 'user')
- elif options.delegate_slice:
- slice_cred = self.slice_credential_string(options.delegate_slice)
- cred = self.delegate_cred(slice_cred, delegee_hrn, 'slice')
- else:
- self.logger.warning("Must specify either --user or --slice <hrn>")
- return
- delegated_cred = Credential(string=cred)
- object_hrn = delegated_cred.get_gid_object().get_hrn()
+ if len(args) != 1:
+ self.print_help()
+ sys.exit(1)
+ to_hrn = args[0]
+ # support for several delegations in the same call
+ # so first we gather the things to do
+ tuples=[]
+ for slice_hrn in options.delegate_slices:
+ message="%s.slice"%slice_hrn
+ original = self.slice_credential_string(slice_hrn)
+ tuples.append ( (message, original,) )
+ if options.delegate_pi:
+ my_authority=self.authority
+ message="%s.pi"%my_authority
+ original = self.my_authority_credential_string()
+ tuples.append ( (message, original,) )
+ for auth_hrn in options.delegate_auths:
+ message="%s.auth"%auth_hrn
+ original=self.authority_credential_string(auth_hrn)
+ tuples.append ( (message, original, ) )
+ # if nothing was specified at all at this point, let's assume -u
+ if not tuples: options.delegate_user=True
+ # this user cred
if options.delegate_user:
- dest_fn = os.path.join(self.options.sfi_dir, get_leaf(delegee_hrn) + "_"
- + get_leaf(object_hrn) + ".cred")
- elif options.delegate_slice:
- dest_fn = os.path.join(self.options.sfi_dir, get_leaf(delegee_hrn) + "_slice_"
- + get_leaf(object_hrn) + ".cred")
-
- delegated_cred.save_to_file(dest_fn, save_parents=True)
-
- self.logger.info("delegated credential for %s to %s and wrote to %s"%(object_hrn, delegee_hrn,dest_fn))
+ message="%s.user"%self.user
+ original = self.my_credential_string
+ tuples.append ( (message, original, ) )
+
+        # default type for the beneficiary is user, unless -A
+ if options.delegate_to_authority: to_type='authority'
+ else: to_type='user'
+
+ # let's now handle all this
+ # it's all in the filenaming scheme
+ for (message,original) in tuples:
+ delegated_string = self.client_bootstrap.delegate_credential_string(original, to_hrn, to_type)
+ delegated_credential = Credential (string=delegated_string)
+ filename = os.path.join ( self.options.sfi_dir,
+ "%s_for_%s.%s.cred"%(message,to_hrn,to_type))
+ delegated_credential.save_to_file(filename, save_parents=True)
+ self.logger.info("delegated credential for %s to %s and wrote to %s"%(message,to_hrn,filename))
def trusted(self, options, args):
"""
--- /dev/null
+###################################### DUMMY TESTBED DRIVER FOR SFA ############################################
+
+In order to ease the adoption of SFA by testbed owners, we decided to implement this DUMMY TESTBED DRIVER FOR SFA, which represents one flavour of SFA (dummy).
+
+Testbed owners deciding to wrap their testbed with SFA can follow this small step-by-step guide to learn how SFA works, how it interacts with the testbed, and what pieces are needed to glue SFA and the testbed together.
+
+
+STEP-BY-STEP GUIDE :
+
+1. Install SFA (http://svn.planet-lab.org/wiki/SFATutorialInstall#InstallingSFA)
+(On RPM based OS, the SFA sources go here : /usr/lib/python2.7/site-packages/sfa )
+
+2. Launch the Dummy testbed XML-RPC API:
+
+# python /usr/lib/python2.7/site-packages/sfa/dummy/dummy_testbed_api.py
+
+3. Configure SFA to the "dummy" flavour as follow :
+
+# sfa-config-tty
+Enter command (u for usual changes, w to save, ? for help) u
+== sfa_generic_flavour : [dummy] dummy ("dummy" flavour)
+== sfa_interface_hrn : [pla] pla (Choose your Authority name)
+== sfa_registry_root_auth : [pla] pla (Choose your Authority name)
+== sfa_registry_host : [localhost] localhost
+== sfa_aggregate_host : [localhost] localhost
+== sfa_sm_host : [localhost] localhost
+== sfa_db_host : [localhost] localhost
+== sfa_dummy_url : [http://127.0.0.1:8080]
+Enter command (u for usual changes, w to save, ? for help) w
+Wrote /etc/sfa/configs/site_config
+Merged
+ /etc/sfa/default_config.xml
+and /etc/sfa/configs/site_config
+into /etc/sfa/sfa_config
+You might want to type 'r' (restart sfa), 'R' (reload sfa) or 'q' (quit)
+Enter command (u for usual changes, w to save, ? for help) r
+==================== Stopping sfa
+Shutting down SFA [ OK ]
+==================== Starting sfa
+SFA: Checking for PostgreSQL server [ OK ]
+SFA: installing peer certs [ OK ]
+SFA: Registry [ OK ]
+SFA: Aggregate [ OK ]
+SFA: SliceMgr [ OK ]
+Enter command (u for usual changes, w to save, ? for help) q
+
+4. Add your user to the dummy testbed and attach it to a slice:
+
+Edit /usr/lib/python2.7/site-packages/sfa/dummy/dummy_testbed_api_client.py with your user info and run:
+
+# python /usr/lib/python2.7/site-packages/sfa/dummy/dummy_testbed_api_client.py
+
+5. Import Dummy testbed data to SFA (users, slices, nodes):
+
+# sfaadmin.py reg import_registry
+
+6. Configure your SFI client (http://svn.planet-lab.org/wiki/SFATutorialConfigureSFA#ConfigureSFAClientSFI)
+
+7. Make a test:
+Update the following command with the Authority name you configured earlier.
+
+# sfi.py list pla.dummy
+
+8. Now continue testing SFA, have a look at the dummy driver code and write your testbed driver for SFA... Enjoy.
+
+
--- /dev/null
+import SimpleXMLRPCServer
+import time
+
+dummy_api_addr = ("localhost", 8080)
+
+# Fake Testbed DB
+
+nodes_list = []
+for i in range(1,11):
+ node = {'hostname': 'node'+str(i)+'.dummy-testbed.org', 'type': 'dummy-node', 'node_id': i}
+ nodes_list.append(node)
+
+slices_list = []
+for i in range(1,3):
+ slice = {'slice_name': 'slice'+str(i), 'user_ids': range(i,4,2), 'slice_id': i, 'node_ids': range(i,10,2), 'enabled': True, 'expires': int(time.time())+60*60*24*30}
+ slices_list.append(slice)
+
+users_list = []
+for i in range(1,5):
+ user = {'user_name': 'user'+str(i), 'user_id': i, 'email': 'user'+str(i)+'@dummy-testbed.org', 'keys': ['user_ssh_pub_key_'+str(i)]}
+ users_list.append(user)
+
+DB = {'nodes_list': nodes_list,'node_index': 11, 'slices_list': slices_list, 'slice_index': 3, 'users_list': users_list, 'user_index': 5}
+
+# Filter function for the GET methods
+
+def FilterList(myfilter, mylist):
+ result = []
+ result.extend(mylist)
+ for item in mylist:
+ for key in myfilter.keys():
+ if 'ids' in key:
+ pass
+ else:
+ if myfilter[key] != item[key]:
+ result.remove(item)
+ break
+ return result
+
+
+# RPC functions definition
+#GET
+def GetTestbedInfo():
+ return {'name': 'dummy', 'longitude': 123456, 'latitude': 654321, 'domain':'dummy-testbed.org'}
+
+def GetNodes(filter={}):
+ global DB
+ result = []
+ result.extend(DB['nodes_list'])
+ if 'node_ids' in filter:
+ for node in DB['nodes_list']:
+ if node['node_id'] not in filter['node_ids']:
+ result.remove(node)
+ if filter:
+ result = FilterList(filter, result)
+ return result
+
+def GetSlices(filter={}):
+ global DB
+ result = []
+ result.extend(DB['slices_list'])
+ if 'slice_ids' in filter:
+ for slice in DB['slices_list']:
+ if slice['slice_id'] not in filter['slice_ids']:
+ result.remove(slice)
+
+ if filter:
+ result = FilterList(filter, result)
+ return result
+
+
+def GetUsers(filter={}):
+ global DB
+ result = []
+ result.extend(DB['users_list'])
+ if 'user_ids' in filter:
+ for user in DB['users_list']:
+ if user['user_id'] not in filter['user_ids']:
+ result.remove(user)
+
+ if filter:
+ result = FilterList(filter, result)
+ return result
+
+
+#def GetKeys():
+
+
+
+#add
+
+def AddNode(node):
+ global DB
+ if not isinstance(node, dict):
+ return False
+ for key in node.keys():
+ if key not in ['hostname', 'type']:
+ return False
+ node['node_id'] = DB['node_index']
+ DB['node_index'] += 1
+ DB['nodes_list'].append(node)
+ return node['node_id']
+
+def AddSlice(slice):
+ global DB
+ if not isinstance(slice, dict):
+ return False
+ for key in slice.keys():
+ if key not in ['slice_name', 'user_ids', 'node_ids', 'enabled', 'expires']:
+ return False
+ slice['slice_id'] = DB['slice_index']
+ slice['expires'] = int(time.time())+60*60*24*30
+ DB['slice_index'] += 1
+ DB['slices_list'].append(slice)
+ return slice['slice_id']
+
+
+def AddUser(user):
+ global DB
+ if not isinstance(user, dict):
+ return False
+ for key in user.keys():
+ if key not in ['user_name', 'email', 'keys']:
+ return False
+ user['user_id'] = DB['user_index']
+ DB['user_index'] += 1
+ DB['users_list'].append(user)
+ return user['user_id']
+
+
+def AddUserKey(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for user in DB['users_list']:
+ if param['user_id'] == user['user_id']:
+ if 'keys' in user.keys():
+ user['keys'].append(param['key'])
+ else:
+ user['keys'] = [param['key']]
+ return True
+ return False
+ except:
+ return False
+
+def AddUserToSlice(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for slice in DB['slices_list']:
+ if param['slice_id'] == slice['slice_id']:
+ if not 'user_ids' in slice: slice['user_ids'] = []
+ slice['user_ids'].append(param['user_id'])
+ return True
+ return False
+ except:
+ return False
+
+def AddSliceToNodes(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for slice in DB['slices_list']:
+ if param['slice_id'] == slice['slice_id']:
+ if not 'node_ids' in slice: slice['node_ids'] = []
+ slice['node_ids'].extend(param['node_ids'])
+ return True
+ return False
+ except:
+ return False
+
+
+#Delete
+
+def DeleteNode(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for node in DB['nodes_list']:
+ if param['node_id'] == node['node_id']:
+ DB['nodes_list'].remove(node)
+ for slice in DB['slices_list']:
+ if param['node_id'] in slice['node_ids']:
+ slice['node_ids'].remove(param['node_id'])
+ return True
+ return False
+ except:
+ return False
+
+
+def DeleteSlice(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for slice in DB['slices_list']:
+ if param['slice_id'] == slice['slice_id']:
+ DB['slices_list'].remove(slice)
+ return True
+ return False
+ except:
+ return False
+
+
+def DeleteUser(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for user in DB['users_list']:
+ if param['user_id'] == user['user_id']:
+ DB['users_list'].remove(user)
+ for slice in DB['slices_list']:
+ if param['user_id'] in slice['user_ids']:
+ slice['user_ids'].remove(param['user_id'])
+ return True
+ return False
+ except:
+ return False
+
+
+def DeleteKey(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for user in DB['users_list']:
+ if param['key'] in user['keys']:
+ user['keys'].remove(param['key'])
+ return True
+ return False
+ except:
+ return False
+
+def DeleteUserFromSlice(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for slice in DB['slices_list']:
+ if param['slice_id'] == slice['slice_id'] and param['user_id'] in slice['user_ids']:
+ slice['user_ids'].remove(param['user_id'])
+ return True
+ return False
+ except:
+ return False
+
+
+def DeleteSliceFromNodes(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for slice in DB['slices_list']:
+ if param['slice_id'] == slice['slice_id']:
+ for node_id in param['node_ids']:
+ if node_id in slice['node_ids']: slice['node_ids'].remove(node_id)
+ return True
+ return False
+ except:
+ return False
+
+
+#Update
+
+def UpdateNode(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for node in DB['nodes_list']:
+ if param['node_id'] == node['node_id']:
+ for key in param['fields'].keys():
+ if key in ['hostname', 'type']:
+ node[key] = param['fields'][key]
+ return True
+ return False
+ except:
+ return False
+
+
+def UpdateSlice(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for slice in DB['slices_list']:
+ if param['slice_id'] == slice['slice_id']:
+ for key in param['fields'].keys():
+ if key in ['slice_name']:
+ slice[key] = param['fields'][key]
+ return True
+ return False
+ except:
+ return False
+
+
+def UpdateUser(param):
+ global DB
+ if not isinstance(param, dict):
+ return False
+ try:
+ for user in DB['users_list']:
+ if param['user_id'] == user['user_id']:
+ for key in param['fields'].keys():
+ if key in ['user_name', 'email']:
+ user[key] = param['fields'][key]
+ return True
+ return False
+ except:
+ return False
+
+
+
+
+# Instantiate the XMLRPC server
+dummy_api_server = SimpleXMLRPCServer.SimpleXMLRPCServer(dummy_api_addr)
+
+# RPC functions registration
+dummy_api_server.register_function(GetTestbedInfo)
+dummy_api_server.register_function(GetNodes)
+dummy_api_server.register_function(GetSlices)
+dummy_api_server.register_function(GetUsers)
+dummy_api_server.register_function(AddNode)
+dummy_api_server.register_function(AddSlice)
+dummy_api_server.register_function(AddUser)
+dummy_api_server.register_function(AddUserKey)
+dummy_api_server.register_function(AddUserToSlice)
+dummy_api_server.register_function(AddSliceToNodes)
+dummy_api_server.register_function(DeleteNode)
+dummy_api_server.register_function(DeleteSlice)
+dummy_api_server.register_function(DeleteUser)
+dummy_api_server.register_function(DeleteKey)
+dummy_api_server.register_function(DeleteUserFromSlice)
+dummy_api_server.register_function(DeleteSliceFromNodes)
+dummy_api_server.register_function(UpdateNode)
+dummy_api_server.register_function(UpdateSlice)
+dummy_api_server.register_function(UpdateUser)
+
+
+# Register Introspective functions
+dummy_api_server.register_introspection_functions()
+
+# Handle requests
+dummy_api_server.serve_forever()
+
+
+
--- /dev/null
+import xmlrpclib
+from datetime import datetime
+import time
+
+dummy_url = "http://localhost:8080"
+dummy_api = xmlrpclib.ServerProxy(dummy_url)
+
+# Edit the parameters with your user info:
+my_user_id = dummy_api.AddUser({'email': 'john.doe@test.net', 'user_name': 'john.doe', 'keys': ['copy here your ssh-rsa public key']})
+# Your user will be attached with the slice named : slice2 :
+dummy_api.AddUserToSlice({'slice_id': 2, 'user_id': my_user_id})
+
+
+print dummy_api.GetUsers()[-1]
+print dummy_api.GetSlices()[-1]
--- /dev/null
+#!/usr/bin/python
+from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn
+from sfa.util.sfatime import utcparse, datetime_to_string
+from sfa.util.sfalogging import logger
+
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.link import Link
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.granularity import Granularity
+from sfa.rspecs.version_manager import VersionManager
+
+from sfa.dummy.dummyxrn import DummyXrn, hostname_to_urn, hrn_to_dummy_slicename, slicename_to_hrn
+
+import time
+
+class DummyAggregate:
+
+ def __init__(self, driver):
+ self.driver = driver
+
+ def get_slice_and_slivers(self, slice_xrn):
+ """
+ Returns a dict of slivers keyed on the sliver's node_id
+ """
+ slivers = {}
+ slice = None
+ if not slice_xrn:
+ return (slice, slivers)
+ slice_urn = hrn_to_urn(slice_xrn, 'slice')
+ slice_hrn, _ = urn_to_hrn(slice_xrn)
+ slice_name = hrn_to_dummy_slicename(slice_hrn)
+ slices = self.driver.shell.GetSlices({'slice_name': slice_name})
+ if not slices:
+ return (slice, slivers)
+ slice = slices[0]
+
+ # sort slivers by node id
+ slice_nodes = []
+ if 'node_ids' in slice.keys():
+ slice_nodes = self.driver.shell.GetNodes({'node_ids': slice['node_ids']})
+ for node in slice_nodes:
+ slivers[node['node_id']] = node
+
+ return (slice, slivers)
+
+ def get_nodes(self, slice_xrn, slice=None,slivers=[], options={}):
+        # if we are dealing with a slice that has no node, just return
+        # an empty list
+ if slice_xrn:
+ if not slice or 'node_ids' not in slice.keys() or not slice['node_ids']:
+ return []
+
+ filter = {}
+ if slice and 'node_ids' in slice and slice['node_ids']:
+ filter['node_ids'] = slice['node_ids']
+
+ nodes = self.driver.shell.GetNodes(filter)
+
+ rspec_nodes = []
+ for node in nodes:
+ rspec_node = Node()
+ # xxx how to retrieve site['login_base']
+ site=self.driver.testbedInfo
+ rspec_node['component_id'] = hostname_to_urn(self.driver.hrn, site['name'], node['hostname'])
+ rspec_node['component_name'] = node['hostname']
+ rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+ rspec_node['authority_id'] = hrn_to_urn(DummyXrn.site_hrn(self.driver.hrn, site['name']), 'authority+sa')
+ rspec_node['exclusive'] = 'false'
+ rspec_node['hardware_types'] = [HardwareType({'name': 'plab-pc'}),
+ HardwareType({'name': 'pc'})]
+ # add site/interface info to nodes.
+ # assumes that sites, interfaces and tags have already been prepared.
+ if site['longitude'] and site['latitude']:
+ location = Location({'longitude': site['longitude'], 'latitude': site['latitude'], 'country': 'unknown'})
+ rspec_node['location'] = location
+
+ if node['node_id'] in slivers:
+ # add sliver info
+ sliver = slivers[node['node_id']]
+ rspec_node['client_id'] = node['hostname']
+ rspec_node['slivers'] = [sliver]
+
+ # slivers always provide the ssh service
+ login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': slice['slice_name']})
+ service = Services({'login': login})
+ rspec_node['services'] = [service]
+ rspec_nodes.append(rspec_node)
+ return rspec_nodes
+
+
+
+ def get_rspec(self, slice_xrn=None, version = None, options={}):
+
+ version_manager = VersionManager()
+ version = version_manager.get_version(version)
+ if not slice_xrn:
+ rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+ else:
+ rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+
+ slice, slivers = self.get_slice_and_slivers(slice_xrn)
+ rspec = RSpec(version=rspec_version, user_options=options)
+ if slice and 'expires' in slice:
+ rspec.xml.set('expires', datetime_to_string(utcparse(slice['expires'])))
+
+ nodes = self.get_nodes(slice_xrn, slice, slivers, options)
+ rspec.version.add_nodes(nodes)
+ # add sliver defaults
+ default_sliver = slivers.get(None, [])
+ if default_sliver:
+ default_sliver_attribs = default_sliver.get('tags', [])
+ for attrib in default_sliver_attribs:
+ logger.info(attrib)
+ rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
+
+ return rspec.toxml()
+
+
--- /dev/null
+import time
+import datetime
+#
+from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
+ RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+
+from sfa.util.sfalogging import logger
+from sfa.util.defaultdict import defaultdict
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
+from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf
+from sfa.util.cache import Cache
+
+# one would think the driver should not need to mess with the SFA db, but..
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
+
+# used to be used in get_ticket
+#from sfa.trust.sfaticket import SfaTicket
+
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+# the driver interface, mostly provides default behaviours
+from sfa.managers.driver import Driver
+
+from sfa.dummy.dummyshell import DummyShell
+from sfa.dummy.dummyaggregate import DummyAggregate
+from sfa.dummy.dummyslices import DummySlices
+from sfa.dummy.dummyxrn import DummyXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_dummy_slicename, xrn_to_hostname
+
+
+def list_to_dict(recs, key):
+ """
+ convert a list of dictionaries into a dictionary keyed on the
+ specified dictionary key
+ """
+ return dict ( [ (rec[key],rec) for rec in recs ] )
+
+#
+# DummyShell is just an xmlrpc serverproxy where methods can be sent as-is;
+#
+class DummyDriver (Driver):
+
+ # the cache instance is a class member so it survives across incoming requests
+ cache = None
+
+ def __init__ (self, config):
+ Driver.__init__ (self, config)
+ self.config = config
+ self.hrn = config.SFA_INTERFACE_HRN
+ self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ self.shell = DummyShell (config)
+ self.testbedInfo = self.shell.GetTestbedInfo()
+
+ ########################################
+ ########## registry oriented
+ ########################################
+
+ def augment_records_with_testbed_info (self, sfa_records):
+ return self.fill_record_info (sfa_records)
+
+ ##########
+    def register (self, sfa_record, hrn, pub_key):
+        """
+        Create the testbed-side object for a newly registered record and
+        return its testbed id (the registry 'pointer'); authorities have
+        no testbed counterpart and yield -1.  Existing objects are reused.
+        """
+        type = sfa_record['type']
+        dummy_record = self.sfa_fields_to_dummy_fields(type, hrn, sfa_record)
+
+        if type == 'authority':
+            # authorities are not represented in the dummy testbed
+            pointer = -1
+
+        elif type == 'slice':
+            slices = self.shell.GetSlices({'slice_name': dummy_record['slice_name']})
+            if not slices:
+                pointer = self.shell.AddSlice(dummy_record)
+            else:
+                pointer = slices[0]['slice_id']
+
+        elif type == 'user':
+            users = self.shell.GetUsers({'email':sfa_record['email']})
+            if not users:
+                pointer = self.shell.AddUser(dummy_record)
+            else:
+                pointer = users[0]['user_id']
+
+            # Add the user's key
+            if pub_key:
+                self.shell.AddUserKey({'user_id' : pointer, 'key' : pub_key})
+
+        elif type == 'node':
+            nodes = self.shell.GetNodes(dummy_record['hostname'])
+            if not nodes:
+                pointer = self.shell.AddNode(dummy_record)
+            else:
+                # bugfix: was users[0]['node_id'] -- 'users' is undefined in
+                # this branch; the existing node's id is what we want
+                pointer = nodes[0]['node_id']
+
+        return pointer
+
+ ##########
+    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
+        """
+        Push the updated registry record to the testbed; new_key is only
+        honoured for user records (anything else raises UnknownSfaType).
+        """
+        pointer = old_sfa_record['pointer']
+        type = old_sfa_record['type']
+        dummy_record=self.sfa_fields_to_dummy_fields(type, hrn, new_sfa_record)
+
+        # new_key implemented for users only
+        if new_key and type not in [ 'user' ]:
+            raise UnknownSfaType(type)
+
+
+        if type == "slice":
+            self.shell.UpdateSlice({'slice_id': pointer, 'fields': dummy_record})
+
+        elif type == "user":
+            self.shell.UpdateUser({'user_id': pointer, 'fields': dummy_record})
+
+            if new_key:
+                self.shell.AddUserKey({'user_id' : pointer, 'key' : new_key})
+
+        elif type == "node":
+            self.shell.UpdateNode({'node_id': pointer, 'fields': dummy_record})
+
+
+        return True
+
+
+    ##########
+    def remove (self, sfa_record):
+        """
+        Delete the testbed-side object behind a registry record; always
+        returns True (unknown types are silently ignored).
+        """
+        type=sfa_record['type']
+        pointer=sfa_record['pointer']
+        if type == 'user':
+            self.shell.DeleteUser({'user_id': pointer})
+        elif type == 'slice':
+            self.shell.DeleteSlice({'slice_id': pointer})
+        elif type == 'node':
+            self.shell.DeleteNode({'node_id': pointer})
+
+        return True
+
+
+
+
+
+ ##
+ # Convert SFA fields to Dummy testbed fields for use when registering or updating
+ # registry record in the dummy testbed
+ #
+
+    def sfa_fields_to_dummy_fields(self, type, hrn, sfa_record):
+        """
+        Translate an SFA registry record of the given type/hrn into the
+        field dict expected by the dummy testbed API.
+        """
+
+        dummy_record = {}
+
+        if type == "slice":
+            dummy_record["slice_name"] = hrn_to_dummy_slicename(hrn)
+
+        elif type == "node":
+            if "hostname" not in sfa_record:
+                raise MissingSfaInfo("hostname")
+            dummy_record["hostname"] = sfa_record["hostname"]
+            if "type" in sfa_record:
+                dummy_record["type"] = sfa_record["type"]
+            else:
+                # fall back to a placeholder when no node type is supplied
+                dummy_record["type"] = "dummy_type"
+
+        elif type == "authority":
+            dummy_record["name"] = hrn
+
+        elif type == "user":
+            # the dummy user name is derived from the email local part
+            dummy_record["user_name"] = sfa_record["email"].split('@')[0]
+            dummy_record["email"] = sfa_record["email"]
+
+        return dummy_record
+
+ ####################
+    def fill_record_info(self, records):
+        """
+        Given a (list of) SFA record, fill in the DUMMY TESTBED specific
+        and SFA specific fields in the record.
+        """
+        # normalize: accept a single record as well as a list
+        if not isinstance(records, list):
+            records = [records]
+
+        # each helper mutates the records in place
+        self.fill_record_dummy_info(records)
+        self.fill_record_hrns(records)
+        self.fill_record_sfa_info(records)
+        return records
+
+    def fill_record_dummy_info(self, records):
+        """
+        Fill in the DUMMY specific fields of a SFA record. This
+        involves calling the appropriate DUMMY method to retrieve the
+        database record for the object.
+
+        @param records: records to fill in field (in/out param)
+        """
+        # get ids by type
+        node_ids, slice_ids, user_ids = [], [], []
+        type_map = {'node': node_ids, 'slice': slice_ids, 'user': user_ids}
+
+        for record in records:
+            for type in type_map:
+                if type == record['type']:
+                    type_map[type].append(record['pointer'])
+
+        # get dummy records
+        nodes, slices, users = {}, {}, {}
+        if node_ids:
+            node_list = self.shell.GetNodes({'node_ids':node_ids})
+            nodes = list_to_dict(node_list, 'node_id')
+        if slice_ids:
+            slice_list = self.shell.GetSlices({'slice_ids':slice_ids})
+            slices = list_to_dict(slice_list, 'slice_id')
+        if user_ids:
+            user_list = self.shell.GetUsers({'user_ids': user_ids})
+            users = list_to_dict(user_list, 'user_id')
+
+        dummy_records = {'node': nodes, 'slice': slices, 'user': users}
+
+
+        # fill record info
+        for record in records:
+            # records with pointer==-1 do not have dummy info.
+            if record['pointer'] == -1:
+                continue
+
+            for type in dummy_records:
+                if record['type'] == type:
+                    if record['pointer'] in dummy_records[type]:
+                        record.update(dummy_records[type][record['pointer']])
+                    break
+            # fill in key info
+            # bugfix: was 'recors', a typo that raised NameError at runtime;
+            # also guard against users the testbed does not know about
+            if record['type'] == 'user' and record['pointer'] in dummy_records['user']:
+                record['key_ids'] = []
+                record['keys'] = []
+                for key in dummy_records['user'][record['pointer']]['keys']:
+                    # the dummy testbed does not track per-key ids
+                    record['key_ids'].append(-1)
+                    record['keys'].append(key)
+
+        return records
+
+    def fill_record_hrns(self, records):
+        """
+        convert dummy ids to hrns
+        """
+
+        # get ids
+        slice_ids, user_ids, node_ids = [], [], []
+        for record in records:
+            if 'user_ids' in record:
+                user_ids.extend(record['user_ids'])
+            if 'slice_ids' in record:
+                slice_ids.extend(record['slice_ids'])
+            if 'node_ids' in record:
+                node_ids.extend(record['node_ids'])
+
+        # get dummy records
+        slices, users, nodes = {}, {}, {}
+        if user_ids:
+            user_list = self.shell.GetUsers({'user_ids': user_ids})
+            users = list_to_dict(user_list, 'user_id')
+        if slice_ids:
+            slice_list = self.shell.GetSlices({'slice_ids': slice_ids})
+            slices = list_to_dict(slice_list, 'slice_id')
+        if node_ids:
+            node_list = self.shell.GetNodes({'node_ids': node_ids})
+            nodes = list_to_dict(node_list, 'node_id')
+
+        # convert ids to hrns
+        for record in records:
+            # get all relevant data
+            type = record['type']
+            pointer = record['pointer']
+            testbed_name = self.testbed_name()
+            auth_hrn = self.hrn
+            if pointer == -1:
+                continue
+
+            if 'user_ids' in record:
+                emails = [users[user_id]['email'] for user_id in record['user_ids'] \
+                          if user_id in users]
+                usernames = [email.split('@')[0] for email in emails]
+                user_hrns = [".".join([auth_hrn, testbed_name, username]) for username in usernames]
+                record['users'] = user_hrns
+            if 'slice_ids' in record:
+                slicenames = [slices[slice_id]['slice_name'] for slice_id in record['slice_ids'] \
+                              if slice_id in slices]
+                slice_hrns = [slicename_to_hrn(auth_hrn, slicename) for slicename in slicenames]
+                record['slices'] = slice_hrns
+            if 'node_ids' in record:
+                hostnames = [nodes[node_id]['hostname'] for node_id in record['node_ids'] \
+                             if node_id in nodes]
+                # bugfix: was 'login_base', which is undefined in this scope;
+                # hostname_to_hrn expects (auth, testbed_name, hostname)
+                node_hrns = [hostname_to_hrn(auth_hrn, testbed_name, hostname) for hostname in hostnames]
+                record['nodes'] = node_hrns
+
+
+        return records
+
+    def fill_record_sfa_info(self, records):
+        """
+        Fill in the SFA-level fields (geni_urn, researcher lists, email,
+        gid, ...) of each record; mutates records in place, returns None.
+        """
+
+        def startswith(prefix, values):
+            return [value for value in values if value.startswith(prefix)]
+
+        # get user ids
+        user_ids = []
+        for record in records:
+            user_ids.extend(record.get("user_ids", []))
+
+        # get sfa records for all records associated with these records.
+        # we'll replace pl ids (person_ids) with hrns from the sfa records
+        # we obtain
+
+        # get the registry records
+        user_list, users = [], {}
+        user_list = dbsession.query (RegRecord).filter(RegRecord.pointer.in_(user_ids))
+        # create a hrns keyed on the sfa record's pointer.
+        # Its possible for multiple records to have the same pointer so
+        # the dict's value will be a list of hrns.
+        users = defaultdict(list)
+        for user in user_list:
+            users[user.pointer].append(user)
+
+        # get the dummy records
+        dummy_user_list, dummy_users = [], {}
+        dummy_user_list = self.shell.GetUsers({'user_ids': user_ids})
+        dummy_users = list_to_dict(dummy_user_list, 'user_id')
+
+        # fill sfa info
+        for record in records:
+            # skip records with no pl info (top level authorities)
+            #if record['pointer'] == -1:
+            #    continue
+            sfa_info = {}
+            type = record['type']
+            logger.info("fill_record_sfa_info - incoming record typed %s"%type)
+            if (type == "slice"):
+                # all slice users are researchers
+                record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
+                # NOTE(review): 'PI' is initialized but never populated here
+                record['PI'] = []
+                record['researcher'] = []
+                for user_id in record.get('user_ids', []):
+                    hrns = [user.hrn for user in users[user_id]]
+                    record['researcher'].extend(hrns)
+
+            elif (type.startswith("authority")):
+                record['url'] = None
+                logger.info("fill_record_sfa_info - authority xherex")
+
+            elif (type == "node"):
+                sfa_info['dns'] = record.get("hostname", "")
+                # xxx TODO: URI, LatLong, IP, DNS
+
+            elif (type == "user"):
+                logger.info('setting user.email')
+                sfa_info['email'] = record.get("email", "")
+                sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
+                sfa_info['geni_certificate'] = record['gid']
+                # xxx TODO: PostalAddress, Phone
+            record.update(sfa_info)
+
+
+ ####################
+    def update_relation (self, subject_type, target_type, relation_name, subject_id, target_ids):
+        """
+        Sync a registry relation onto the testbed.  Only the
+        slice->user 'researcher' relation is supported: the slice's user
+        set is diffed against target_ids and users are added/removed.
+        """
+        # hard-wire the code for slice/user for now, could be smarter if needed
+        if subject_type =='slice' and target_type == 'user' and relation_name == 'researcher':
+            subject=self.shell.GetSlices ({'slice_id': subject_id})[0]
+            if 'user_ids' not in subject.keys():
+                subject['user_ids'] = []
+            current_target_ids = subject['user_ids']
+            add_target_ids = list ( set (target_ids).difference(current_target_ids))
+            del_target_ids = list ( set (current_target_ids).difference(target_ids))
+            logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
+            for target_id in add_target_ids:
+                self.shell.AddUserToSlice ({'user_id': target_id, 'slice_id': subject_id})
+                logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
+            for target_id in del_target_ids:
+                logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
+                self.shell.DeleteUserFromSlice ({'user_id': target_id, 'slice_id': subject_id})
+        else:
+            logger.info('unexpected relation %s to maintain, %s -> %s'%(relation_name,subject_type,target_type))
+
+
+ ########################################
+ ########## aggregate oriented
+ ########################################
+
+    # short identifier for this testbed flavour
+    def testbed_name (self): return "dummy"
+
+    # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+    def aggregate_version (self):
+        """
+        Advertise the rspec versions this aggregate accepts (request) and
+        produces (ad), as required by the GENI AM API GetVersion call.
+        """
+        version_manager = VersionManager()
+        ad_rspec_versions = []
+        request_rspec_versions = []
+        for rspec_version in version_manager.versions:
+            # '*' means the version is usable for both ads and requests
+            if rspec_version.content_type in ['*', 'ad']:
+                ad_rspec_versions.append(rspec_version.to_dict())
+            if rspec_version.content_type in ['*', 'request']:
+                request_rspec_versions.append(rspec_version.to_dict())
+        return {
+            'testbed':self.testbed_name(),
+            'geni_request_rspec_versions': request_rspec_versions,
+            'geni_ad_rspec_versions': ad_rspec_versions,
+        }
+
+    def list_slices (self, creds, options):
+        """
+        Return the urns of all slices known to the dummy testbed.
+        """
+
+        slices = self.shell.GetSlices()
+        slice_hrns = [slicename_to_hrn(self.hrn, slice['slice_name']) for slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+
+        return slice_urns
+
+    # first 2 args are None in case of resource discovery
+    def list_resources (self, slice_urn, slice_hrn, creds, options):
+        """
+        Return an rspec describing the testbed's resources, scoped to the
+        given slice when slice_urn is provided.
+        """
+
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+        # NOTE(review): version_string is computed but never used below
+        version_string = "rspec_%s" % (rspec_version)
+
+        aggregate = DummyAggregate(self)
+        rspec =  aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
+                                     options=options)
+
+        return rspec
+
+    def sliver_status (self, slice_urn, slice_hrn):
+        """
+        Report the geni status of the slivers attached to this slice,
+        together with per-sliver login information.
+        Raises SliverDoesNotExist when the slice or its nodes are absent.
+        """
+        # find out where this slice is currently running
+        slice_name = hrn_to_dummy_slicename(slice_hrn)
+
+        # bugfix: the GetSlices result was stored in 'slice' while the
+        # emptiness test referenced the undefined names 'slices' and
+        # 'slicename'; also the list itself was later used as a dict
+        slices = self.shell.GetSlices({'slice_name': slice_name})
+        if len(slices) == 0:
+            raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slice_name))
+        slice = slices[0]
+
+        # report about the local nodes only
+        nodes = self.shell.GetNodes({'node_ids':slice['node_ids']})
+
+        if len(nodes) == 0:
+            raise SliverDoesNotExist("You have not allocated any slivers here")
+
+        # get login info
+        user = {}
+        keys = []
+        # bugfix: initialize users so the per-node loop below cannot hit an
+        # unbound name when the slice has no attached users
+        users = []
+        if slice['user_ids']:
+            users = self.shell.GetUsers({'user_ids': slice['user_ids']})
+            # bugfix: the loop variable used to shadow the 'user' login dict
+            for slice_user in users:
+                keys.extend(slice_user['keys'])
+
+        user.update({'urn': slice_urn,
+                     'login': slice['slice_name'],
+                     'protocol': ['ssh'],
+                     'port': ['22'],
+                     'keys': keys})
+
+
+        result = {}
+        top_level_status = 'unknown'
+        if nodes:
+            top_level_status = 'ready'
+        result['geni_urn'] = slice_urn
+        result['dummy_login'] = slice['slice_name']
+        result['dummy_expires'] = datetime_to_string(utcparse(slice['expires']))
+        result['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
+
+        resources = []
+        for node in nodes:
+            res = {}
+            res['dummy_hostname'] = node['hostname']
+            res['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
+            sliver_id = Xrn(slice_urn, type='slice', id=node['node_id'], authority=self.hrn).urn
+            res['geni_urn'] = sliver_id
+            res['geni_status'] = 'ready'
+            res['geni_error'] = ''
+            # NOTE(review): kept as in the original -- this nests the user
+            # list inside another list; confirm with rspec consumers
+            res['users'] = [users]
+
+            resources.append(res)
+
+        result['geni_status'] = top_level_status
+        result['geni_resources'] = resources
+        return result
+
+    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+        """
+        Materialize a request rspec: make sure the slice exists on the
+        testbed and attach it to the requested nodes; returns the
+        resulting manifest rspec.
+        """
+
+        aggregate = DummyAggregate(self)
+        slices = DummySlices(self)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+        slice_record=None
+        if users:
+            slice_record = users[0].get('slice_record', {})
+
+        # parse rspec
+        rspec = RSpec(rspec_string)
+        # NOTE(review): requested_attributes is computed but never used below
+        requested_attributes = rspec.version.get_slice_attributes()
+
+        # ensure slice record exists
+        slice = slices.verify_slice(slice_hrn, slice_record, sfa_peer, options=options)
+        # ensure user records exists
+        #users = slices.verify_users(slice_hrn, slice, users, sfa_peer, options=options)
+
+        # add/remove slice from nodes
+        # a sliver node may be named by component_name (a hostname) or by
+        # a component_id urn that embeds the hostname
+        requested_slivers = []
+        for node in rspec.version.get_nodes_with_slivers():
+            hostname = None
+            if node.get('component_name'):
+                hostname = node.get('component_name').strip()
+            elif node.get('component_id'):
+                hostname = xrn_to_hostname(node.get('component_id').strip())
+            if hostname:
+                requested_slivers.append(hostname)
+        # translate hostnames to testbed node ids
+        requested_slivers_ids = []
+        for hostname in requested_slivers:
+            node_id = self.shell.GetNodes({'hostname': hostname})[0]['node_id']
+            requested_slivers_ids.append(node_id)
+        nodes = slices.verify_slice_nodes(slice, requested_slivers_ids)
+
+        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+
+    def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+        """
+        Detach the slice from all of its nodes.  Returns True on success
+        (including when the slice does not exist), False on failure.
+        """
+        slicename = hrn_to_dummy_slicename(slice_hrn)
+        slices = self.shell.GetSlices({'slice_name': slicename})
+        if not slices:
+            return True
+        slice = slices[0]
+
+        try:
+            self.shell.DeleteSliceFromNodes({'slice_id': slice['slice_id'], 'node_ids': slice['node_ids']})
+            return True
+        except:
+            # deliberately best-effort: report failure instead of propagating
+            return False
+
+    def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
+        """
+        Push a new expiration time (ISO string) to the testbed slice.
+        Returns True on success, False on failure; raises RecordNotFound
+        when the slice is unknown.
+        """
+        slicename = hrn_to_dummy_slicename(slice_hrn)
+        slices = self.shell.GetSlices({'slice_name': slicename})
+        if not slices:
+            raise RecordNotFound(slice_hrn)
+        slice = slices[0]
+        requested_time = utcparse(expiration_time)
+        # the testbed stores expiration as a unix epoch integer
+        record = {'expires': int(datetime_to_epoch(requested_time))}
+        try:
+            self.shell.UpdateSlice({'slice_id': slice['slice_id'], 'fields':record})
+            return True
+        except:
+            # deliberately best-effort: report failure instead of propagating
+            return False
+
+ # set the 'enabled' tag to True
+    def start_slice (self, slice_urn, slice_hrn, creds):
+        """
+        Enable the slice by setting its 'enabled' tag; raises
+        RecordNotFound for an unknown slice.
+        """
+        slicename = hrn_to_dummy_slicename(slice_hrn)
+        slices = self.shell.GetSlices({'slice_name': slicename})
+        if not slices:
+            raise RecordNotFound(slice_hrn)
+        slice_id = slices[0]['slice_id']
+        slice_enabled = slices[0]['enabled']
+        # just update the slice enabled tag
+        if not slice_enabled:
+            self.shell.UpdateSlice({'slice_id': slice_id, 'fields': {'enabled': True}})
+        return 1
+
+ # set the 'enabled' tag to False
+    def stop_slice (self, slice_urn, slice_hrn, creds):
+        """
+        Disable the slice by clearing its 'enabled' tag; raises
+        RecordNotFound for an unknown slice.
+        """
+        # bugfix: was hrn_to_pl_slicename, which is neither defined nor
+        # imported in this module (NameError at runtime)
+        slicename = hrn_to_dummy_slicename(slice_hrn)
+        slices = self.shell.GetSlices({'slice_name': slicename})
+        if not slices:
+            raise RecordNotFound(slice_hrn)
+        slice_id = slices[0]['slice_id']
+        slice_enabled = slices[0]['enabled']
+        # just update the slice enabled tag
+        if slice_enabled:
+            self.shell.UpdateSlice({'slice_id': slice_id, 'fields': {'enabled': False}})
+        return 1
+
+    def reset_slice (self, slice_urn, slice_hrn, creds):
+        # not supported by the dummy testbed
+        raise SfaNotImplemented ("reset_slice not available at this interface")
+
+    def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
+        # tickets are not supported by this driver (python2-style raise)
+        raise SfaNotImplemented,"DummyDriver.get_ticket needs a rewrite"
--- /dev/null
+import sys
+import xmlrpclib
+import socket
+from urlparse import urlparse
+
+from sfa.util.sfalogging import logger
+
+class DummyShell:
+    """
+    A simple xmlrpc shell to the dummy testbed API instance
+
+    """
+
+    # whitelist of remote methods this shell will forward; anything else
+    # is rejected in __getattr__ below
+    direct_calls = ['AddNode', 'AddSlice', 'AddUser', 'AddUserKey', 'AddUserToSlice', 'AddSliceToNodes',
+                    'GetTestbedInfo', 'GetNodes', 'GetSlices', 'GetUsers',
+                    'DeleteNode', 'DeleteSlice', 'DeleteUser', 'DeleteKey', 'DeleteUserFromSlice',
+                    'DeleteSliceFromNodes',
+                    'UpdateNode', 'UpdateSlice', 'UpdateUser',
+                    ]
+
+
+    def __init__ ( self, config ) :
+        # config.SFA_DUMMY_URL points at the dummy testbed xmlrpc server
+        url = config.SFA_DUMMY_URL
+        self.proxy = xmlrpclib.Server(url, verbose = False, allow_none = True)
+
+    def __getattr__(self, name):
+        # return a forwarding closure so shell.Method(args) proxies the
+        # xmlrpc call, but only for whitelisted method names
+        def func(*args, **kwds):
+            if not name in DummyShell.direct_calls:
+                raise Exception, "Illegal method call %s for DUMMY driver"%(name)
+            result=getattr(self.proxy, name)(*args, **kwds)
+            logger.debug('DummyShell %s returned ... '%(name))
+            return result
+        return func
+
--- /dev/null
+from types import StringTypes
+from collections import defaultdict
+
+from sfa.util.sfatime import utcparse, datetime_to_epoch
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
+
+from sfa.rspecs.rspec import RSpec
+
+from sfa.dummy.dummyxrn import DummyXrn, hrn_to_dummy_slicename
+
+MAXINT = 2L**31-1
+
+class DummySlices:
+
+
+ def __init__(self, driver):
+ self.driver = driver
+
+    def get_slivers(self, xrn, node=None):
+        """
+        Return the slivers of the slice named by xrn, as a list of dicts
+        carrying hrn, name, slice_id, expires and the users' ssh keys.
+        """
+        hrn, type = urn_to_hrn(xrn)
+
+        slice_name = hrn_to_dummy_slicename(hrn)
+
+        slices = self.driver.shell.GetSlices({'slice_name': slice_name})
+        slice = slices[0]
+        # Build up list of users and slice attributes
+        user_ids = slice['user_ids']
+        # Get user information
+        # bugfix: the filter key is 'user_ids' everywhere else in this code
+        all_users_list = self.driver.shell.GetUsers({'user_ids':user_ids})
+        all_users = {}
+        for user in all_users_list:
+            all_users[user['user_id']] = user
+
+        # Build up list of keys
+        all_keys = set()
+        for user in all_users_list:
+            # bugfix: sets have no extend(); update() adds all of the keys
+            all_keys.update(user['keys'])
+
+        slivers = []
+        for slice in slices:
+            keys = all_keys
+            # XXX Sanity check; though technically this should be a system invariant
+            # checked with an assertion
+            if slice['expires'] > MAXINT:  slice['expires']= MAXINT
+
+            slivers.append({
+                'hrn': hrn,
+                # bugfix: slice records use the 'slice_name' key, not 'name'
+                'name': slice['slice_name'],
+                'slice_id': slice['slice_id'],
+                'expires': slice['expires'],
+                'keys': keys,
+            })
+
+        return slivers
+
+    def get_sfa_peer(self, xrn):
+        """
+        Return the hrn of the authority that owns xrn's site, or None
+        when this interface is itself that authority.
+        """
+        hrn, type = urn_to_hrn(xrn)
+
+        # return the authority for this hrn or None if we are the authority
+        sfa_peer = None
+        slice_authority = get_authority(hrn)
+        # the site authority is one level above the slice authority
+        site_authority = get_authority(slice_authority)
+
+        if site_authority != self.driver.hrn:
+            sfa_peer = site_authority
+
+        return sfa_peer
+
+
+    def verify_slice_nodes(self, slice, requested_slivers):
+        """
+        Make the slice's node set match requested_slivers (a list of node
+        ids): attach missing nodes, detach nodes no longer requested.
+        Returns the nodes the slice was attached to *before* the change.
+        """
+        if 'node_ids' not in slice.keys():
+            slice['node_ids']=[]
+        nodes = self.driver.shell.GetNodes({'node_ids': slice['node_ids']})
+        current_slivers = [node['node_id'] for node in nodes]
+
+        # remove nodes not in rspec
+        deleted_nodes = list(set(current_slivers).difference(requested_slivers))
+
+        # add nodes from rspec
+        added_nodes = list(set(requested_slivers).difference(current_slivers))
+
+        try:
+            self.driver.shell.AddSliceToNodes({'slice_id': slice['slice_id'], 'node_ids': added_nodes})
+            self.driver.shell.DeleteSliceFromNodes({'slice_id': slice['slice_id'], 'node_ids': deleted_nodes})
+
+        except:
+            # best-effort: log and carry on rather than failing the call
+            logger.log_exc('Failed to add/remove slice from nodes')
+        return nodes
+
+
+
+    def verify_slice(self, slice_hrn, slice_record, sfa_peer, options={}):
+        """
+        Ensure a testbed slice exists for slice_hrn, creating it when
+        absent, and sync its expiration time from slice_record.
+        Returns the testbed slice dict.
+        """
+        slicename = hrn_to_dummy_slicename(slice_hrn)
+        # NOTE(review): parts/login_base are computed but never used below
+        parts = slicename.split("_")
+        login_base = parts[0]
+        slices = self.driver.shell.GetSlices({'slice_name': slicename})
+        if not slices:
+            slice = {'slice_name': slicename}
+            # add the slice
+            slice['slice_id'] = self.driver.shell.AddSlice(slice)
+            slice['node_ids'] = []
+            slice['user_ids'] = []
+        else:
+            slice = slices[0]
+            if slice_record.get('expires'):
+                requested_expires = int(datetime_to_epoch(utcparse(slice_record['expires'])))
+                # only touch the testbed when the expiration actually changes
+                if requested_expires and slice['expires'] != requested_expires:
+                    self.driver.shell.UpdateSlice( {'slice_id': slice['slice_id'], 'fields':{'expires' : requested_expires}})
+
+        return slice
+
+    def verify_users(self, slice_hrn, slice_record, users, sfa_peer, options={}):
+        """
+        Reconcile the requested slice users against the testbed: find
+        existing accounts by email or by site, create missing ones, sync
+        their keys, and bind them to the slice.  Returns the list of
+        newly created person dicts.
+
+        NOTE(review): this method issues PLC-style calls (GetPersons,
+        GetSites, AddPerson, AddPersonToSlice, ...) that are not listed in
+        DummyShell.direct_calls -- confirm against the dummy testbed API.
+        """
+        users_by_email = {}
+        users_dict = {}
+        # bugfix: was a plain dict -- appending to a missing site key below
+        # raised KeyError; defaultdict is imported at module level
+        users_by_site = defaultdict(list)
+        for user in users:
+            user['urn'] = user['urn'].lower()
+            hrn, type = urn_to_hrn(user['urn'])
+            username = get_leaf(hrn)
+            login_base = DummyXrn(xrn=user['urn']).dummy_login_base()
+            user['username'] = username
+            user['site'] = login_base
+
+            if 'email' in user:
+                user['email'] = user['email'].lower()
+                users_by_email[user['email']] = user
+                users_dict[user['email']] = user
+            else:
+                # no email: we will have to match the user up by site below
+                users_by_site[user['site']].append(user)
+
+        # start building a list of existing users
+        existing_user_ids = []
+        existing_user_ids_filter = []
+        if users_by_email:
+            existing_user_ids_filter.extend(users_by_email.keys())
+        if users_by_site:
+            for login_base in users_by_site:
+                users = users_by_site[login_base]
+                for user in users:
+                    existing_user_ids_filter.append(user['username']+'@geni.net')
+        if existing_user_ids_filter:
+            # get existing users by email
+            existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter},
+                                                          ['person_id', 'key_ids', 'email'])
+            existing_user_ids.extend([user['email'] for user in existing_users])
+
+        if users_by_site:
+            # get a list of user sites (based on requested user urns)
+            site_list = self.driver.shell.GetSites(users_by_site.keys(), \
+                ['site_id', 'login_base', 'person_ids'])
+            # get all existing users at these sites
+            sites = {}
+            site_user_ids = []
+            for site in site_list:
+                sites[site['site_id']] = site
+                site_user_ids.extend(site['person_ids'])
+
+            existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids,
+                                                                      ['person_id', 'key_ids', 'email', 'site_ids'])
+
+            # all requested users are either existing users or new (added) users
+            for login_base in users_by_site:
+                requested_site_users = users_by_site[login_base]
+                for requested_user in requested_site_users:
+                    user_found = False
+                    for existing_user in existing_site_persons_list:
+                        for site_id in existing_user['site_ids']:
+                            if site_id in sites:
+                                site = sites[site_id]
+                                # match on site plus email local-part prefix
+                                if login_base == site['login_base'] and \
+                                   existing_user['email'].startswith(requested_user['username']+'@'):
+                                    existing_user_ids.append(existing_user['email'])
+                                    requested_user['email'] = existing_user['email']
+                                    users_dict[existing_user['email']] = requested_user
+                                    user_found = True
+                                    break
+                        if user_found:
+                            break
+
+                    if user_found == False:
+                        # no match: fabricate a placeholder email
+                        fake_email = requested_user['username'] + '@geni.net'
+                        requested_user['email'] = fake_email
+                        users_dict[fake_email] = requested_user
+
+        # requested slice users
+        requested_user_ids = users_dict.keys()
+        # existing slice users
+        existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])}
+        existing_slice_users = self.driver.shell.GetPersons(existing_slice_users_filter,
+                                                            ['person_id', 'key_ids', 'email'])
+        existing_slice_user_ids = [user['email'] for user in existing_slice_users]
+
+        # users to be added, removed or updated
+        added_user_ids = set(requested_user_ids).difference(existing_user_ids)
+        added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids)
+        removed_user_ids = set(existing_slice_user_ids).difference(requested_user_ids)
+        updated_user_ids = set(existing_slice_user_ids).intersection(requested_user_ids)
+
+        # Remove stale users (only if we are not appending).
+        # Append by default.
+        append = options.get('append', True)
+        if append == False:
+            for removed_user_id in removed_user_ids:
+                self.driver.shell.DeletePersonFromSlice(removed_user_id, slice_record['name'])
+        # update_existing users
+        updated_users_list = [user for user in users_dict.values() if user['email'] in \
+          updated_user_ids]
+        self.verify_keys(existing_slice_users, updated_users_list, options)
+
+        added_persons = []
+        # add new users
+        for added_user_id in added_user_ids:
+            added_user = users_dict[added_user_id]
+            hrn, type = urn_to_hrn(added_user['urn'])
+            person = {
+                'first_name': added_user.get('first_name', hrn),
+                'last_name': added_user.get('last_name', hrn),
+                'email': added_user_id,
+                'peer_person_id': None,
+                'keys': [],
+                'key_ids': added_user.get('key_ids', []),
+            }
+            person['person_id'] = self.driver.shell.AddPerson(person)
+            added_persons.append(person)
+
+            # enable the account
+            self.driver.shell.UpdatePerson(person['person_id'], {'enabled': True})
+
+            # add person to site
+            self.driver.shell.AddPersonToSite(added_user_id, added_user['site'])
+
+            for key_string in added_user.get('keys', []):
+                key = {'key':key_string, 'key_type':'ssh'}
+                key['key_id'] = self.driver.shell.AddPersonKey(person['person_id'], key)
+                person['keys'].append(key)
+
+            # add the registry record
+#            if sfa_peer:
+#                peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
+#                    'pointer': person['person_id']}
+#                self.registry.register_peer_object(self.credential, peer_dict)
+
+        for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
+            # add person to the slice
+            self.driver.shell.AddPersonToSlice(added_slice_user_id, slice_record['name'])
+            # if this is a peer record then it should already be bound to a peer.
+            # no need to return worry about it getting bound later
+
+        return added_persons
+
+
+    def verify_keys(self, old_users, new_users, options={}):
+        """
+        Sync ssh keys: upload each new user's keys that the testbed does
+        not already have, and (when options['append'] is False) delete
+        keys that are no longer requested.
+        """
+        # existing keys
+        existing_keys = []
+        for user in old_users:
+            # bugfix: was append(user['keys']), which stored whole lists --
+            # that broke the 'not in' membership test below and made
+            # set(existing_keys) raise TypeError (lists are unhashable)
+            existing_keys.extend(user['keys'])
+        userdict = {}
+        for user in old_users:
+            userdict[user['email']] = user
+
+        # add new keys
+        requested_keys = []
+        updated_users = []
+        for user in new_users:
+            user_keys = user.get('keys', [])
+            updated_users.append(user)
+            for key_string in user_keys:
+                requested_keys.append(key_string)
+                if key_string not in existing_keys:
+                    key = key_string
+                    try:
+                        self.driver.shell.AddUserKey({'user_id': user['user_id'], 'key':key})
+                    except:
+                        # best-effort: a failed key upload must not abort the sync
+                        pass
+        # remove old keys (only if we are not appending)
+        append = options.get('append', True)
+        if append == False:
+            removed_keys = set(existing_keys).difference(requested_keys)
+            for key in removed_keys:
+                try:
+                    self.driver.shell.DeleteKey({'key': key})
+                except:
+                    # best-effort: ignore deletion failures
+                    pass
+
+
--- /dev/null
+# specialized Xrn class for Dummy TB
+import re
+from sfa.util.xrn import Xrn
+
+# temporary helper functions to use this module instead of namespace
+def hostname_to_hrn (auth, testbed_name, hostname):
+    # node hrn lives under <auth>.<testbed_name>
+    return DummyXrn(auth=auth+'.'+testbed_name,hostname=hostname).get_hrn()
+def hostname_to_urn(auth, testbed_name, hostname):
+    return DummyXrn(auth=auth+'.'+testbed_name,hostname=hostname).get_urn()
+def slicename_to_hrn (auth_hrn, slicename):
+    return DummyXrn(auth=auth_hrn,slicename=slicename).get_hrn()
+def email_to_hrn (auth_hrn, email):
+    return DummyXrn(auth=auth_hrn, email=email).get_hrn()
+def hrn_to_dummy_slicename (hrn):
+    return DummyXrn(xrn=hrn,type='slice').dummy_slicename()
+def hrn_to_dummy_authname (hrn):
+    return DummyXrn(xrn=hrn,type='any').dummy_authname()
+def xrn_to_hostname(hrn):
+    # bugfix: was PlXrn, which is undefined in this module (NameError);
+    # DummyXrn escapes/unescapes hostnames the same way
+    return Xrn.unescape(DummyXrn(xrn=hrn, type='node').get_leaf())
+
+class DummyXrn (Xrn):
+    """
+    Xrn specialization for the dummy testbed: builds hrns from
+    hostnames, slice names, emails or interface names, and converts
+    back to testbed-local names.
+    """
+
+    @staticmethod
+    def site_hrn (auth, testbed_name):
+        # a site's hrn is <auth>.<testbed_name>
+        return '.'.join([auth,testbed_name])
+
+    def __init__ (self, auth=None, hostname=None, slicename=None, email=None, interface=None, **kwargs):
+        # exactly one of hostname/slicename/email/interface selects the
+        # construction mode; otherwise defer to the base Xrn constructor
+        #def hostname_to_hrn(auth_hrn, login_base, hostname):
+        if hostname is not None:
+            self.type='node'
+            # keep only the first part of the DNS name
+            #self.hrn='.'.join( [auth,hostname.split(".")[0] ] )
+            # escape the '.' in the hostname
+            self.hrn='.'.join( [auth,Xrn.escape(hostname)] )
+            self.hrn_to_urn()
+        #def slicename_to_hrn(auth_hrn, slicename):
+        elif slicename is not None:
+            self.type='slice'
+            # split at the first _
+            parts = slicename.split("_",1)
+            self.hrn = ".".join([auth] + parts )
+            self.hrn_to_urn()
+        #def email_to_hrn(auth_hrn, email):
+        elif email is not None:
+            self.type='person'
+            # keep only the part before '@' and replace special chars into _
+            self.hrn='.'.join([auth,email.split('@')[0].replace(".", "_").replace("+", "_")])
+            self.hrn_to_urn()
+        elif interface is not None:
+            self.type = 'interface'
+            self.hrn = auth + '.' + interface
+            self.hrn_to_urn()
+        else:
+            Xrn.__init__ (self,**kwargs)
+
+    #def hrn_to_pl_slicename(hrn):
+    def dummy_slicename (self):
+        # testbed slice name: hrn leaf stripped of sliver-id suffix and of
+        # any characters outside [a-zA-Z0-9_]
+        self._normalize()
+        leaf = self.leaf
+        sliver_id_parts = leaf.split(':')
+        name = sliver_id_parts[0]
+        name = re.sub('[^a-zA-Z0-9_]', '', name)
+        return name
+
+    #def hrn_to_pl_authname(hrn):
+    def dummy_authname (self):
+        # last component of the authority chain
+        self._normalize()
+        return self.authority[-1]
+
+    def interface_name(self):
+        self._normalize()
+        return self.leaf
+
+    def dummy_login_base (self):
+        # derive a short site login base from the hrn
+        self._normalize()
+        if self.type and self.type.startswith('authority'):
+            base = self.leaf
+        else:
+            base = self.authority[-1]
+
+        # Fix up names of GENI Federates
+        base = base.lower()
+        # NOTE(review): this pattern matches a literal backslash followed by
+        # a non-alphanumeric -- probably meant to strip all non-alphanumerics;
+        # confirm against the PlXrn original before changing
+        base = re.sub('\\\[^a-zA-Z0-9]', '', base)
+
+        # keep at most the last 20 characters
+        if len(base) > 20:
+            base = base[len(base)-20:]
+
+        return base
return "federica"
def list_slices (self, creds, options):
- return self.response(self.shell.listSlices())
+ # the issue is that federica returns the list of slice's urn in a string format
+ # this is why this dirty hack is needed until federica fixes it.
+ slices_str = self.shell.listSlices()['value'][1:-1]
+ slices_list = slices_str.split(", ")
+ return slices_list
def sliver_status (self, slice_urn, slice_hrn):
return "fddriver.sliver_status: undefined/todo for slice %s"%slice_hrn
--- /dev/null
+from sfa.generic import Generic
+
+class dummy (Generic):
+    """
+    Generic flavour binding the dummy testbed: importer, api, managers
+    and driver classes are resolved lazily via imports in each method.
+    """
+
+    # the importer class
+    def importer_class (self):
+        import sfa.importer.dummyimporter
+        return sfa.importer.dummyimporter.DummyImporter
+
+    # use the standard api class
+    def api_class (self):
+        import sfa.server.sfaapi
+        return sfa.server.sfaapi.SfaApi
+
+    # the manager classes for the server-side services
+    def registry_manager_class (self) :
+        import sfa.managers.registry_manager
+        return sfa.managers.registry_manager.RegistryManager
+    def slicemgr_manager_class (self) :
+        import sfa.managers.slice_manager
+        return sfa.managers.slice_manager.SliceManager
+    def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        import sfa.dummy.dummydriver
+        return sfa.dummy.dummydriver.DummyDriver
+
+
--- /dev/null
+from sfa.generic import Generic
+
+class nitos (Generic):
+    """
+    Generic flavour binding the NITOS testbed; no component-side
+    support (component_* hooks return None).
+    """
+
+    # the importer class
+    def importer_class (self):
+        import sfa.importer.nitosimporter
+        return sfa.importer.nitosimporter.NitosImporter
+
+    # use the standard api class
+    def api_class (self):
+        import sfa.server.sfaapi
+        return sfa.server.sfaapi.SfaApi
+
+    # the manager classes for the server-side services
+    def registry_manager_class (self) :
+        import sfa.managers.registry_manager
+        return sfa.managers.registry_manager.RegistryManager
+    def slicemgr_manager_class (self) :
+        import sfa.managers.slice_manager
+        return sfa.managers.slice_manager.SliceManager
+    def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        import sfa.nitos.nitosdriver
+        return sfa.nitos.nitosdriver.NitosDriver
+
+    # for the component mode, to be run on board planetlab nodes
+    # manager class
+    def component_manager_class (self):
+        return None
+    # driver_class
+    def component_driver_class (self):
+        return None
--- /dev/null
+from sfa.generic import Generic
+
+import sfa.server.sfaapi
+
+
+
+class slab (Generic):
+    """
+    Generic flavour binding the SensLab testbed; no component-side
+    support (component_* hooks return None).
+    """
+
+    # use the standard api class
+    def api_class (self):
+        return sfa.server.sfaapi.SfaApi
+
+    # the importer class
+    def importer_class (self):
+        import sfa.importer.slabimporter
+        return sfa.importer.slabimporter.SlabImporter
+
+    # the manager classes for the server-side services
+    def registry_manager_class (self) :
+        import sfa.managers.registry_manager
+        return sfa.managers.registry_manager.RegistryManager
+
+    def slicemgr_manager_class (self) :
+        import sfa.managers.slice_manager
+        return sfa.managers.slice_manager.SliceManager
+
+    def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        import sfa.senslab.slabdriver
+        return sfa.senslab.slabdriver.SlabDriver
+
+    # slab does not have a component manager yet
+    # manager class
+    def component_manager_class (self):
+        return None
+    # driver_class
+    def component_driver_class (self):
+        return None
+
+
--- /dev/null
+from sfa.generic import Generic
+
+class teagle (Generic):
+    """
+    Generic flavour binding the Teagle testbed; reuses the dummy
+    importer but plugs in the external teaglesfa driver.
+    """
+
+    # the importer class
+    def importer_class (self):
+        import sfa.importer.dummyimporter
+        return sfa.importer.dummyimporter.DummyImporter
+
+    # use the standard api class
+    def api_class (self):
+        import sfa.server.sfaapi
+        return sfa.server.sfaapi.SfaApi
+
+    # the manager classes for the server-side services
+    def registry_manager_class (self) :
+        import sfa.managers.registry_manager
+        return sfa.managers.registry_manager.RegistryManager
+    def slicemgr_manager_class (self) :
+        import sfa.managers.slice_manager
+        return sfa.managers.slice_manager.SliceManager
+    def aggregate_manager_class (self) :
+        import sfa.managers.aggregate_manager
+        return sfa.managers.aggregate_manager.AggregateManager
+
+    # driver class for server-side services, talk to the whole testbed
+    def driver_class (self):
+        import teaglesfa.driver
+        return teaglesfa.driver.TeagleDriver
+#        import sfa.dummy.dummydriver
+#        return sfa.dummy.dummydriver.DummyDriver
+
#!/usr/bin/python
import sys
+from datetime import datetime
from sfa.util.xrn import get_authority, hrn_to_urn
from sfa.generic import Generic
generic=Generic.the_flavour()
importer_class = generic.importer_class()
if importer_class:
- self.logger.info ("Using flavour %s for importing (class %s)"%\
- (generic.flavour,importer_class.__name__))
+ begin_time=datetime.now()
+ self.logger.info ("Starting import on %s, using class %s from flavour %s"%\
+ (begin_time,importer_class.__name__,generic.flavour))
testbed_importer = importer_class (auth_hierarchy, self.logger)
if testbed_importer:
testbed_importer.add_options(options)
testbed_importer.run (options)
+ end_time=datetime.now()
+ duration=end_time-begin_time
+ self.logger.info("Import took %s"%duration)
--- /dev/null
+#
+# Dummy importer
+#
+# requirements
+#
+# read the dummy testbed database and update the local registry database accordingly
+# so we update the following collections
+# . authorities (from testbed sites)
+# . node (from testbed nodes)
+# . users+keys (from testbed users and attached keys)
+#       known limitation : *one* of the ssh keys is chosen at random here
+#       xxx todo/check xxx at the very least, when a key is known to the registry
+#       and is still current in the testbed
+#       then we should definitely make sure to keep that one in sfa...
+# . slice+researchers (from testbed slices and attached users)
+#
+
+import os
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+
+from sfa.trust.gid import create_uuid
+from sfa.trust.certificate import convert_public_key, Keypair
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
+
+from sfa.dummy.dummyshell import DummyShell
+from sfa.dummy.dummyxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_dummy_slicename
+
+def _get_site_hrn(interface_hrn, site):
+ hrn = ".".join([interface_hrn, site['name']])
+ return hrn
+
+
+class DummyImporter:
+
+ def __init__ (self, auth_hierarchy, logger):
+ self.auth_hierarchy = auth_hierarchy
+ self.logger=logger
+
+ def add_options (self, parser):
+ # we don't have any options for now
+ pass
+
+ # hrn hash is initialized from current db
+ # remember just-created records as we go
+ # xxx might make sense to add a UNIQUE constraint in the db itself
+ def remember_record_by_hrn (self, record):
+ tuple = (record.type, record.hrn)
+ if tuple in self.records_by_type_hrn:
+ self.logger.warning ("DummyImporter.remember_record_by_hrn: duplicate (%s,%s)"%tuple)
+ return
+ self.records_by_type_hrn [ tuple ] = record
+
+ # ditto for pointer hash
+ def remember_record_by_pointer (self, record):
+ if record.pointer == -1:
+ self.logger.warning ("DummyImporter.remember_record_by_pointer: pointer is void")
+ return
+ tuple = (record.type, record.pointer)
+ if tuple in self.records_by_type_pointer:
+ self.logger.warning ("DummyImporter.remember_record_by_pointer: duplicate (%s,%s)"%tuple)
+ return
+ self.records_by_type_pointer [ ( record.type, record.pointer,) ] = record
+
+ def remember_record (self, record):
+ self.remember_record_by_hrn (record)
+ self.remember_record_by_pointer (record)
+
+ def locate_by_type_hrn (self, type, hrn):
+ return self.records_by_type_hrn.get ( (type, hrn), None)
+
+ def locate_by_type_pointer (self, type, pointer):
+ return self.records_by_type_pointer.get ( (type, pointer), None)
+
+ # a convenience/helper function to see if a record is already known
+ # a former, broken, attempt (in 2.1-9) had been made
+ # to try and use 'pointer' as a first, most significant attempt
+ # the idea being to preserve stuff as much as possible, and thus
+ # to avoid creating a new gid in the case of a simple hrn rename
+ # however this of course doesn't work as the gid depends on the hrn...
+ #def locate (self, type, hrn=None, pointer=-1):
+ # if pointer!=-1:
+ # attempt = self.locate_by_type_pointer (type, pointer)
+ # if attempt : return attempt
+ # if hrn is not None:
+ # attempt = self.locate_by_type_hrn (type, hrn,)
+ # if attempt : return attempt
+ # return None
+
+    # this makes the run method a bit abstruse - out of the way
+
+ def run (self, options):
+ config = Config ()
+ interface_hrn = config.SFA_INTERFACE_HRN
+ root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ shell = DummyShell (config)
+
+ ######## retrieve all existing SFA objects
+ all_records = dbsession.query(RegRecord).all()
+
+ # create hash by (type,hrn)
+ # we essentially use this to know if a given record is already known to SFA
+ self.records_by_type_hrn = \
+ dict ( [ ( (record.type, record.hrn) , record ) for record in all_records ] )
+ # create hash by (type,pointer)
+ self.records_by_type_pointer = \
+ dict ( [ ( (record.type, record.pointer) , record ) for record in all_records
+ if record.pointer != -1] )
+
+ # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+ for record in all_records: record.stale=True
+
+ ######## retrieve Dummy TB data
+ # Get all plc sites
+        # retrieve only required stuff
+ sites = [shell.GetTestbedInfo()]
+ # create a hash of sites by login_base
+# sites_by_login_base = dict ( [ ( site['login_base'], site ) for site in sites ] )
+ # Get all dummy TB users
+ users = shell.GetUsers()
+ # create a hash of users by user_id
+ users_by_id = dict ( [ ( user['user_id'], user) for user in users ] )
+ # Get all dummy TB public keys
+ keys = []
+ for user in users:
+ if 'keys' in user:
+ keys.extend(user['keys'])
+ # create a dict user_id -> [ keys ]
+ keys_by_person_id = {}
+ for user in users:
+ if 'keys' in user:
+ keys_by_person_id[user['user_id']] = user['keys']
+ # Get all dummy TB nodes
+ nodes = shell.GetNodes()
+ # create hash by node_id
+ nodes_by_id = dict ( [ ( node['node_id'], node, ) for node in nodes ] )
+ # Get all dummy TB slices
+ slices = shell.GetSlices()
+ # create hash by slice_id
+ slices_by_id = dict ( [ (slice['slice_id'], slice ) for slice in slices ] )
+
+
+ # start importing
+ for site in sites:
+ site_hrn = _get_site_hrn(interface_hrn, site)
+ # import if hrn is not in list of existing hrns or if the hrn exists
+            # but it's not a site record
+ site_record=self.locate_by_type_hrn ('authority', site_hrn)
+ if not site_record:
+ try:
+ urn = hrn_to_urn(site_hrn, 'authority')
+ if not self.auth_hierarchy.auth_exists(urn):
+ self.auth_hierarchy.create_auth(urn)
+ auth_info = self.auth_hierarchy.get_auth_info(urn)
+ site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+ pointer= -1,
+ authority=get_authority(site_hrn))
+ site_record.just_created()
+ dbsession.add(site_record)
+ dbsession.commit()
+ self.logger.info("DummyImporter: imported authority (site) : %s" % site_record)
+ self.remember_record (site_record)
+ except:
+ # if the site import fails then there is no point in trying to import the
+ # site's child records (node, slices, persons), so skip them.
+ self.logger.log_exc("DummyImporter: failed to import site. Skipping child records")
+ continue
+ else:
+ # xxx update the record ...
+ pass
+ site_record.stale=False
+
+ # import node records
+ for node in nodes:
+ site_auth = get_authority(site_hrn)
+ site_name = site['name']
+ node_hrn = hostname_to_hrn(site_auth, site_name, node['hostname'])
+ # xxx this sounds suspicious
+ if len(node_hrn) > 64: node_hrn = node_hrn[:64]
+ node_record = self.locate_by_type_hrn ( 'node', node_hrn )
+ if not node_record:
+ try:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(node_hrn, 'node')
+ node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ node_record = RegNode (hrn=node_hrn, gid=node_gid,
+ pointer =node['node_id'],
+ authority=get_authority(node_hrn))
+ node_record.just_created()
+ dbsession.add(node_record)
+ dbsession.commit()
+ self.logger.info("DummyImporter: imported node: %s" % node_record)
+ self.remember_record (node_record)
+ except:
+ self.logger.log_exc("DummyImporter: failed to import node")
+ else:
+ # xxx update the record ...
+ pass
+ node_record.stale=False
+
+ site_pis=[]
+ # import users
+ for user in users:
+ user_hrn = email_to_hrn(site_hrn, user['email'])
+ # xxx suspicious again
+ if len(user_hrn) > 64: user_hrn = user_hrn[:64]
+ user_urn = hrn_to_urn(user_hrn, 'user')
+
+ user_record = self.locate_by_type_hrn ( 'user', user_hrn)
+
+ # return a tuple pubkey (a dummy TB key object) and pkey (a Keypair object)
+
+ def init_user_key (user):
+ pubkey = None
+ pkey = None
+ if user['keys']:
+ # randomly pick first key in set
+ for key in user['keys']:
+ pubkey = key
+ try:
+ pkey = convert_public_key(pubkey)
+ break
+ except:
+ continue
+ if not pkey:
+ self.logger.warn('DummyImporter: unable to convert public key for %s' % user_hrn)
+ pkey = Keypair(create=True)
+ else:
+ # the user has no keys. Creating a random keypair for the user's gid
+                self.logger.warn("DummyImporter: user %s does not have a Dummy TB public key"%user_hrn)
+ pkey = Keypair(create=True)
+ return (pubkey, pkey)
+
+ # new user
+ try:
+ if not user_record:
+ (pubkey,pkey) = init_user_key (user)
+ user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
+ user_gid.set_email(user['email'])
+ user_record = RegUser (hrn=user_hrn, gid=user_gid,
+ pointer=user['user_id'],
+ authority=get_authority(user_hrn),
+ email=user['email'])
+ if pubkey:
+ user_record.reg_keys=[RegKey (pubkey)]
+ else:
+ self.logger.warning("No key found for user %s"%user_record)
+ user_record.just_created()
+ dbsession.add (user_record)
+ dbsession.commit()
+ self.logger.info("DummyImporter: imported person: %s" % user_record)
+ self.remember_record ( user_record )
+
+ else:
+ # update the record ?
+ # if user's primary key has changed then we need to update the
+ # users gid by forcing an update here
+ sfa_keys = user_record.reg_keys
+ def key_in_list (key,sfa_keys):
+ for reg_key in sfa_keys:
+ if reg_key.key==key: return True
+ return False
+ # is there a new key in Dummy TB ?
+ new_keys=False
+ for key in user['keys']:
+ if not key_in_list (key,sfa_keys):
+ new_keys = True
+ if new_keys:
+ (pubkey,pkey) = init_user_key (user)
+ user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
+ if not pubkey:
+ user_record.reg_keys=[]
+ else:
+ user_record.reg_keys=[ RegKey (pubkey)]
+ self.logger.info("DummyImporter: updated person: %s" % user_record)
+ user_record.email = user['email']
+ dbsession.commit()
+ user_record.stale=False
+ except:
+ self.logger.log_exc("DummyImporter: failed to import user %d %s"%(user['user_id'],user['email']))
+
+
+ # import slices
+ for slice in slices:
+ slice_hrn = slicename_to_hrn(site_hrn, slice['slice_name'])
+ slice_record = self.locate_by_type_hrn ('slice', slice_hrn)
+ if not slice_record:
+ try:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(slice_hrn, 'slice')
+ slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid,
+ pointer=slice['slice_id'],
+ authority=get_authority(slice_hrn))
+ slice_record.just_created()
+ dbsession.add(slice_record)
+ dbsession.commit()
+ self.logger.info("DummyImporter: imported slice: %s" % slice_record)
+ self.remember_record ( slice_record )
+ except:
+ self.logger.log_exc("DummyImporter: failed to import slice")
+ else:
+ # xxx update the record ...
+ self.logger.warning ("Slice update not yet implemented")
+ pass
+ # record current users affiliated with the slice
+ slice_record.reg_researchers = \
+ [ self.locate_by_type_pointer ('user',user_id) for user_id in slice['user_ids'] ]
+ dbsession.commit()
+ slice_record.stale=False
+
+ ### remove stale records
+ # special records must be preserved
+ system_hrns = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+ for record in all_records:
+ if record.hrn in system_hrns:
+ record.stale=False
+ if record.peer_authority:
+ record.stale=False
+
+ for record in all_records:
+ try: stale=record.stale
+ except:
+ stale=True
+ self.logger.warning("stale not found with %s"%record)
+ if stale:
+ self.logger.info("DummyImporter: deleting stale record: %s" % record)
+ dbsession.delete(record)
+ dbsession.commit()
--- /dev/null
+
+import os
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+
+from sfa.trust.gid import create_uuid
+from sfa.trust.certificate import convert_public_key, Keypair
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
+
+from sfa.nitos.nitosshell import NitosShell
+from sfa.nitos.nitosxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_nitos_slicename, username_to_hrn
+
+def _get_site_hrn(interface_hrn, site):
+ hrn = ".".join([interface_hrn, site['name']])
+ return hrn
+
+
+class NitosImporter:
+
+ def __init__ (self, auth_hierarchy, logger):
+ self.auth_hierarchy = auth_hierarchy
+ self.logger=logger
+
+ def add_options (self, parser):
+ # we don't have any options for now
+ pass
+
+ # hrn hash is initialized from current db
+ # remember just-created records as we go
+ # xxx might make sense to add a UNIQUE constraint in the db itself
+ def remember_record_by_hrn (self, record):
+ tuple = (record.type, record.hrn)
+ if tuple in self.records_by_type_hrn:
+ self.logger.warning ("NitosImporter.remember_record_by_hrn: duplicate (%s,%s)"%tuple)
+ return
+ self.records_by_type_hrn [ tuple ] = record
+
+ # ditto for pointer hash
+ def remember_record_by_pointer (self, record):
+ if record.pointer == -1:
+ self.logger.warning ("NitosImporter.remember_record_by_pointer: pointer is void")
+ return
+ tuple = (record.type, record.pointer)
+ if tuple in self.records_by_type_pointer:
+ self.logger.warning ("NitosImporter.remember_record_by_pointer: duplicate (%s,%s)"%tuple)
+ return
+ self.records_by_type_pointer [ ( record.type, record.pointer,) ] = record
+
+ def remember_record (self, record):
+ self.remember_record_by_hrn (record)
+ self.remember_record_by_pointer (record)
+
+ def locate_by_type_hrn (self, type, hrn):
+ return self.records_by_type_hrn.get ( (type, hrn), None)
+
+ def locate_by_type_pointer (self, type, pointer):
+ return self.records_by_type_pointer.get ( (type, pointer), None)
+
+ # a convenience/helper function to see if a record is already known
+ # a former, broken, attempt (in 2.1-9) had been made
+ # to try and use 'pointer' as a first, most significant attempt
+ # the idea being to preserve stuff as much as possible, and thus
+ # to avoid creating a new gid in the case of a simple hrn rename
+ # however this of course doesn't work as the gid depends on the hrn...
+ #def locate (self, type, hrn=None, pointer=-1):
+ # if pointer!=-1:
+ # attempt = self.locate_by_type_pointer (type, pointer)
+ # if attempt : return attempt
+ # if hrn is not None:
+ # attempt = self.locate_by_type_hrn (type, hrn,)
+ # if attempt : return attempt
+ # return None
+
+    # this makes the run method a bit abstruse - out of the way
+
+ def run (self, options):
+ config = Config ()
+ interface_hrn = config.SFA_INTERFACE_HRN
+ root_auth = config.SFA_REGISTRY_ROOT_AUTH
+ shell = NitosShell (config)
+
+ ######## retrieve all existing SFA objects
+ all_records = dbsession.query(RegRecord).all()
+
+ # create hash by (type,hrn)
+ # we essentially use this to know if a given record is already known to SFA
+ self.records_by_type_hrn = \
+ dict ( [ ( (record.type, record.hrn) , record ) for record in all_records ] )
+ # create hash by (type,pointer)
+ self.records_by_type_pointer = \
+ dict ( [ ( (record.type, record.pointer) , record ) for record in all_records
+ if record.pointer != -1] )
+
+ # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+ for record in all_records: record.stale=True
+
+ ######## retrieve NITOS data
+ # Get site info
+        # retrieve only required stuff
+ site = shell.getTestbedInfo()
+ sites = [site]
+ # create a hash of sites by login_base
+# # sites_by_login_base = dict ( [ ( site['login_base'], site ) for site in sites ] )
+ # Get all NITOS users
+ users = shell.getUsers()
+ # create a hash of users by user_id
+ users_by_id = dict ( [ ( user['user_id'], user) for user in users ] )
+ # Get all NITOS public keys
+ # accumulate key ids for keys retrieval
+# key_ids = []
+# for person in persons:
+# key_ids.extend(person['key_ids'])
+# keys = shell.GetKeys( {'peer_id': None, 'key_id': key_ids,
+# 'key_type': 'ssh'} )
+# # create a hash of keys by key_id
+# keys_by_id = dict ( [ ( key['key_id'], key ) for key in keys ] )
+ # create a dict user_id -> [ (nitos)keys ]
+ keys_by_user_id = dict ( [ ( user['user_id'], user['keys']) for user in users ] )
+ # Get all nitos nodes
+ nodes = shell.getNodes({}, [])
+ # create hash by node_id
+ nodes_by_id = dict ( [ (node['node_id'], node) for node in nodes ] )
+ # Get all nitos slices
+ slices = shell.getSlices({}, [])
+ # create hash by slice_id
+ slices_by_id = dict ( [ (slice['slice_id'], slice) for slice in slices ] )
+
+
+ # start importing
+ for site in sites:
+ #for i in [0]:
+ site_hrn = _get_site_hrn(interface_hrn, site)
+ # import if hrn is not in list of existing hrns or if the hrn exists
+            # but it's not a site record
+ site_record=self.locate_by_type_hrn ('authority', site_hrn)
+ if not site_record:
+ try:
+ urn = hrn_to_urn(site_hrn, 'authority')
+ if not self.auth_hierarchy.auth_exists(urn):
+ self.auth_hierarchy.create_auth(urn)
+ auth_info = self.auth_hierarchy.get_auth_info(urn)
+ site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+ pointer=0,
+ authority=get_authority(site_hrn))
+ site_record.just_created()
+ dbsession.add(site_record)
+ dbsession.commit()
+ self.logger.info("NitosImporter: imported authority (site) : %s" % site_record)
+ self.remember_record (site_record)
+ except:
+ # if the site import fails then there is no point in trying to import the
+ # site's child records (node, slices, persons), so skip them.
+ self.logger.log_exc("NitosImporter: failed to import site. Skipping child records")
+ continue
+ else:
+ # xxx update the record ...
+ pass
+ site_record.stale=False
+
+ # import node records
+ for node in nodes:
+ site_auth = get_authority(site_hrn)
+ site_name = site['name']
+ node_hrn = hostname_to_hrn(site_auth, site_name, node['hostname'])
+ # xxx this sounds suspicious
+ if len(node_hrn) > 64: node_hrn = node_hrn[:64]
+ node_record = self.locate_by_type_hrn ( 'node', node_hrn )
+ if not node_record:
+ try:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(node_hrn, 'node')
+ node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ node_record = RegNode (hrn=node_hrn, gid=node_gid,
+ pointer =node['node_id'],
+ authority=get_authority(node_hrn))
+ node_record.just_created()
+ dbsession.add(node_record)
+ dbsession.commit()
+ self.logger.info("NitosImporter: imported node: %s" % node_record)
+ self.remember_record (node_record)
+ except:
+ self.logger.log_exc("NitosImporter: failed to import node")
+ else:
+ # xxx update the record ...
+ pass
+
+ node_record.stale=False
+
+
+ # import users
+ for user in users:
+ user_hrn = username_to_hrn(interface_hrn, site['name'], user['username'])
+ # xxx suspicious again
+ if len(user_hrn) > 64: user_hrn = user_hrn[:64]
+ user_urn = hrn_to_urn(user_hrn, 'user')
+
+ user_record = self.locate_by_type_hrn ( 'user', user_hrn)
+
+ # return a tuple pubkey (a nitos key object) and pkey (a Keypair object)
+ def init_user_key (user):
+ pubkey = None
+ pkey = None
+ if user['keys']:
+ # randomly pick first key in set
+ for key in user['keys']:
+ pubkey = key
+ try:
+ pkey = convert_public_key(pubkey)
+ break
+ except:
+ continue
+ if not pkey:
+ self.logger.warn('NitosImporter: unable to convert public key for %s' % user_hrn)
+ pkey = Keypair(create=True)
+ else:
+ # the user has no keys. Creating a random keypair for the user's gid
+ self.logger.warn("NitosImporter: user %s does not have a NITOS public key"%user_hrn)
+ pkey = Keypair(create=True)
+ return (pubkey, pkey)
+
+ # new user
+ try:
+ if not user_record:
+ (pubkey,pkey) = init_user_key (user)
+ user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
+ user_gid.set_email(user['email'])
+ user_record = RegUser (hrn=user_hrn, gid=user_gid,
+ pointer=user['user_id'],
+ authority=get_authority(user_hrn),
+ email=user['email'])
+ if pubkey:
+ user_record.reg_keys=[RegKey (pubkey)]
+ else:
+ self.logger.warning("No key found for user %s"%user_record)
+ user_record.just_created()
+ dbsession.add (user_record)
+ dbsession.commit()
+ self.logger.info("NitosImporter: imported user: %s" % user_record)
+ self.remember_record ( user_record )
+ else:
+ # update the record ?
+ # if user's primary key has changed then we need to update the
+ # users gid by forcing an update here
+ sfa_keys = user_record.reg_keys
+
+ def sfa_key_in_list (sfa_key,nitos_user_keys):
+ for nitos_key in nitos_user_keys:
+ if nitos_key==sfa_key: return True
+ return False
+ # are all the SFA keys known to nitos ?
+ new_keys=False
+ if not sfa_keys and user['keys']:
+ new_keys = True
+ else:
+ for sfa_key in sfa_keys:
+ if not sfa_key_in_list (sfa_key.key,user['keys']):
+ new_keys = True
+
+ if new_keys:
+ (pubkey,pkey) = init_user_key (user)
+ user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
+ if not pubkey:
+ user_record.reg_keys=[]
+ else:
+ user_record.reg_keys=[ RegKey (pubkey)]
+ user_record.gid = user_gid
+ user_record.just_updated()
+ self.logger.info("NitosImporter: updated user: %s" % user_record)
+ user_record.email = user['email']
+ dbsession.commit()
+ user_record.stale=False
+ except:
+ self.logger.log_exc("NitosImporter: failed to import user %s %s"%(user['user_id'],user['email']))
+
+
+ # import slices
+ for slice in slices:
+ slice_hrn = slicename_to_hrn(interface_hrn, site['name'], slice['slice_name'])
+ slice_record = self.locate_by_type_hrn ('slice', slice_hrn)
+ if not slice_record:
+ try:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(slice_hrn, 'slice')
+ slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid,
+ pointer=slice['slice_id'],
+ authority=get_authority(slice_hrn))
+ slice_record.just_created()
+ dbsession.add(slice_record)
+ dbsession.commit()
+ self.logger.info("NitosImporter: imported slice: %s" % slice_record)
+ self.remember_record ( slice_record )
+ except:
+ self.logger.log_exc("NitosImporter: failed to import slice")
+ else:
+ # xxx update the record ...
+ self.logger.warning ("Slice update not yet implemented")
+ pass
+ # record current users affiliated with the slice
+ slice_record.reg_researchers = \
+ [ self.locate_by_type_pointer ('user',int(user_id)) for user_id in slice['user_ids'] ]
+ dbsession.commit()
+ slice_record.stale=False
+
+
+ ### remove stale records
+ # special records must be preserved
+ system_hrns = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+ for record in all_records:
+ if record.hrn in system_hrns:
+ record.stale=False
+ if record.peer_authority:
+ record.stale=False
+
+ for record in all_records:
+ try: stale=record.stale
+ except:
+ stale=True
+ self.logger.warning("stale not found with %s"%record)
+ if stale:
+ self.logger.info("NitosImporter: deleting stale record: %s" % record)
+ dbsession.delete(record)
+ dbsession.commit()
+
+
['person_id', 'email', 'key_ids', 'site_ids', 'role_ids'])
# create a hash of persons by person_id
persons_by_id = dict ( [ ( person['person_id'], person) for person in persons ] )
+ # also gather non-enabled user accounts so as to issue relevant warnings
+ disabled_persons = shell.GetPersons({'peer_id': None, 'enabled': False}, ['person_id'])
+ disabled_person_ids = [ person['person_id'] for person in disabled_persons ]
# Get all plc public keys
# accumulate key ids for keys retrieval
key_ids = []
except:
# if the site import fails then there is no point in trying to import the
# site's child records (node, slices, persons), so skip them.
- self.logger.log_exc("PlImporter: failed to import site. Skipping child records")
+ self.logger.log_exc("PlImporter: failed to import site %s. Skipping child records"%site_hrn)
continue
else:
# xxx update the record ...
self.logger.info("PlImporter: imported node: %s" % node_record)
self.remember_record (node_record)
except:
- self.logger.log_exc("PlImporter: failed to import node")
+ self.logger.log_exc("PlImporter: failed to import node %s"%node_hrn)
+ continue
else:
# xxx update the record ...
pass
site_pis=set()
# import persons
for person_id in site['person_ids']:
- try:
- person = persons_by_id[person_id]
- except:
- self.logger.warning ("PlImporter: cannot locate person_id %s - ignored"%person_id)
+ proceed=False
+ if person_id in persons_by_id:
+ person=persons_by_id[person_id]
+ proceed=True
+ elif person_id in disabled_person_ids:
+ pass
+ else:
+ self.logger.warning ("PlImporter: cannot locate person_id %s in site %s - ignored"%(person_id,site_hrn))
+ # make sure to NOT run this if anything is wrong
+ if not proceed: continue
+
person_hrn = email_to_hrn(site_hrn, person['email'])
# xxx suspicious again
if len(person_hrn) > 64: person_hrn = person_hrn[:64]
(pubkey,pkey) = init_person_key (person, plc_keys )
person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey, email=person['email'])
user_record = RegUser (hrn=person_hrn, gid=person_gid,
- pointer=person['person_id'],
- authority=get_authority(person_hrn),
- email=person['email'])
+ pointer=person['person_id'],
+ authority=get_authority(person_hrn),
+ email=person['email'])
if pubkey:
user_record.reg_keys=[RegKey (pubkey['key'], pubkey['key_id'])]
else:
self.remember_record ( user_record )
else:
# update the record ?
- # if user's primary key has changed then we need to update the
+ #
+ # if a user key has changed then we need to update the
# users gid by forcing an update here
+ #
+ # right now, SFA only has *one* key attached to a user, and this is
+ # the key that the GID was made with
+ # so the logic here is, we consider that things are OK (unchanged) if
+ # all the SFA keys are present as PLC keys
+ # otherwise we trigger the creation of a new gid from *some* plc key
+ # and record this on the SFA side
+ # it would make sense to add a feature in PLC so that one could pick a 'primary'
+ # key but this is not available on the myplc side for now
+ # = or = it would be much better to support several keys in SFA but that
+ # does not seem doable without a major overhaul in the data model as
+ # a GID is attached to a hrn, but it's also linked to a key, so...
+ # NOTE: with this logic, the first key entered in PLC remains the one
+ # current in SFA until it is removed from PLC
sfa_keys = user_record.reg_keys
- def key_in_list (key,sfa_keys):
- for reg_key in sfa_keys:
- if reg_key.key==key['key']: return True
+ def sfa_key_in_list (sfa_key,plc_keys):
+ for plc_key in plc_keys:
+ if plc_key['key']==sfa_key.key:
+ return True
return False
- # is there a new key in myplc ?
+ # are all the SFA keys known to PLC ?
new_keys=False
- for key in plc_keys:
- if not key_in_list (key,sfa_keys):
- new_keys = True
+ if not sfa_keys and plc_keys:
+ new_keys=True
+ else:
+ for sfa_key in sfa_keys:
+ if not sfa_key_in_list (sfa_key,plc_keys):
+ new_keys = True
if new_keys:
(pubkey,pkey) = init_person_key (person, plc_keys)
person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+ person_gid.set_email(person['email'])
if not pubkey:
user_record.reg_keys=[]
else:
user_record.reg_keys=[ RegKey (pubkey['key'], pubkey['key_id'])]
+ user_record.gid = person_gid
+ user_record.just_updated()
self.logger.info("PlImporter: updated person: %s" % user_record)
user_record.email = person['email']
dbsession.commit()
# maintain the list of PIs for a given site
site_record.reg_pis = list(site_pis)
+ site_record.reg_pis = site_pis
dbsession.commit()
# import slices
self.logger.info("PlImporter: imported slice: %s" % slice_record)
self.remember_record ( slice_record )
except:
- self.logger.log_exc("PlImporter: failed to import slice")
+ self.logger.log_exc("PlImporter: failed to import slice %s (%s)"%(slice_hrn,slice['name']))
else:
# update the pointer if it has changed
if slice_id != slice_record.pointer:
--- /dev/null
+import sys
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_authority, hrn_to_urn
+
+from sfa.senslab.slabdriver import SlabDriver
+
+from sfa.trust.certificate import Keypair, convert_public_key
+from sfa.trust.gid import create_uuid
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, \
+ RegUser, RegKey
+from sfa.util.sfalogging import logger
+
+from sqlalchemy.exc import SQLAlchemyError
+
+
+def _get_site_hrn(site):
+ hrn = site['name']
+ return hrn
+
+class SlabImporter:
+
+ def __init__ (self, auth_hierarchy, loc_logger):
+ self.auth_hierarchy = auth_hierarchy
+ self.logger = loc_logger
+ self.logger.setLevelDebug()
+
+ def hostname_to_hrn_escaped(self, root_auth, hostname):
+ return '.'.join( [root_auth, Xrn.escape(hostname)] )
+
+
+
+ def slicename_to_hrn(self, person_hrn):
+ return (person_hrn +'_slice')
+
+ def add_options (self, parser):
+ # we don't have any options for now
+ pass
+
+ def find_record_by_type_hrn(self, record_type, hrn):
+ return self.records_by_type_hrn.get ( (record_type, hrn), None)
+
+ def locate_by_type_pointer (self, record_type, pointer):
+ print >>sys.stderr, " \r\n \r\n \t SLABPOSTGRES locate_by_type_pointer .........................."
+ ret = self.records_by_type_pointer.get ( (record_type, pointer), None)
+ print >>sys.stderr, " \r\n \r\n \t SLABPOSTGRES locate_by_type_pointer "
+ return ret
+
+ def update_just_added_records_dict (self, record):
+ rec_tuple = (record.type, record.hrn)
+ if rec_tuple in self.records_by_type_hrn:
+ self.logger.warning ("SlabImporter.update_just_added_records_dict: duplicate (%s,%s)"%rec_tuple)
+ return
+ self.records_by_type_hrn [ rec_tuple ] = record
+
+ def run (self, options):
+ config = Config()
+
+ slabdriver = SlabDriver(config)
+
+ #Create special slice table for senslab
+
+ if not slabdriver.db.exists('slab_xp'):
+ slabdriver.db.createtable()
+ self.logger.info ("SlabImporter.run: slab_xp table created ")
+
+ #retrieve all existing SFA objects
+ all_records = dbsession.query(RegRecord).all()
+
+ # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+ for record in all_records:
+ record.stale = True
+ #create hash by (type,hrn)
+ #used to know if a given record is already known to SFA
+
+ self.records_by_type_hrn = \
+ dict ( [ ( (record.type,record.hrn) , record ) for record in all_records ] )
+ print>>sys.stderr,"\r\n SLABIMPORT \t all_records[0] %s all_records[0].email %s \r\n" %(all_records[0].type, all_records[0])
+ self.users_rec_by_email = \
+ dict ( [ (record.email, record) for record in all_records if record.type == 'user' ] )
+
+ # create hash by (type,pointer)
+ self.records_by_type_pointer = \
+ dict ( [ ( (str(record.type),record.pointer) , record ) for record in all_records if record.pointer != -1] )
+
+
+ nodes_listdict = slabdriver.slab_api.GetNodes()
+ nodes_by_id = dict([(node['node_id'],node) for node in nodes_listdict])
+ sites_listdict = slabdriver.slab_api.GetSites()
+
+ ldap_person_listdict = slabdriver.slab_api.GetPersons()
+ print>>sys.stderr,"\r\n SLABIMPORT \t ldap_person_listdict %s \r\n" %(ldap_person_listdict)
+ slices_listdict = slabdriver.slab_api.GetSlices()
+ try:
+ slices_by_userid = dict ( [ (one_slice['reg_researchers']['record_id'], one_slice ) for one_slice in slices_listdict ] )
+ except TypeError:
+ self.logger.log_exc("SlabImporter: failed to create list of slices by user id.")
+ pass
+
+ for site in sites_listdict:
+ site_hrn = _get_site_hrn(site)
+ site_record = self.find_record_by_type_hrn ('authority', site_hrn)
+ if not site_record:
+ try:
+ urn = hrn_to_urn(site_hrn, 'authority')
+ if not self.auth_hierarchy.auth_exists(urn):
+ self.auth_hierarchy.create_auth(urn)
+ auth_info = self.auth_hierarchy.get_auth_info(urn)
+ site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
+ pointer='-1',
+ authority=get_authority(site_hrn))
+ site_record.just_created()
+ dbsession.add(site_record)
+ dbsession.commit()
+ self.logger.info("SlabImporter: imported authority (site) : %s" % site_record)
+ self.update_just_added_records_dict(site_record)
+ except SQLAlchemyError:
+ # if the site import fails then there is no point in trying to import the
+ # site's child records (node, slices, persons), so skip them.
+ self.logger.log_exc("SlabImporter: failed to import site. Skipping child records")
+ continue
+ else:
+ # xxx update the record ...
+ pass
+ site_record.stale = False
+
+ # import node records in site
+ for node_id in site['node_ids']:
+ try:
+ node = nodes_by_id[node_id]
+ except:
+ self.logger.warning ("SlabImporter: cannot find node_id %s - ignored"%node_id)
+ continue
+ site_auth = get_authority(site_hrn)
+ site_name = site['name']
+ escaped_hrn = self.hostname_to_hrn_escaped(slabdriver.slab_api.root_auth, node['hostname'])
+ print>>sys.stderr, "\r\n \r\n SLABIMPORTER node %s " %(node)
+ hrn = node['hrn']
+
+
+ # xxx this sounds suspicious
+ if len(hrn) > 64: hrn = hrn[:64]
+ node_record = self.find_record_by_type_hrn( 'node', hrn )
+ if not node_record:
+ try:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(escaped_hrn, 'node')
+ node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ def slab_get_authority(hrn):
+ return hrn.split(".")[0]
+
+ node_record = RegNode (hrn=hrn, gid=node_gid,
+ pointer = '-1',
+ authority=slab_get_authority(hrn))
+ node_record.just_created()
+ dbsession.add(node_record)
+ dbsession.commit()
+ #self.logger.info("SlabImporter: imported node: %s" % node_record)
+ self.update_just_added_records_dict(node_record)
+ except:
+ self.logger.log_exc("SlabImporter: failed to import node")
+ else:
+ # xxx update the record ...
+ pass
+ node_record.stale=False
+
+
+ # import persons
+ for person in ldap_person_listdict :
+
+
+ print>>sys.stderr,"SlabImporter: person: %s" %(person)
+            if 'ssh-rsa' not in person['pkey']:
+                #people with an invalid ssh key (ssh-dss, empty, malformed keys...)
+                #won't be imported
+                continue
+ person_hrn = person['hrn']
+ slice_hrn = self.slicename_to_hrn(person['hrn'])
+
+ # xxx suspicious again
+ if len(person_hrn) > 64: person_hrn = person_hrn[:64]
+ person_urn = hrn_to_urn(person_hrn, 'user')
+
+
+ print>>sys.stderr," \r\n SlabImporter: HEYYYYYYYYYY" , self.users_rec_by_email
+
+            #Check if a user with person['email'] from LDAP is already registered
+            #in SFA. One email = one person. In this case, do not create another
+            #record for this person.
+            #person_hrn returned by GetPersons is based on the senslab root auth + LDAP uid
+ user_record = self.find_record_by_type_hrn('user', person_hrn)
+ if not user_record and person['email'] in self.users_rec_by_email:
+ user_record = self.users_rec_by_email[person['email']]
+ person_hrn = user_record.hrn
+ person_urn = hrn_to_urn(person_hrn, 'user')
+
+
+ slice_record = self.find_record_by_type_hrn ('slice', slice_hrn)
+
+ # return a tuple pubkey (a plc key object) and pkey (a Keypair object)
+ def init_person_key (person, slab_key):
+ pubkey = None
+ if person['pkey']:
+ # randomly pick first key in set
+ pubkey = slab_key
+
+ try:
+ pkey = convert_public_key(pubkey)
+ except TypeError:
+ #key not good. create another pkey
+ self.logger.warn('SlabImporter: \
+ unable to convert public \
+ key for %s' % person_hrn)
+ pkey = Keypair(create=True)
+
+ else:
+ # the user has no keys. Creating a random keypair for the user's gid
+ self.logger.warn("SlabImporter: person %s does not have a public key"%person_hrn)
+ pkey = Keypair(create=True)
+ return (pubkey, pkey)
+
+
+ try:
+ slab_key = person['pkey']
+ # new person
+ if not user_record:
+ (pubkey,pkey) = init_person_key (person, slab_key )
+ if pubkey is not None and pkey is not None :
+ person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+ if person['email']:
+ print>>sys.stderr, "\r\n \r\n SLAB IMPORTER PERSON EMAIL OK email %s " %(person['email'])
+ person_gid.set_email(person['email'])
+ user_record = RegUser (hrn=person_hrn, gid=person_gid,
+ pointer='-1',
+ authority=get_authority(person_hrn),
+ email=person['email'])
+ else:
+ user_record = RegUser (hrn=person_hrn, gid=person_gid,
+ pointer='-1',
+ authority=get_authority(person_hrn))
+
+ if pubkey:
+ user_record.reg_keys = [RegKey (pubkey)]
+ else:
+ self.logger.warning("No key found for user %s"%user_record)
+ user_record.just_created()
+ dbsession.add (user_record)
+ dbsession.commit()
+ self.logger.info("SlabImporter: imported person: %s" % user_record)
+ self.update_just_added_records_dict( user_record )
+ else:
+ # update the record ?
+ # if user's primary key has changed then we need to update the
+ # users gid by forcing an update here
+ sfa_keys = user_record.reg_keys
+
+ new_key=False
+ if slab_key is not sfa_keys :
+ new_key = True
+ if new_key:
+ print>>sys.stderr,"SlabImporter: \t \t USER UPDATE person: %s" %(person['hrn'])
+ (pubkey,pkey) = init_person_key (person, slab_key)
+ person_gid = self.auth_hierarchy.create_gid(person_urn, create_uuid(), pkey)
+ if not pubkey:
+ user_record.reg_keys=[]
+ else:
+ user_record.reg_keys=[ RegKey (pubkey)]
+ self.logger.info("SlabImporter: updated person: %s" % user_record)
+
+ if person['email']:
+ user_record.email = person['email']
+
+ dbsession.commit()
+
+ user_record.stale = False
+ except:
+ self.logger.log_exc("SlabImporter: failed to import person %s"%(person) )
+
+ try:
+ single_slice = slices_by_userid[user_record.record_id]
+ except:
+ self.logger.warning ("SlabImporter: cannot locate slices_by_userid[user_record.record_id] %s - ignored"%user_record)
+
+ if not slice_record :
+ try:
+ pkey = Keypair(create=True)
+ urn = hrn_to_urn(slice_hrn, 'slice')
+ slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
+ slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid,
+ pointer='-1',
+ authority=get_authority(slice_hrn))
+
+ slice_record.just_created()
+ dbsession.add(slice_record)
+ dbsession.commit()
+
+ #Serial id created after commit
+ #Get it
+ sl_rec = dbsession.query(RegSlice).filter(RegSlice.hrn.match(slice_hrn)).all()
+
+ #slab_slice = SenslabXP( slice_hrn = slice_hrn, record_id_slice=sl_rec[0].record_id, record_id_user= user_record.record_id)
+ #print>>sys.stderr, "\r\n \r\n SLAB IMPORTER SLICE IMPORT NOTslice_record %s \r\n slab_slice %s" %(sl_rec,slab_slice)
+ #slab_dbsession.add(slab_slice)
+ #slab_dbsession.commit()
+ #self.logger.info("SlabImporter: imported slice: %s" % slice_record)
+ self.update_just_added_records_dict ( slice_record )
+
+ except:
+ self.logger.log_exc("SlabImporter: failed to import slice")
+
+ #No slice update upon import in senslab
+ else:
+ # xxx update the record ...
+ self.logger.warning ("Slice update not yet implemented")
+ pass
+ # record current users affiliated with the slice
+
+
+ slice_record.reg_researchers = [user_record]
+ dbsession.commit()
+ slice_record.stale=False
+
+
+
+ ### remove stale records
+ # special records must be preserved
+ system_hrns = [slabdriver.hrn, slabdriver.slab_api.root_auth, \
+ slabdriver.hrn+ '.slicemanager']
+ for record in all_records:
+ if record.hrn in system_hrns:
+ record.stale=False
+ if record.peer_authority:
+ record.stale=False
+
+
+ for record in all_records:
+ if record.type == 'user':
+ print>>sys.stderr,"SlabImporter: stale records: hrn %s %s" \
+ %(record.hrn,record.stale)
+ try:
+ stale = record.stale
+ except:
+ stale=True
+ self.logger.warning("stale not found with %s"%record)
+ if stale:
+ self.logger.info("SlabImporter: deleting stale record: %s" % record)
+ #if record.type == 'user':
+ #rec = slab_dbsession.query(SenslabXP).filter_by(record_id_user = record.record_id).first()
+ #slab_dbsession.delete(rec)
+ #slab_dbsession.commit()
+ dbsession.delete(record)
+ dbsession.commit()
+
+
+
# the following is used in Resolve (registry) when run in full mode
# after looking up the sfa db, we wish to be able to display
# testbed-specific info as well
- # this at minima should fill in the 'researcher' field for slice records
- # as this information is then used to compute rights
- # roadmap: there is an intention to redesign the SFA database so as to clear up
- # this constraint, based on the principle that SFA should not rely on the
- # testbed database to perform such a core operation (i.e. getting rights right)
+ # based on the principle that SFA should not rely on the testbed database
+ # to perform such a core operation (i.e. getting rights right)
+ # this is no longer in use when performing other SFA operations
def augment_records_with_testbed_info (self, sfa_records):
return sfa_records
# to the actual method calls anyway
self.manager = manager(config)
else:
+ # that's what happens when there's something wrong with the db
+ # or any bad stuff of that kind at startup time
+ logger.log_exc("Failed to create a manager, startup sequence is broken")
raise SfaAPIError,"Argument to ManagerWrapper must be a module or class"
self.interface = interface
from sfa.trust.certificate import Certificate, Keypair, convert_public_key
from sfa.trust.gid import create_uuid
-from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey
+from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey, \
+ augment_with_sfa_builtins
from sfa.storage.alchemy import dbsession
+### the types that we need to exclude from sqlobjects before being able to dump
+# them on the xmlrpc wire
+from sqlalchemy.orm.collections import InstrumentedList
class RegistryManager:
if not record:
raise RecordNotFound("hrn=%s, type=%s"%(hrn,type))
- # xxx for the record only
- # used to call this, which was wrong, now all needed data is natively is our DB
- # self.driver.augment_records_with_testbed_info (record.__dict__)
- # likewise, we deprecate is_enabled which was not really useful
- # if not self.driver.is_enabled (record.__dict__): ...
- # xxx for the record only
-
# get the callers gid
# if caller_xrn is not specified assume the caller is the record
# object itself.
return new_cred.save_to_string(save_parents=True)
- def Resolve(self, api, xrns, type=None, full=True):
+ # the default for full, which means 'dig into the testbed as well', should be false
+ def Resolve(self, api, xrns, type=None, details=False):
if not isinstance(xrns, types.ListType):
# try to infer type if not set and we get a single input
credential = api.getCredential()
interface = api.registries[registry_hrn]
server_proxy = api.server_proxy(interface, credential)
- peer_records = server_proxy.Resolve(xrns, credential,type)
+ # should propagate the details flag but that's not supported in the xmlrpc interface yet
+ #peer_records = server_proxy.Resolve(xrns, credential,type, details=details)
+ peer_records = server_proxy.Resolve(xrns, credential)
# pass foreign records as-is
# previous code used to read
# records.extend([SfaRecord(dict=record).as_dict() for record in peer_records])
if type:
local_records = local_records.filter_by(type=type)
local_records=local_records.all()
- logger.info("Resolve: local_records=%s (type=%s)"%(local_records,type))
+
+ for local_record in local_records:
+ augment_with_sfa_builtins (local_record)
+
+ logger.info("Resolve, (details=%s,type=%s) local_records=%s "%(details,type,local_records))
local_dicts = [ record.__dict__ for record in local_records ]
- if full:
- # in full mode we get as much info as we can, which involves contacting the
+ if details:
+ # in details mode we get as much info as we can, which involves contacting the
# testbed for getting implementation details about the record
self.driver.augment_records_with_testbed_info(local_dicts)
# also we fill the 'url' field for known authorities
# xxx somehow here calling dict(record) issues a weird error
# however record.todict() seems to work fine
# records.extend( [ dict(record) for record in local_records ] )
- records.extend( [ record.todict() for record in local_records ] )
+ records.extend( [ record.todict(exclude_types=[InstrumentedList]) for record in local_records ] )
+
if not records:
raise RecordNotFound(str(hrns))
record_dicts = record_list
# if we still have not found the record yet, try the local registry
+# logger.debug("before trying local records, %d foreign records"% len(record_dicts))
if not record_dicts:
recursive = False
if ('recursive' in options and options['recursive']):
if not api.auth.hierarchy.auth_exists(hrn):
raise MissingAuthority(hrn)
if recursive:
- records = dbsession.query(RegRecord).filter(RegRecord.hrn.startswith(hrn))
+ records = dbsession.query(RegRecord).filter(RegRecord.hrn.startswith(hrn)).all()
+# logger.debug("recursive mode, found %d local records"%(len(records)))
else:
- records = dbsession.query(RegRecord).filter_by(authority=hrn)
- record_dicts=[ record.todict() for record in records ]
+ records = dbsession.query(RegRecord).filter_by(authority=hrn).all()
+# logger.debug("non recursive mode, found %d local records"%(len(records)))
+ # so that sfi list can show more than plain names...
+ for record in records: augment_with_sfa_builtins (record)
+ record_dicts=[ record.todict(exclude_types=[InstrumentedList]) for record in records ]
return record_dicts
####################
# utility for handling relationships among the SFA objects
- # given that the SFA db does not handle this sort of relationsships
- # it will rely on side-effects in the testbed to keep this persistent
# subject_record describes the subject of the relationships
# ref_record contains the target values for the various relationships we need to manage
- # (to begin with, this is just the slice x person relationship)
+ # (to begin with, this is just the slice x person (researcher) and authority x person (pi) relationships)
def update_driver_relations (self, subject_obj, ref_obj):
type=subject_obj.type
#for (k,v) in subject_obj.__dict__.items(): print k,'=',v
# is there a change in keys ?
new_key=None
if type=='user':
- if getattr(new_key,'keys',None):
+ if getattr(new_record,'keys',None):
new_key=new_record.keys
if isinstance (new_key,types.ListType):
new_key=new_key[0]
urn = hrn_to_urn(hrn,type)
gid_object = api.auth.hierarchy.create_gid(urn, uuid, pkey)
gid = gid_object.save_to_string(save_parents=True)
- record.gid = gid
- dsession.commit()
# xxx should do side effects from new_record to record
# not too sure how to do that
if isinstance (record, RegSlice):
researcher_hrns = getattr(new_record,'researcher',None)
if researcher_hrns is not None: record.update_researchers (researcher_hrns)
- dbsession.commit()
elif isinstance (record, RegAuthority):
pi_hrns = getattr(new_record,'pi',None)
if pi_hrns is not None: record.update_pis (pi_hrns)
- dbsession.commit()
# update the PLC information that was specified with the record
# xxx oddly enough, without this useless statement,
# anyway the driver should receive an object
# (and then extract __dict__ itself if needed)
print "DO NOT REMOVE ME before driver.update, record=%s"%record
- if not self.driver.update (record.__dict__, new_record.__dict__, hrn, new_key):
- logger.warning("driver.update failed")
-
+ new_key_pointer = -1
+ try:
+ (pointer, new_key_pointer) = self.driver.update (record.__dict__, new_record.__dict__, hrn, new_key)
+ except:
+ pass
+ if new_key and new_key_pointer:
+ record.reg_keys=[ RegKey (new_key, new_key_pointer)]
+ record.gid = gid
+
+ dbsession.commit()
# update membership for researchers, pis, owners, operators
self.update_driver_relations (record, new_record)
--- /dev/null
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0" datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+ <start>
+ <ref name="RSpec"/>
+ </start>
+ <define name="RSpec">
+ <element name="RSpec">
+ <attribute name="type">
+ <data type="NMTOKEN"/>
+ </attribute>
+ <choice>
+ <ref name="network"/>
+ <ref name="request"/>
+ </choice>
+ </element>
+ </define>
+ <define name="network">
+ <element name="network">
+ <attribute name="name">
+ <data type="NMTOKEN"/>
+ </attribute>
+ <optional>
+ <attribute name="slice">
+ <data type="NMTOKEN"/>
+ </attribute>
+ </optional>
+ <optional>
+ <ref name="sliver_defaults"/>
+ </optional>
+ <oneOrMore>
+ <ref name="site"/>
+ </oneOrMore>
+ </element>
+ </define>
+ <define name="sliver_defaults">
+ <element name="sliver_defaults">
+ <ref name="sliver_elements"/>
+ </element>
+ </define>
+ <define name="site">
+ <element name="site">
+ <attribute name="id">
+ <data type="ID"/>
+ </attribute>
+ <element name="name">
+ <text/>
+ </element>
+ <zeroOrMore>
+ <ref name="node"/>
+ </zeroOrMore>
+ </element>
+ </define>
+ <define name="node">
+ <element name="node">
+ <attribute name="node_id">
+ <data type="ID"/>
+ </attribute>
+ <element name="hostname">
+ <text/>
+ </element>
+ <attribute name="reservable">
+ <data type="boolean"/>
+ </attribute>
+ <element name="ip_address">
+ <text/>
+ </element>
+ <optional>
+ <element name="urn">
+ <text/>
+ </element>
+ </optional>
+ <optional>
+ <ref name="leases"/>
+ </optional>
+ <optional>
+ <ref name="sliver"/>
+ </optional>
+ </element>
+ </define>
+ <define name="request">
+ <element name="request">
+ <attribute name="name">
+ <data type="NMTOKEN"/>
+ </attribute>
+ <optional>
+ <ref name="sliver_defaults"/>
+ </optional>
+ <oneOrMore>
+ <ref name="sliver"/>
+ </oneOrMore>
+ </element>
+ </define>
+ <define name="sliver">
+ <element name="sliver">
+ <optional>
+ <attribute name="nodeid">
+ <data type="ID"/>
+ </attribute>
+ </optional>
+ <ref name="sliver_elements"/>
+ </element>
+ </define>
+ <define name="sliver_elements">
+ <interleave>
+ <optional>
+ <element name="capabilities">
+ <text/>
+ </element>
+ </optional>
+ <optional>
+ <element name="delegations">
+ <text/>
+ </element>
+ </optional>
+ <optional>
+ <element name="program">
+ <text/>
+ </element>
+ </optional>
+ </interleave>
+ </define>
+ <define name="leases">
+ <element name="leases">
+ <zeroOrMore>
+      <group>
+        <attribute name="slot">
+          <data type="dateTime"/>
+        </attribute>
+        <attribute name="slice">
+          <data type="NMTOKEN"/>
+        </attribute>
+      </group>
+    </zeroOrMore>
+    </element>
+  </define>
+</grammar>
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
# resolve the record
- records = self.api.manager.Resolve(self.api, xrns, full = False)
+ records = self.api.manager.Resolve(self.api, xrns, details = False)
if not records:
raise RecordNotFound(xrns)
# another registry if needed
# I wonder if this is truly the intention, or shouldn't we instead
# only look in the local db ?
- records = self.api.manager.Resolve(self.api, xrn, type)
+ records = self.api.manager.Resolve(self.api, xrn, type, details=False)
if not records:
raise RecordNotFound(hrn)
interfaces = ['registry']
+ # should we not accept an optional 'details' argument ?
accepts = [
Mixed(Parameter(str, "Human readable name (hrn or urn)"),
Parameter(list, "List of Human readable names ([hrn])")),
Mixed(Parameter(str, "Credential string"),
- Parameter(list, "List of credentials)"))
+ Parameter(list, "List of credentials)")),
+ Parameter(dict, "options"),
]
# xxx used to be [SfaRecord]
returns = [Parameter(dict, "registry record")]
- def call(self, xrns, creds):
+ def call(self, xrns, creds, options={}):
+ # use details=False by default, only when explicitly specified do we want
+ # to mess with the testbed details
+ if 'details' in options: details=options['details']
+ else: details=False
type = None
if not isinstance(xrns, types.ListType):
type = Xrn(xrns).get_type()
self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrns, self.name))
# send the call to the right manager
- return self.api.manager.Resolve(self.api, xrns, type)
+ return self.api.manager.Resolve(self.api, xrns, type, details=details)
--- /dev/null
+#!/usr/bin/python
+from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn
+from sfa.util.sfatime import utcparse, datetime_to_string
+from sfa.util.sfalogging import logger
+
+from sfa.rspecs.rspec import RSpec
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.link import Link
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.position_3d import Position3D
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.granularity import Granularity
+from sfa.rspecs.elements.channel import Channel
+from sfa.rspecs.version_manager import VersionManager
+
+from sfa.nitos.nitosxrn import NitosXrn, hostname_to_urn, hrn_to_nitos_slicename, slicename_to_hrn
+from sfa.planetlab.vlink import get_tc_rate
+from sfa.planetlab.topology import Topology
+
+import time
+
+class NitosAggregate:
+
+ def __init__(self, driver):
+ self.driver = driver
+
+
+ def get_slice_and_slivers(self, slice_xrn):
+ """
+ Returns a dict of slivers keyed on the sliver's node_id
+ """
+ slivers = {}
+ slice = None
+ if not slice_xrn:
+ return (slice, slivers)
+ slice_urn = hrn_to_urn(slice_xrn, 'slice')
+ slice_hrn, _ = urn_to_hrn(slice_xrn)
+ slice_name = hrn_to_nitos_slicename(slice_hrn)
+ slices = self.driver.shell.getSlices({'slice_name': slice_name}, [])
+ #filter results
+ for slc in slices:
+ if slc['slice_name'] == slice_name:
+ slice = slc
+ break
+
+ if not slice:
+ return (slice, slivers)
+
+ reserved_nodes = self.driver.shell.getReservedNodes({'slice_id': slice['slice_id']}, [])
+ reserved_node_ids = []
+ # filter on the slice
+ for node in reserved_nodes:
+ if node['slice_id'] == slice['slice_id']:
+ reserved_node_ids.append(node['node_id'])
+ #get all the nodes
+ all_nodes = self.driver.shell.getNodes({}, [])
+
+ for node in all_nodes:
+ if node['node_id'] in reserved_node_ids:
+ slivers[node['node_id']] = node
+
+ return (slice, slivers)
+
+
+
+ def get_nodes(self, slice_xrn, slice=None,slivers={}, options={}):
+        # if we are dealing with a slice that has no node, just return
+        # an empty list
+ if slice_xrn:
+ if not slice or not slivers:
+ return []
+ else:
+ nodes = [slivers[sliver] for sliver in slivers]
+ else:
+ nodes = self.driver.shell.getNodes({}, [])
+
+ # get the granularity in second for the reservation system
+ grain = self.driver.testbedInfo['grain']
+ #grain = 1800
+
+
+ rspec_nodes = []
+ for node in nodes:
+ rspec_node = Node()
+ site_name = self.driver.testbedInfo['name']
+ rspec_node['component_id'] = hostname_to_urn(self.driver.hrn, site_name, node['hostname'])
+ rspec_node['component_name'] = node['hostname']
+ rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
+ rspec_node['authority_id'] = hrn_to_urn(NitosXrn.site_hrn(self.driver.hrn, site_name), 'authority+sa')
+ # do not include boot state (<available> element) in the manifest rspec
+ #if not slice:
+ # rspec_node['boot_state'] = node['boot_state']
+ rspec_node['exclusive'] = 'true'
+ # site location
+ longitude = self.driver.testbedInfo['longitude']
+ latitude = self.driver.testbedInfo['latitude']
+ if longitude and latitude:
+ location = Location({'longitude': longitude, 'latitude': latitude, 'country': 'unknown'})
+ rspec_node['location'] = location
+ # 3D position
+ position_3d = Position3D({'x': node['position']['X'], 'y': node['position']['Y'], 'z': node['position']['Z']})
+ #position_3d = Position3D({'x': 1, 'y': 2, 'z': 3})
+ rspec_node['position_3d'] = position_3d
+ # Granularity
+ granularity = Granularity({'grain': grain})
+ rspec_node['granularity'] = granularity
+
+ # HardwareType
+ rspec_node['hardware_type'] = node['node_type']
+ #rspec_node['hardware_type'] = "orbit"
+
+ #slivers
+ if node['node_id'] in slivers:
+ # add sliver info
+ sliver = slivers[node['node_id']]
+ rspec_node['sliver_id'] = sliver['node_id']
+ rspec_node['client_id'] = node['hostname']
+ rspec_node['slivers'] = [sliver]
+
+
+ rspec_nodes.append(rspec_node)
+ return rspec_nodes
+
+ def get_leases_and_channels(self, slice=None, slice_xrn=None, options={}):
+
+ slices = self.driver.shell.getSlices({}, [])
+ nodes = self.driver.shell.getNodes({}, [])
+ leases = self.driver.shell.getReservedNodes({}, [])
+ channels = self.driver.shell.getChannels({}, [])
+ reserved_channels = self.driver.shell.getReservedChannels()
+ grain = self.driver.testbedInfo['grain']
+
+ if slice_xrn and not slice:
+ return ([], [])
+
+ if slice:
+ all_leases = []
+ all_leases.extend(leases)
+ all_reserved_channels = []
+ all_reserved_channels.extend(reserved_channels)
+ for lease in all_leases:
+ if lease['slice_id'] != slice['slice_id']:
+ leases.remove(lease)
+ for channel in all_reserved_channels:
+ if channel['slice_id'] != slice['slice_id']:
+ reserved_channels.remove(channel)
+
+ rspec_channels = []
+ for channel in reserved_channels:
+
+ rspec_channel = {}
+ #retrieve channel number
+ for chl in channels:
+ if chl['channel_id'] == channel['channel_id']:
+ channel_number = chl['channel']
+ break
+
+ rspec_channel['channel_num'] = channel_number
+ rspec_channel['start_time'] = channel['start_time']
+ rspec_channel['duration'] = (int(channel['end_time']) - int(channel['start_time'])) / int(grain)
+
+            # retrieve slicename
+ for slc in slices:
+ if slc['slice_id'] == channel['slice_id']:
+ slicename = slc['slice_name']
+ break
+
+ if slice_xrn:
+ slice_urn = slice_xrn
+ slice_hrn = urn_to_hrn(slice_urn)
+ else:
+ slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
+ slice_urn = hrn_to_urn(slice_hrn, 'slice')
+
+ rspec_channel['slice_id'] = slice_urn
+ rspec_channels.append(rspec_channel)
+
+
+ rspec_leases = []
+ for lease in leases:
+
+ rspec_lease = Lease()
+
+ rspec_lease['lease_id'] = lease['reservation_id']
+            # retrieve node name
+ for node in nodes:
+ if node['node_id'] == lease['node_id']:
+ nodename = node['hostname']
+ break
+
+ rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, self.driver.testbedInfo['name'], nodename)
+            # retrieve slicename
+ for slc in slices:
+ if slc['slice_id'] == lease['slice_id']:
+ slicename = slc['slice_name']
+ break
+
+ if slice_xrn:
+ slice_urn = slice_xrn
+ slice_hrn = urn_to_hrn(slice_urn)
+ else:
+ slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
+ slice_urn = hrn_to_urn(slice_hrn, 'slice')
+
+ rspec_lease['slice_id'] = slice_urn
+ rspec_lease['start_time'] = lease['start_time']
+ rspec_lease['duration'] = (int(lease['end_time']) - int(lease['start_time'])) / int(grain)
+ rspec_leases.append(rspec_lease)
+
+ return (rspec_leases, rspec_channels)
+
+
+ def get_channels(self, slice=None, options={}):
+
+ all_channels = self.driver.shell.getChannels({}, [])
+ channels = []
+ if slice:
+ reserved_channels = self.driver.shell.getReservedChannels()
+ reserved_channel_ids = []
+ for channel in reserved_channels:
+ if channel['slice_id'] == slice['slice_id']:
+ reserved_channel_ids.append(channel['channel_id'])
+
+ for channel in all_channels:
+ if channel['channel_id'] in reserved_channel_ids:
+ channels.append(channel)
+ else:
+ channels = all_channels
+
+ rspec_channels = []
+ for channel in channels:
+ rspec_channel = Channel()
+ rspec_channel['channel_num'] = channel['channel']
+ rspec_channel['frequency'] = channel['frequency']
+ rspec_channel['standard'] = channel['modulation']
+ rspec_channels.append(rspec_channel)
+ return rspec_channels
+
+
+
+ def get_rspec(self, slice_xrn=None, version = None, options={}):
+
+ version_manager = VersionManager()
+ version = version_manager.get_version(version)
+
+ if not slice_xrn:
+ rspec_version = version_manager._get_version(version.type, version.version, 'ad')
+ else:
+ rspec_version = version_manager._get_version(version.type, version.version, 'manifest')
+
+ slice, slivers = self.get_slice_and_slivers(slice_xrn)
+
+ rspec = RSpec(version=rspec_version, user_options=options)
+
+ if slice and 'expires' in slice:
+ rspec.xml.set('expires', datetime_to_string(utcparse(slice['expires'])))
+
+ if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
+ nodes = self.get_nodes(slice_xrn, slice, slivers, options)
+ rspec.version.add_nodes(nodes)
+ # add sliver defaults
+ default_sliver = slivers.get(None, [])
+ if default_sliver:
+ default_sliver_attribs = default_sliver.get('tags', [])
+ for attrib in default_sliver_attribs:
+ logger.info(attrib)
+ rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
+ # add wifi channels
+ channels = self.get_channels(slice, options)
+ rspec.version.add_channels(channels)
+
+ if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
+ leases, channels = self.get_leases_and_channels(slice, slice_xrn)
+ rspec.version.add_leases(leases, channels)
+
+ return rspec.toxml()
+
+
--- /dev/null
+import time
+import datetime
+#
+from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
+ RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+
+from sfa.util.sfalogging import logger
+from sfa.util.defaultdict import defaultdict
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
+from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_hrn
+from sfa.util.cache import Cache
+
+# one would think the driver should not need to mess with the SFA db, but..
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
+
+# used to be used in get_ticket
+#from sfa.trust.sfaticket import SfaTicket
+
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+# the driver interface, mostly provides default behaviours
+from sfa.managers.driver import Driver
+
+from sfa.nitos.nitosshell import NitosShell
+from sfa.nitos.nitosaggregate import NitosAggregate
+from sfa.nitos.nitosslices import NitosSlices
+
+from sfa.nitos.nitosxrn import NitosXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_nitos_slicename, xrn_to_hostname
+
+def list_to_dict(recs, key):
+ """
+ convert a list of dictionaries into a dictionary keyed on the
+ specified dictionary key
+ """
+ return dict ( [ (rec[key],rec) for rec in recs ] )
+
+#
+# NitosShell is just an xmlrpc serverproxy where methods
+# can be sent as-is; it takes care of authentication
+# from the global config
+#
+class NitosDriver (Driver):
+
+ # the cache instance is a class member so it survives across incoming requests
+ cache = None
+
+ def __init__ (self, config):
+ Driver.__init__ (self, config)
+ self.shell = NitosShell (config)
+ self.cache=None
+ self.testbedInfo = self.shell.getTestbedInfo()
+# un-comment below lines to enable caching
+# if config.SFA_AGGREGATE_CACHING:
+# if NitosDriver.cache is None:
+# NitosDriver.cache = Cache()
+# self.cache = NitosDriver.cache
+
+ ###########################################
+ ########## utility methods for NITOS driver
+ ###########################################
+
+
+ def filter_nitos_results (self, listo, filters_dict):
+        """
+        the NITOS scheduler API does not provide result filtering, so we do it here
+        """
+ mylist = []
+ mylist.extend(listo)
+ for dicto in mylist:
+ for filter in filters_dict:
+ if filter not in dicto or dicto[filter] != filters_dict[filter]:
+ listo.remove(dicto)
+ break
+ return listo
+
+ def convert_id (self, list_of_dict):
+        """
+        convert object ids retrieved in string format to int format
+        """
+ for dicto in list_of_dict:
+ for key in dicto:
+ if key in ['node_id', 'slice_id', 'user_id', 'channel_id', 'reservation_id'] and isinstance(dicto[key], str):
+ dicto[key] = int(dicto[key])
+ elif key in ['user_ids']:
+ user_ids2 = []
+ for user_id in dicto['user_ids']:
+ user_ids2.append(int(user_id))
+ dicto['user_ids'] = user_ids2
+ return list_of_dict
+
+
+
+ ########################################
+ ########## registry oriented
+ ########################################
+
+    def augment_records_with_testbed_info (self, sfa_records):
+        # registry hook: delegate to fill_record_info, which adds both
+        # the NITOS db fields and the SFA-level fields to each record
+        return self.fill_record_info (sfa_records)
+
+ ##########
+ def register (self, sfa_record, hrn, pub_key):
+ type = sfa_record['type']
+ nitos_record = self.sfa_fields_to_nitos_fields(type, hrn, sfa_record)
+
+ if type == 'authority':
+ pointer = -1
+
+ elif type == 'slice':
+ slices = self.shell.getSlices()
+ # filter slices
+ for slice in slices:
+ if slice['slice_name'] == nitos_record['name']:
+ slice_id = slice['slice_id']
+ break
+
+ if not slice_id:
+ pointer = self.shell.addSlice({'slice_name' : nitos_record['name']})
+ else:
+ pointer = slice_id
+
+ elif type == 'user':
+ users = self.shell.getUsers()
+ # filter users
+ for user in users:
+ if user['user_name'] == nitos_record['name']:
+ user_id = user['user_id']
+ break
+ if not user_id:
+ pointer = self.shell.addUser({'username' : nitos_record['name'], 'email' : nitos_record['email']})
+ else:
+ pointer = user_id
+
+
+ # Add the user's key
+ if pub_key:
+ self.shell.addUserKey({'user_id' : pointer,'key' : pub_key})
+
+ elif type == 'node':
+ nodes = self.shell.GetNodes({}, [])
+ # filter nodes
+ for node in nodes:
+ if node['hostname'] == nitos_record['name']:
+ node_id = node['node_id']
+ break
+
+ if not node_id:
+ pointer = self.shell.addNode(nitos_record)
+ else:
+ pointer = node_id
+
+ return pointer
+
+ ##########
+    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
+        """
+        Push an updated registry record down to the NITOS db.
+        Only slice/user/node records have a NITOS counterpart;
+        new_key is only honored for user records.
+        """
+
+        pointer = old_sfa_record['pointer']
+        type = old_sfa_record['type']
+        new_nitos_record = self.sfa_fields_to_nitos_fields(type, hrn, new_sfa_record)
+
+        # new_key implemented for users only
+        if new_key and type not in [ 'user' ]:
+            raise UnknownSfaType(type)
+
+        if type == "slice":
+            if 'name' in new_sfa_record:
+                self.shell.updateSlice({'slice_id': pointer, 'fields': {'slice_name': new_sfa_record['name']}})
+
+        elif type == "user":
+            update_fields = {}
+            if 'name' in new_sfa_record:
+                update_fields['username'] = new_sfa_record['name']
+            if 'email' in new_sfa_record:
+                update_fields['email'] = new_sfa_record['email']
+
+            self.shell.updateUser({'user_id': pointer, 'fields': update_fields})
+
+            if new_key:
+                # needs to be improved
+                # note: this appends a key; old keys are not removed here
+                self.shell.addUserKey({'user_id': pointer, 'key': new_key})
+
+        elif type == "node":
+            self.shell.updateNode({'node_id': pointer, 'fields': new_sfa_record})
+
+        return True
+
+
+ ##########
+ def remove (self, sfa_record):
+
+ type=sfa_record['type']
+ pointer=sfa_record['pointer']
+ if type == 'user':
+ self.shell.deleteUser({'user_id': pointer})
+ elif type == 'slice':
+ self.shell.deleteSlice({'slice_id': pointer})
+ elif type == 'node':
+ self.shell.deleteNode({'node_id': pointer})
+
+ return True
+
+
+
+
+
+ ##
+ # Convert SFA fields to NITOS fields for use when registering or updating
+ # registry record in the NITOS Scheduler database
+ #
+
+ def sfa_fields_to_nitos_fields(self, type, hrn, sfa_record):
+
+ nitos_record = {}
+
+ if type == "slice":
+ nitos_record["slice_name"] = hrn_to_nitos_slicename(hrn)
+ elif type == "node":
+ if "hostname" not in sfa_record:
+ raise MissingSfaInfo("hostname")
+ nitos_record["node_name"] = sfa_record["hostname"]
+
+ return nitos_record
+
+ ####################
+    def fill_record_info(self, records):
+        """
+        Given a (list of) SFA record, fill in the NITOS specific
+        and SFA specific fields in the record.
+        Accepts a single record too; always returns a list.
+        """
+        if not isinstance(records, list):
+            records = [records]
+
+        # order matters: nitos info first, then derived hrns, then sfa fields
+        self.fill_record_nitos_info(records)
+        self.fill_record_hrns(records)
+        self.fill_record_sfa_info(records)
+        return records
+
+    def fill_record_nitos_info(self, records):
+        """
+        Fill in the nitos specific fields of a SFA record. This
+        involves calling the appropriate NITOS API method to retrieve the
+        database record for the object.
+
+        @param record: record to fill in field (in/out param)
+        """
+
+        # get ids by type
+        node_ids, slice_ids = [], []
+        user_ids, key_ids = [], []
+        type_map = {'node': node_ids, 'slice': slice_ids, 'user': user_ids}
+
+        for record in records:
+            for type in type_map:
+                if type == record['type']:
+                    type_map[type].append(record['pointer'])
+
+        # get nitos records
+        nodes, slices, users, keys = {}, {}, {}, {}
+        if node_ids:
+            all_nodes = self.convert_id(self.shell.getNodes({}, []))
+            node_list = [node for node in all_nodes if node['node_id'] in node_ids]
+            nodes = list_to_dict(node_list, 'node_id')
+        if slice_ids:
+            all_slices = self.convert_id(self.shell.getSlices({}, []))
+            slice_list = [slice for slice in all_slices if slice['slice_id'] in slice_ids]
+            slices = list_to_dict(slice_list, 'slice_id')
+        if user_ids:
+            all_users = self.convert_id(self.shell.getUsers())
+            user_list = [user for user in all_users if user['user_id'] in user_ids]
+            users = list_to_dict(user_list, 'user_id')
+
+        nitos_records = {'node': nodes, 'slice': slices, 'user': users}
+
+
+        # fill record info
+        for record in records:
+            # -1 pointer marks records (authorities) with no NITOS counterpart
+            if record['pointer'] == -1:
+                continue
+
+            for type in nitos_records:
+                if record['type'] == type:
+                    if record['pointer'] in nitos_records[type]:
+                        record.update(nitos_records[type][record['pointer']])
+                    break
+            # fill in key info
+            # assumes the NITOS user record carries a 'keys' field -- TODO confirm
+            if record['type'] == 'user':
+                if record['pointer'] in nitos_records['user']:
+                    record['keys'] = nitos_records['user'][record['pointer']]['keys']
+
+        return records
+
+
+ def fill_record_hrns(self, records):
+ """
+ convert nitos ids to hrns
+ """
+
+
+ # get ids
+ slice_ids, user_ids, node_ids = [], [], []
+ for record in records:
+ if 'user_ids' in record:
+ user_ids.extend(record['user_ids'])
+ if 'slice_ids' in record:
+ slice_ids.extend(record['slice_ids'])
+ if 'node_ids' in record:
+ node_ids.extend(record['node_ids'])
+
+ # get nitos records
+ slices, users, nodes = {}, {}, {}
+ if node_ids:
+ all_nodes = self.convert_id(self.shell.getNodes({}, []))
+ node_list = [node for node in all_nodes if node['node_id'] in node_ids]
+ nodes = list_to_dict(node_list, 'node_id')
+ if slice_ids:
+ all_slices = self.convert_id(self.shell.getSlices({}, []))
+ slice_list = [slice for slice in all_slices if slice['slice_id'] in slice_ids]
+ slices = list_to_dict(slice_list, 'slice_id')
+ if user_ids:
+ all_users = self.convert_id(self.shell.getUsers())
+ user_list = [user for user in all_users if user['user_id'] in user_ids]
+ users = list_to_dict(user_list, 'user_id')
+
+
+ # convert ids to hrns
+ for record in records:
+ # get all relevant data
+ type = record['type']
+ pointer = record['pointer']
+ auth_hrn = self.hrn
+ testbed_name = self.testbedInfo['name']
+ if pointer == -1:
+ continue
+ if 'user_ids' in record:
+ usernames = [users[user_id]['username'] for user_id in record['user_ids'] \
+ if user_id in users]
+ user_hrns = [".".join([auth_hrn, testbed_name, username]) for username in usernames]
+ record['users'] = user_hrns
+ if 'slice_ids' in record:
+ slicenames = [slices[slice_id]['slice_name'] for slice_id in record['slice_ids'] \
+ if slice_id in slices]
+ slice_hrns = [slicename_to_hrn(auth_hrn, slicename) for slicename in slicenames]
+ record['slices'] = slice_hrns
+ if 'node_ids' in record:
+ hostnames = [nodes[node_id]['hostname'] for node_id in record['node_ids'] \
+ if node_id in nodes]
+ node_hrns = [hostname_to_hrn(auth_hrn, login_base, hostname) for hostname in hostnames]
+ record['nodes'] = node_hrns
+
+ if 'expires' in record:
+ date = utcparse(record['expires'])
+ datestring = datetime_to_string(date)
+ record['expires'] = datestring
+
+ return records
+
+    def fill_record_sfa_info(self, records):
+        """
+        Fill in the SFA-level fields (geni_urn, researcher, email, ...)
+        of each record, cross-referencing the local registry db.
+        """
+
+        def startswith(prefix, values):
+            return [value for value in values if value.startswith(prefix)]
+
+        # get user ids
+        user_ids = []
+        for record in records:
+            user_ids.extend(record.get("user_ids", []))
+
+        # get the registry records
+        user_list, users = [], {}
+        user_list = dbsession.query(RegRecord).filter(RegRecord.pointer.in_(user_ids)).all()
+        # create a hrns keyed on the sfa record's pointer.
+        # Its possible for multiple records to have the same pointer so
+        # the dict's value will be a list of hrns.
+        users = defaultdict(list)
+        for user in user_list:
+            users[user.pointer].append(user)
+
+        # get the nitos records
+        nitos_user_list, nitos_users = [], {}
+        nitos_all_users = self.convert_id(self.shell.getUsers())
+        nitos_user_list = [user for user in nitos_all_users if user['user_id'] in user_ids]
+        nitos_users = list_to_dict(nitos_user_list, 'user_id')
+
+
+        # fill sfa info
+        for record in records:
+            if record['pointer'] == -1:
+                continue
+
+            sfa_info = {}
+            type = record['type']
+            logger.info("fill_record_sfa_info - incoming record typed %s"%type)
+            if (type == "slice"):
+                # all slice users are researchers
+                record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
+                record['researcher'] = []
+                for user_id in record.get('user_ids', []):
+                    hrns = [user.hrn for user in users[user_id]]
+                    record['researcher'].extend(hrns)
+
+            elif (type == "node"):
+                sfa_info['dns'] = record.get("hostname", "")
+                # xxx TODO: URI, LatLong, IP, DNS
+
+            elif (type == "user"):
+                logger.info('setting user.email')
+                sfa_info['email'] = record.get("email", "")
+                sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
+                sfa_info['geni_certificate'] = record['gid']
+                # xxx TODO: PostalAddress, Phone
+            record.update(sfa_info)
+
+ ####################
+    def update_relation (self, subject_type, target_type, relation_name, subject_id, target_ids):
+        """
+        Reconcile the slice->researcher relation: add/remove users so the
+        slice's membership matches target_ids. Other relations are logged
+        and ignored.
+        """
+
+        if subject_type =='slice' and target_type == 'user' and relation_name == 'researcher':
+            subject=self.shell.getSlices ({'slice_id': subject_id}, [])[0]
+            current_target_ids = subject['user_ids']
+            add_target_ids = list ( set (target_ids).difference(current_target_ids))
+            del_target_ids = list ( set (current_target_ids).difference(target_ids))
+            logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
+            for target_id in add_target_ids:
+                self.shell.addUserToSlice ({'user_id': target_id, 'slice_id': subject_id})
+                logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
+            for target_id in del_target_ids:
+                logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
+                # NOTE(review): NitosShell's direct_calls white-list spells this
+                # 'deleteUserFromSLice' (typo) -- as written this call is rejected
+                self.shell.deleteUserFromSlice ({'user_id': target_id, 'slice_id': subject_id})
+        else:
+            logger.info('unexpected relation %s to maintain, %s -> %s'%(relation_name,subject_type,target_type))
+
+
+ ########################################
+ ########## aggregate oriented
+ ########################################
+
+ def testbed_name (self): return "nitos"
+
+ # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+ def aggregate_version (self):
+ version_manager = VersionManager()
+ ad_rspec_versions = []
+ request_rspec_versions = []
+ for rspec_version in version_manager.versions:
+ if rspec_version.content_type in ['*', 'ad']:
+ ad_rspec_versions.append(rspec_version.to_dict())
+ if rspec_version.content_type in ['*', 'request']:
+ request_rspec_versions.append(rspec_version.to_dict())
+ return {
+ 'testbed':self.testbed_name(),
+ 'geni_request_rspec_versions': request_rspec_versions,
+ 'geni_ad_rspec_versions': ad_rspec_versions,
+ }
+
+    def list_slices (self, creds, options):
+        """
+        Return the urns of all slices known to the NITOS scheduler,
+        serving from the (optional) cache when enabled.
+        """
+        # look in cache first
+        if self.cache:
+            slices = self.cache.get('slices')
+            if slices:
+                logger.debug("NitosDriver.list_slices returns from cache")
+                return slices
+
+        # get data from db
+        slices = self.shell.getSlices({}, [])
+        testbed_name = self.testbedInfo['name']
+        slice_hrns = [slicename_to_hrn(self.hrn, testbed_name, slice['slice_name']) for slice in slices]
+        slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+
+        # cache the result
+        if self.cache:
+            logger.debug ("NitosDriver.list_slices stores value in cache")
+            self.cache.add('slices', slice_urns)
+
+        return slice_urns
+
+    # first 2 args are None in case of resource discovery
+    def list_resources (self, slice_urn, slice_hrn, creds, options):
+        """
+        Return the (NITOS v1) RSpec for the whole testbed, or for a slice
+        when slice_urn/slice_hrn are given; only advertisement requests
+        (no slice) are cached.
+        """
+        cached_requested = options.get('cached', True)
+        version_manager = VersionManager()
+        # get the rspec's return format from options
+        #rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+        # rspec's return format for nitos aggregate is version NITOS 1
+        rspec_version = version_manager.get_version('NITOS 1')
+        version_string = "rspec_%s" % (rspec_version)
+
+        #panos adding the info option to the caching key (can be improved)
+        if options.get('info'):
+            version_string = version_string + "_"+options.get('info', 'default')
+
+        # Adding the list_leases option to the caching key
+        if options.get('list_leases'):
+            version_string = version_string + "_"+options.get('list_leases', 'default')
+
+        # Adding geni_available to caching key
+        if options.get('geni_available'):
+            version_string = version_string + "_" + str(options.get('geni_available'))
+
+        # look in cache first
+        if cached_requested and self.cache and not slice_hrn:
+            rspec = self.cache.get(version_string)
+            if rspec:
+                logger.debug("NitosDriver.ListResources: returning cached advertisement")
+                return rspec
+
+        #panos: passing user-defined options
+        #print "manager options = ",options
+        aggregate = NitosAggregate(self)
+        rspec =  aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
+                                     options=options)
+
+        # cache the result
+        if self.cache and not slice_hrn:
+            logger.debug("NitosDriver.ListResources: stores advertisement in cache")
+            self.cache.add(version_string, rspec)
+
+        return rspec
+
+ def sliver_status (self, slice_urn, slice_hrn):
+ # find out where this slice is currently running
+ slicename = hrn_to_nitos_slicename(slice_hrn)
+
+ slices = self.shell.getSlices({}, [])
+ # filter slicename
+ if len(slices) == 0:
+ raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
+
+ for slice in slices:
+ if slice['slice_name'] == slicename:
+ user_slice = slice
+ break
+
+ if not user_slice:
+ raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
+
+ # report about the reserved nodes only
+ reserved_nodes = self.shell.getReservedNodes({}, [])
+ nodes = self.shell.getNodes({}, [])
+
+ slice_reserved_nodes = []
+ for r_node in reserved_nodes:
+ if r_node['slice_id'] == slice['slice_id']:
+ for node in nodes:
+ if node['node_id'] == r_node['node_id']:
+ slice_reserved_nodes.append(node)
+
+
+
+
+ if len(slice_reserved_nodes) == 0:
+ raise SliverDoesNotExist("You have not allocated any slivers here")
+
+##### continue from here
+ # get login info
+ user = {}
+ keys = []
+ if slice['user_ids']:
+ users = self.shell.getUsers()
+ # filter users on slice['user_ids']
+ for usr in users:
+ if usr['user_id'] in slice['user_ids']:
+ keys.extend(usr['keys'])
+
+
+ user.update({'urn': slice_urn,
+ 'login': slice['slice_name'],
+ 'protocol': ['ssh'],
+ 'port': ['22'],
+ 'keys': keys})
+
+
+ result = {}
+ top_level_status = 'unknown'
+ if slice_reserved_nodes:
+ top_level_status = 'ready'
+ result['geni_urn'] = slice_urn
+ result['nitos_gateway_login'] = slice['slice_name']
+ #result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
+ #result['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
+
+ resources = []
+ for node in slice_reserved_nodes:
+ res = {}
+ res['nitos_hostname'] = node['hostname']
+ sliver_id = Xrn(slice_urn, type='slice', id=node['node_id']).urn
+ res['geni_urn'] = sliver_id
+ res['geni_status'] = 'ready'
+ res['geni_error'] = ''
+ res['users'] = [user]
+
+ resources.append(res)
+
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = resources
+
+ return result
+
+    def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+        """
+        Materialize the requested RSpec: ensure the slice and its users
+        exist in the NITOS db, then reconcile node and channel leases;
+        returns the resulting manifest RSpec.
+        """
+
+        aggregate = NitosAggregate(self)
+        slices = NitosSlices(self)
+        sfa_peer = slices.get_sfa_peer(slice_hrn)
+        slice_record=None
+        if users:
+            slice_record = users[0].get('slice_record', {})
+
+        # parse rspec
+        rspec = RSpec(rspec_string, version='NITOS 1')
+
+        # ensure slice record exists
+        slice = slices.verify_slice(slice_hrn, slice_record, sfa_peer, options=options)
+        # ensure user records exists
+        users = slices.verify_users(slice_hrn, slice, users, sfa_peer, options=options)
+
+        # add/remove leases (nodes and channels)
+        # a lease in Nitos RSpec case is a reservation of nodes and channels grouped by (slice,timeslot)
+        rspec_requested_nodes, rspec_requested_channels = rspec.version.get_leases()
+
+        nodes = slices.verify_slice_leases_nodes(slice, rspec_requested_nodes)
+        channels = slices.verify_slice_leases_channels(slice, rspec_requested_channels)
+
+        return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+
+ def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+ slicename = hrn_to_nitos_slicename(slice_hrn)
+ slices = self.filter_nitos_results(self.shell.getSlices({}, []), {'slice_name': slicename})
+ if not slices:
+ return 1
+ slice = slices[0]
+
+ slice_reserved_nodes = self.filter_nitos_results(self.shell.getReservedNodes({}, []), {'slice_id': slice['slice_id'] })
+ slice_reserved_channels = self.filter_nitos_results(self.shell.getReservedChannels(), {'slice_id': slice['slice_id'] })
+
+ slice_reserved_nodes_ids = [node['reservation_id'] for node in slice_reserved_nodes]
+ slice_reserved_channels_ids = [channel['reservation_id'] for channel in slice_reserved_channels]
+
+ # release all reserved nodes and channels for that slice
+ try:
+ released_nodes = self.shell.releaseNodes({'reservation_ids': slice_reserved_nodes_ids})
+ released_channels = self.shell.releaseChannels({'reservation_ids': slice_reserved_channels_ids})
+ except:
+ pass
+ return 1
+
+ def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
+ slicename = hrn_to_nitos_slicename(slice_hrn)
+ slices = self.shell.GetSlices({'slicename': slicename}, ['slice_id'])
+ if not slices:
+ raise RecordNotFound(slice_hrn)
+ slice = slices[0]
+ requested_time = utcparse(expiration_time)
+ record = {'expires': int(datetime_to_epoch(requested_time))}
+ try:
+ self.shell.UpdateSlice(slice['slice_id'], record)
+
+ return True
+ except:
+ return False
+
+
+    # xxx this code is quite old and has not run for ages
+    # it is obviously totally broken and needs a rewrite
+    def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
+        # intentionally unimplemented; the PLC-era code below is kept
+        # as reference material only and must not be resurrected as-is
+        raise SfaNotImplemented,"NitosDriver.get_ticket needs a rewrite"
+# please keep this code for future reference
+#        slices = PlSlices(self)
+#        peer = slices.get_peer(slice_hrn)
+#        sfa_peer = slices.get_sfa_peer(slice_hrn)
+#
+#        # get the slice record
+#        credential = api.getCredential()
+#        interface = api.registries[api.hrn]
+#        registry = api.server_proxy(interface, credential)
+#        records = registry.Resolve(xrn, credential)
+#
+#        # make sure we get a local slice record
+#        record = None
+#        for tmp_record in records:
+#            if tmp_record['type'] == 'slice' and \
+#               not tmp_record['peer_authority']:
+#    #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
+#                slice_record = SliceRecord(dict=tmp_record)
+#        if not record:
+#            raise RecordNotFound(slice_hrn)
+#
+#        # similar to CreateSliver, we must verify that the required records exist
+#        # at this aggregate before we can issue a ticket
+#        # parse rspec
+#        rspec = RSpec(rspec_string)
+#        requested_attributes = rspec.version.get_slice_attributes()
+#
+#        # ensure site record exists
+#        site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
+#        # ensure slice record exists
+#        slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
+#        # ensure person records exists
+#    # xxx users is undefined in this context
+#        persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
+#        # ensure slice attributes exists
+#        slices.verify_slice_attributes(slice, requested_attributes)
+#
+#        # get sliver info
+#        slivers = slices.get_slivers(slice_hrn)
+#
+#        if not slivers:
+#            raise SliverDoesNotExist(slice_hrn)
+#
+#        # get initscripts
+#        initscripts = []
+#        data = {
+#            'timestamp': int(time.time()),
+#            'initscripts': initscripts,
+#            'slivers': slivers
+#        }
+#
+#        # create the ticket
+#        object_gid = record.get_gid_object()
+#        new_ticket = SfaTicket(subject = object_gid.get_subject())
+#        new_ticket.set_gid_caller(api.auth.client_gid)
+#        new_ticket.set_gid_object(object_gid)
+#        new_ticket.set_issuer(key=api.key, subject=self.hrn)
+#        new_ticket.set_pubkey(object_gid.get_pubkey())
+#        new_ticket.set_attributes(data)
+#        new_ticket.set_rspec(rspec)
+#        #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+#        new_ticket.encode()
+#        new_ticket.sign()
+#
+#        return new_ticket.save_to_string(save_parents=True)
--- /dev/null
+import sys
+import xmlrpclib
+import socket
+from urlparse import urlparse
+
+from sfa.util.sfalogging import logger
+
+class NitosShell:
+ """
+ A simple xmlrpc shell to a NITOS Scheduler instance
+ This class can receive all NITOS API calls to the underlying testbed
+ For safety this is limited to a set of hard-coded calls
+ """
+
+ direct_calls = ['getNodes','getChannels','getSlices','getUsers','getReservedNodes',
+ 'getReservedChannels','getTestbedInfo',
+ 'reserveNodes','reserveChannels','addSlice','addUser','addUserToSlice',
+ 'addUserKey','addNode', 'addChannel',
+ 'updateReservedNodes','updateReservedChannels','updateSlice','updateUser',
+ 'updateNode', 'updateChannel',
+ 'deleteNode','deleteChannel','deleteSlice','deleteUser', 'deleteUserFromSLice',
+ 'deleteKey', 'releaseNodes', 'releaseChannels'
+ ]
+
+
+ # use the 'capability' auth mechanism for higher performance when the PLC db is local
+ def __init__ ( self, config ) :
+ url = config.SFA_NITOS_URL
+ self.proxy = xmlrpclib.Server(url, verbose = False, allow_none = True)
+
+ def __getattr__(self, name):
+ def func(*args, **kwds):
+ actual_name=None
+ if name in NitosShell.direct_calls: actual_name=name
+ if not actual_name:
+ raise Exception, "Illegal method call %s for NITOS driver"%(name)
+ actual_name = "scheduler.server." + actual_name
+ result=getattr(self.proxy, actual_name)(*args, **kwds)
+ logger.debug('NitosShell %s (%s) returned ... '%(name,actual_name))
+ return result
+ return func
+
--- /dev/null
+from types import StringTypes
+from collections import defaultdict
+
+from sfa.util.sfatime import utcparse, datetime_to_epoch
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
+
+from sfa.rspecs.rspec import RSpec
+
+from sfa.nitos.nitosxrn import NitosXrn, hrn_to_nitos_slicename, xrn_to_hostname
+
+MAXINT = 2L**31-1
+
+class NitosSlices:
+
+    def __init__(self, driver):
+        # keep a handle on the driver to reach its shell and filter helpers
+        self.driver = driver
+
+
+ def get_sfa_peer(self, xrn):
+ hrn, type = urn_to_hrn(xrn)
+
+ # return the authority for this hrn or None if we are the authority
+ sfa_peer = None
+ slice_authority = get_authority(hrn)
+ site_authority = get_authority(slice_authority)
+
+ if site_authority != self.driver.hrn:
+ sfa_peer = site_authority
+
+ return sfa_peer
+
+    def verify_slice_leases_nodes(self, slice, rspec_requested_nodes):
+        """
+        Reconcile the slice's node reservations with the ones requested in
+        the rspec: keep matching leases, release leases no longer requested,
+        reserve the new ones. Returns the list of newly added leases.
+        """
+        nodes = self.driver.shell.getNodes({}, [])
+
+        requested_nodes = []
+        for node in rspec_requested_nodes:
+            requested_node = {}
+            nitos_nodes = []
+            nitos_nodes.extend(nodes)
+            slice_name = hrn_to_nitos_slicename(node['slice_id'])
+            if slice_name != slice['slice_name']:
+                continue
+            hostname = xrn_to_hostname(node['component_id'])
+            nitos_node = self.driver.filter_nitos_results(nitos_nodes, {'hostname': hostname})
+            if not nitos_node:
+                continue
+            nitos_node = nitos_node[0]
+            # fill the requested node with nitos ids
+            requested_node['slice_id'] = slice['slice_id']
+            requested_node['node_id'] = nitos_node['node_id']
+            requested_node['start_time'] = node['start_time']
+            # end = start + duration * grain (all kept as strings)
+            requested_node['end_time'] = str(int(node['duration']) * int(self.driver.testbedInfo['grain']) + int(node['start_time']))
+            requested_nodes.append(requested_node)
+
+        # get actual nodes reservation data for the slice
+        reserved_nodes = self.driver.filter_nitos_results(self.driver.shell.getReservedNodes({}, []), {'slice_id': slice['slice_id']})
+
+        reserved_nodes_by_id = {}
+        for node in reserved_nodes:
+            reserved_nodes_by_id[node['reservation_id']] = {'slice_id': node['slice_id'], \
+                              'node_id': node['node_id'], 'start_time': node['start_time'], \
+                              'end_time': node['end_time']}
+
+        # set difference between requested and existing leases, matched on
+        # the full (slice_id, node_id, start, end) dict value
+        added_nodes = []
+        kept_nodes_id = []
+        deleted_nodes_id = []
+        for reservation_id in reserved_nodes_by_id:
+            if reserved_nodes_by_id[reservation_id] not in requested_nodes:
+                deleted_nodes_id.append(reservation_id)
+            else:
+                kept_nodes_id.append(reservation_id)
+                requested_nodes.remove(reserved_nodes_by_id[reservation_id])
+        added_nodes = requested_nodes
+
+
+        try:
+            deleted=self.driver.shell.releaseNodes({'reservation_ids': deleted_nodes_id})
+            for node in added_nodes:
+                added=self.driver.shell.reserveNodes({'slice_id': slice['slice_id'], 'start_time': node['start_time'], 'end_time': node['end_time'], 'nodes': [node['node_id']]})
+
+        except:
+            logger.log_exc('Failed to add/remove slice leases nodes')
+
+        return added_nodes
+
+
+    def verify_slice_leases_channels(self, slice, rspec_requested_channels):
+        """
+        Same reconciliation as verify_slice_leases_nodes, but for channel
+        reservations. Returns the list of newly added channel leases.
+        """
+        channels = self.driver.shell.getChannels({}, [])
+
+        requested_channels = []
+        for channel in rspec_requested_channels:
+            requested_channel = {}
+            nitos_channels = []
+            nitos_channels.extend(channels)
+            slice_name = hrn_to_nitos_slicename(channel['slice_id'])
+            if slice_name != slice['slice_name']:
+                continue
+            channel_num = channel['channel_num']
+            # assumes a matching channel always exists -- IndexError otherwise
+            nitos_channel = self.driver.filter_nitos_results(nitos_channels, {'channel': channel_num})[0]
+            # fill the requested channel with nitos ids
+            requested_channel['slice_id'] = slice['slice_id']
+            requested_channel['channel_id'] = nitos_channel['channel_id']
+            requested_channel['start_time'] = channel['start_time']
+            # end = start + duration * grain (all kept as strings)
+            requested_channel['end_time'] = str(int(channel['duration']) * int(self.driver.testbedInfo['grain']) + int(channel['start_time']))
+            requested_channels.append(requested_channel)
+
+        # get actual channel reservation data for the slice
+        reserved_channels = self.driver.filter_nitos_results(self.driver.shell.getReservedChannels(), {'slice_id': slice['slice_id']})
+
+        reserved_channels_by_id = {}
+        for channel in reserved_channels:
+            reserved_channels_by_id[channel['reservation_id']] = {'slice_id': channel['slice_id'], \
+                              'channel_id': channel['channel_id'], 'start_time': channel['start_time'], \
+                              'end_time': channel['end_time']}
+
+        added_channels = []
+        kept_channels_id = []
+        deleted_channels_id = []
+        for reservation_id in reserved_channels_by_id:
+            if reserved_channels_by_id[reservation_id] not in requested_channels:
+                deleted_channels_id.append(reservation_id)
+            else:
+                kept_channels_id.append(reservation_id)
+                requested_channels.remove(reserved_channels_by_id[reservation_id])
+        added_channels = requested_channels
+
+
+        try:
+            deleted=self.driver.shell.releaseChannels({'reservation_ids': deleted_channels_id})
+            for channel in added_channels:
+                added=self.driver.shell.reserveChannels({'slice_id': slice['slice_id'], 'start_time': channel['start_time'], 'end_time': channel['end_time'], 'channels': [channel['channel_id']]})
+
+        except:
+            logger.log_exc('Failed to add/remove slice leases channels')
+
+        return added_channels
+
+
+    def free_egre_key(self):
+        # NOTE(review): dead code inherited from the PL driver --
+        # 'GetSliceTags' is not in NitosShell.direct_calls, so this call
+        # would raise "Illegal method call" if it were ever invoked
+        used = set()
+        for tag in self.driver.shell.GetSliceTags({'tagname': 'egre_key'}):
+            used.add(int(tag['value']))
+
+        # first unused key in 1..255
+        for i in range(1, 256):
+            if i not in used:
+                key = i
+                break
+        else:
+            raise KeyError("No more EGRE keys available")
+
+        return str(key)
+
+
+
+ def verify_slice(self, slice_hrn, slice_record, sfa_peer, options={}):
+ slicename = hrn_to_nitos_slicename(slice_hrn)
+ slices = self.driver.shell.getSlices({}, [])
+ slices = self.driver.filter_nitos_results(slices, {'slice_name': slicename})
+ if not slices:
+ slice = {'slice_name': slicename}
+ # add the slice
+ slice['slice_id'] = self.driver.shell.addSlice(slice)
+ slice['node_ids'] = []
+ slice['user_ids'] = []
+ else:
+ slice = slices[0]
+
+ return slice
+
+ def verify_users(self, slice_hrn, slice_record, users, sfa_peer, options={}):
+ # get slice info
+ slicename = hrn_to_nitos_slicename(slice_hrn)
+ slices = self.driver.shell.getSlices({}, [])
+ slice = self.driver.filter_nitos_results(slices, {'slice_name': slicename})[0]
+ added_users = []
+ #get users info
+ users_info = []
+ for user in users:
+ user_urn = user['urn']
+ user_hrn, type = urn_to_hrn(user_urn)
+ username = str(user_hrn).split('.')[-1]
+ email = user['email']
+ # look for the user according to his username, email...
+ nitos_users = self.driver.filter_nitos_results(self.driver.shell.getUsers(), {'username': username})
+ if not nitos_users:
+ nitos_users = self.driver.filter_nitos_results(self.driver.shell.getUsers(), {'email': email})
+
+ if not nitos_users:
+ # create the user
+ user_id = self.driver.shell.addUser({'username': email.split('@')[0], 'email': email})
+ added_users.append(user_id)
+ # add user keys
+ for key in user['keys']:
+ self.driver.shell.addUserKey({'user_id': user_id, 'key': key, 'slice_id': slice['slice_id']})
+ # add the user to the slice
+ self.driver.shell.addUserToSlice({'slice_id': slice['slice_id'], 'user_id': user_id})
+ else:
+ # check if the users are in the slice
+ for user in nitos_users:
+ if not user['user_id'] in slice['user_ids']:
+ self.driver.shell.addUserToSlice({'slice_id': slice['slice_id'], 'user_id': user['user_id']})
+
+ return added_users
+
+
+    def verify_keys(self, persons, users, options={}):
+        # NOTE(review): dead code inherited from the PL driver -- it
+        # references an undefined 'peer' (NameError if ever executed) and
+        # calls PLC methods (GetKeys, AddPersonKey, Bind/UnBindObject...,
+        # DeleteKey) that NitosShell's white-list rejects. Kept verbatim.
+        # existing keys
+        key_ids = []
+        for person in persons:
+            key_ids.extend(person['key_ids'])
+        keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key'])
+        keydict = {}
+        for key in keylist:
+            keydict[key['key']] = key['key_id']
+        existing_keys = keydict.keys()
+        persondict = {}
+        for person in persons:
+            persondict[person['email']] = person
+
+        # add new keys
+        requested_keys = []
+        updated_persons = []
+        for user in users:
+            user_keys = user.get('keys', [])
+            updated_persons.append(user)
+            for key_string in user_keys:
+                requested_keys.append(key_string)
+                if key_string not in existing_keys:
+                    key = {'key': key_string, 'key_type': 'ssh'}
+                    try:
+                        if peer:
+                            person = persondict[user['email']]
+                            self.driver.shell.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
+                        key['key_id'] = self.driver.shell.AddPersonKey(user['email'], key)
+                        if peer:
+                            key_index = user_keys.index(key['key'])
+                            remote_key_id = user['key_ids'][key_index]
+                            self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
+
+                    finally:
+                        if peer:
+                            self.driver.shell.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
+
+        # remove old keys (only if we are not appending)
+        append = options.get('append', True)
+        if append == False:
+            removed_keys = set(existing_keys).difference(requested_keys)
+            for existing_key_id in keydict:
+                if keydict[existing_key_id] in removed_keys:
+                    try:
+                        if peer:
+                            self.driver.shell.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
+                        self.driver.shell.DeleteKey(existing_key_id)
+                    except:
+                        pass
+
+
--- /dev/null
+# specialized Xrn class for NITOS
+import re
+from sfa.util.xrn import Xrn
+
+# temporary helper functions to use this module instead of namespace
+def hostname_to_hrn (auth, login_base, hostname):
+    return NitosXrn(auth=auth+'.'+login_base,hostname=hostname).get_hrn()
+def hostname_to_urn(auth, login_base, hostname):
+    return NitosXrn(auth=auth+'.'+login_base,hostname=hostname).get_urn()
+def slicename_to_hrn (auth_hrn,site_name,slicename):
+    return NitosXrn(auth=auth_hrn+'.'+site_name,slicename=slicename).get_hrn()
+# hack to convert nitos user name to hrn
+def username_to_hrn (auth_hrn,site_name,username):
+    return NitosXrn(auth=auth_hrn+'.'+site_name,slicename=username).get_hrn()
+def email_to_hrn (auth_hrn, email):
+    return NitosXrn(auth=auth_hrn, email=email).get_hrn()
+def hrn_to_nitos_slicename (hrn):
+    return NitosXrn(xrn=hrn,type='slice').nitos_slicename()
+# removed-dangerous - was used for non-slice objects
+#def hrn_to_nitos_login_base (hrn):
+#    return NitosXrn(xrn=hrn,type='slice').nitos_login_base()
+def hrn_to_nitos_authname (hrn):
+    return NitosXrn(xrn=hrn,type='any').nitos_authname()
+# despite the parameter name, this accepts any node xrn (hrn or urn)
+def xrn_to_hostname(hrn):
+    return Xrn.unescape(NitosXrn(xrn=hrn, type='node').get_leaf())
+
+class NitosXrn (Xrn):
+    # specialized Xrn: builds hrns/urns from NITOS-native names
+    # (hostname, slicename, email, interface) and converts back
+
+    @staticmethod
+    def site_hrn (auth, login_base):
+        return '.'.join([auth,login_base])
+
+    def __init__ (self, auth=None, hostname=None, slicename=None, email=None, interface=None, **kwargs):
+        # exactly one of hostname/slicename/email/interface selects the
+        # object type; otherwise fall back to the generic Xrn constructor
+        #def hostname_to_hrn(auth_hrn, login_base, hostname):
+        if hostname is not None:
+            self.type='node'
+            # keep only the first part of the DNS name
+            #self.hrn='.'.join( [auth,hostname.split(".")[0] ] )
+            # escape the '.' in the hostname
+            self.hrn='.'.join( [auth,Xrn.escape(hostname)] )
+            self.hrn_to_urn()
+        #def slicename_to_hrn(auth_hrn, slicename):
+        elif slicename is not None:
+            self.type='slice'
+            # dots inside the slicename would create bogus hrn levels
+            self.hrn = ".".join([auth] + [slicename.replace(".", "_")])
+            self.hrn_to_urn()
+        #def email_to_hrn(auth_hrn, email):
+        elif email is not None:
+            self.type='person'
+            # keep only the part before '@' and replace special chars into _
+            self.hrn='.'.join([auth,email.split('@')[0].replace(".", "_").replace("+", "_")])
+            self.hrn_to_urn()
+        elif interface is not None:
+            self.type = 'interface'
+            self.hrn = auth + '.' + interface
+            self.hrn_to_urn()
+        else:
+            Xrn.__init__ (self,**kwargs)
+
+    #def hrn_to_pl_slicename(hrn):
+    def nitos_slicename (self):
+        # leaf minus any ':<sliver-id>' suffix, stripped of exotic chars
+        self._normalize()
+        leaf = self.leaf
+        sliver_id_parts = leaf.split(':')
+        name = sliver_id_parts[0]
+        name = re.sub('[^a-zA-Z0-9_]', '', name)
+        #return self.nitos_login_base() + '_' + name
+        return name
+
+    #def hrn_to_pl_authname(hrn):
+    def nitos_authname (self):
+        self._normalize()
+        return self.authority[-1]
+
+    def interface_name(self):
+        self._normalize()
+        return self.leaf
+
+    def nitos_login_base (self):
+        self._normalize()
+        if self.type and self.type.startswith('authority'):
+            base = self.leaf
+        else:
+            base = self.authority[-1]
+
+        # Fix up names of GENI Federates
+        base = base.lower()
+        # NOTE(review): this pattern matches a literal backslash followed by
+        # a non-alphanumeric; probably meant '[^a-zA-Z0-9]' -- same oddity
+        # exists in the PL driver, left untouched
+        base = re.sub('\\\[^a-zA-Z0-9]', '', base)
+
+        # NITOS-side names are capped at 20 chars; keep the tail
+        if len(base) > 20:
+            base = base[len(base)-20:]
+
+        return base
+
+
+if __name__ == '__main__':
+
+    # quick manual smoke test for the slicename <-> hrn helpers
+    #nitosxrn = NitosXrn(auth="omf.nitos",slicename="aminesl")
+    #slice_hrn = nitosxrn.get_hrn()
+    #slice_name = NitosXrn(xrn="omf.nitos.aminesl",type='slice').nitos_slicename()
+    slicename = "giorgos_n"
+    hrn = slicename_to_hrn("pla", "nitos", slicename)
+    print hrn
from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
from sfa.rspecs.version_manager import VersionManager
from sfa.openstack.security_group import SecurityGroup
+from sfa.server.threadmanager import ThreadManager
from sfa.util.sfalogging import logger
def pubkeys_to_user_data(pubkeys):
'plos_sliver_type': self.shell.nova_manager.flavors.find(id=instance.flavor['id']).name,
}
-
return geni_sliver
def get_aggregate_nodes(self):
cidr_ip = rule.get('cidr_ip'),
port_range = rule.get('port_range'),
icmp_type_code = rule.get('icmp_type_code'))
+ # Open ICMP by default
+ security_group.add_rule_to_group(group_name,
+ protocol = "icmp",
+ cidr_ip = "0.0.0.0/0",
+ icmp_type_code = "-1:-1")
return group_name
def add_rule_to_security_group(self, group_name, **kwds):
files = {'/root/.ssh/authorized_keys': authorized_keys}
rspec = RSpec(rspec)
requested_instances = defaultdict(list)
+
# iterate over clouds/zones/nodes
slivers = []
for node in rspec.version.get_nodes_with_slivers():
image = instance.get('disk_image')
if image and isinstance(image, list):
image = image[0]
+ else:
+ raise InvalidRSpec("Must specify a disk_image for each VM")
image_id = self.driver.shell.nova_manager.images.find(name=image['name'])
fw_rules = instance.get('fw_rules', [])
group_name = self.create_security_group(instance_name, fw_rules)
}
return geni_sliver
- def get_leases(self, slice=None, options={}):
+ def get_leases(self, slice_xrn=None, slice=None, options={}):
+ if slice_xrn and not slice:
+ return []
+
now = int(time.time())
filter={}
filter.update({'clip':now})
site_id=lease['site_id']
site=sites_dict[site_id]
- rspec_lease['lease_id'] = lease['lease_id']
- rspec_lease['component_id'] = PlXrn(self.driver.hrn, hostname=lease['hostname']).urn
- slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
- slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, site['login_base'], lease['hostname'])
+ if slice_xrn:
+ slice_urn = slice_xrn
+ slice_hrn = urn_to_hrn(slice_urn)
+ else:
+ slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
+ slice_urn = hrn_to_urn(slice_hrn, 'slice')
rspec_lease['slice_id'] = slice_urn
rspec_lease['start_time'] = lease['t_from']
rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) / grain
# add links
links = self.get_links(sites, nodes_dict, interfaces)
rspec.version.add_links(links)
+
+ if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
+ leases = self.get_leases(slice_xrn, slice)
+ rspec.version.add_leases(leases)
+
return rspec.toxml()
def describe(self, urns, version=None, options={}):
self.shell.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
elif type == 'node':
- login_base = PlXrn(xrn=sfa_record['authority'],type='node').pl_login_base()
+ login_base = PlXrn(xrn=sfa_record['authority'],type='authority').pl_login_base()
nodes = self.shell.GetNodes([pl_record['hostname']])
if not nodes:
pointer = self.shell.AddNode(login_base, pl_record)
def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
pointer = old_sfa_record['pointer']
type = old_sfa_record['type']
+ new_key_pointer = None
# new_key implemented for users only
if new_key and type not in [ 'user' ]:
keys = person['key_ids']
keys = self.shell.GetKeys(person['key_ids'])
- # Delete all stale keys
key_exists = False
for key in keys:
- if new_key != key['key']:
- self.shell.DeleteKey(key['key_id'])
- else:
+ if new_key == key['key']:
key_exists = True
+ new_key_pointer = key['key_id']
+ break
if not key_exists:
- self.shell.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
+ new_key_pointer = self.shell.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
elif type == "node":
self.shell.UpdateNode(pointer, new_sfa_record)
- return True
+ return (pointer, new_key_pointer)
##########
slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
# add/remove leases
- requested_leases = []
- kept_leases = []
- for lease in rspec.version.get_leases():
- requested_lease = {}
- if not lease.get('lease_id'):
- requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
- requested_lease['start_time'] = lease.get('start_time')
- requested_lease['duration'] = lease.get('duration')
- else:
- kept_leases.append(int(lease['lease_id']))
- if requested_lease.get('hostname'):
- requested_leases.append(requested_lease)
+ try:
+ rspec_requested_leases = rspec.version.get_leases()
+ leases = slices.verify_slice_leases(slice, rspec_requested_leases, peer)
+ except:
+ pass
- leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
# handle MyPLC peer association.
# only used by plc and ple.
slices.handle_peer(site, slice, None, peer)
'geni_allocation_status': 'geni_unallocated',
'geni_expires': datetime_to_string(utcparse(sliver['expires']))})
return geni_slivers
-
+
def renew (self, urns, expiration_time, options={}):
aggregate = PlAggregate(self)
slivers = aggregate.get_slivers(urns)
return sfa_peer
- def verify_slice_leases(self, slice, requested_leases, kept_leases, peer):
-
- leases = self.driver.shell.GetLeases({'name':slice['name']}, ['lease_id'])
+ def verify_slice_leases(self, slice, rspec_requested_leases, peer):
+
+ leases = self.driver.shell.GetLeases({'name':slice['name'], 'clip':int(time.time())}, ['lease_id','name', 'hostname', 't_from', 't_until'])
grain = self.driver.shell.GetLeaseGranularity()
- current_leases = [lease['lease_id'] for lease in leases]
- deleted_leases = list(set(current_leases).difference(kept_leases))
+
+ requested_leases = []
+ for lease in rspec_requested_leases:
+ requested_lease = {}
+ slice_name = hrn_to_pl_slicename(lease['slice_id'])
+ if slice_name != slice['name']:
+ continue
+ elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
+ continue
+
+ hostname = xrn_to_hostname(lease['component_id'])
+ # fill the requested node with nitos ids
+ requested_lease['name'] = slice['name']
+ requested_lease['hostname'] = hostname
+ requested_lease['t_from'] = int(lease['start_time'])
+ requested_lease['t_until'] = int(lease['duration']) * grain + int(lease['start_time'])
+ requested_leases.append(requested_lease)
+
+
+
+ # prepare actual slice leases by lease_id
+ leases_by_id = {}
+ for lease in leases:
+ leases_by_id[lease['lease_id']] = {'name': lease['name'], 'hostname': lease['hostname'], \
+ 't_from': lease['t_from'], 't_until': lease['t_until']}
+
+ added_leases = []
+ kept_leases_id = []
+ deleted_leases_id = []
+ for lease_id in leases_by_id:
+ if leases_by_id[lease_id] not in requested_leases:
+ deleted_leases_id.append(lease_id)
+ else:
+ kept_leases_id.append(lease_id)
+ requested_leases.remove(leases_by_id[lease_id])
+ added_leases = requested_leases
+
try:
if peer:
self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
- deleted=self.driver.shell.DeleteLeases(deleted_leases)
- for lease in requested_leases:
- added=self.driver.shell.AddLeases(lease['hostname'], slice['name'], int(lease['start_time']), int(lease['duration']) * grain + int(lease['start_time']))
+ self.driver.shell.DeleteLeases(deleted_leases_id)
+ for lease in added_leases:
+ self.driver.shell.AddLeases(lease['hostname'], slice['name'], lease['t_from'], lease['t_until'])
except:
logger.log_exc('Failed to add/remove slice leases')
nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
current_slivers = [node['hostname'] for node in nodes]
+ requested_slivers = []
+ tags = []
+ for node in slivers:
+ hostname = None
+ if node.get('component_name'):
+ hostname = node.get('component_name').strip()
+ elif node.get('component_id'):
+ hostname = xrn_to_hostname(node.get('component_id').strip())
+ if node.get('client_id'):
+ tags.append({'slicename': slice['name'],
+ 'tagname': 'client_id',
+ 'value': node['client_id'],
+ 'node': hostname})
+ if hostname:
+ requested_slivers.append(hostname)
+
# remove nodes not in rspec
deleted_nodes = list(set(current_slivers).difference(slivers.keys()))
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+class Channel(Element):
+    # A reserved wireless channel as carried in NITOS rspecs; attributes
+    # below are the ones read/written by NITOSv1Channel and NITOSv1Lease.
+
+    fields = [
+        'reservation_id',
+        'channel_num',
+        'frequency',
+        'standard',
+        'slice_id',
+        'start_time',
+        'duration',
+    ]
class NodeElement(Element):
fields = [
+ 'client_id',
'component_id',
'component_name',
'component_manager_id',
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+class Position3D(Element):
+    # 3D coordinates of a node; units are not specified here -- defined by
+    # the testbed that fills them in.
+
+    fields = [
+        'x',
+        'y',
+        'z',
+    ]
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+class Spectrum(Element):
+    # Container element grouping reserved <channel> children; carries no
+    # attributes of its own (children are parsed separately).
+
+    fields = []
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.spectrum import Spectrum
+from sfa.rspecs.elements.channel import Channel
+
+
+class NITOSv1Channel:
+    # Serializes and parses the <spectrum>/<channel> section of NITOSv1 rspecs.
+
+    @staticmethod
+    def add_channels(xml, channels):
+        # Append a <spectrum> element under <network> (created if absent)
+        # holding one <channel> per dict in 'channels'.
+
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(channels) > 0:
+            # dirty hack that handles no resource manifest rspec
+            network_urn = "omf"
+            network_elem = xml.add_element('network', name = network_urn)
+        else:
+            network_elem = xml
+
+# spectrum_elems = xml.xpath('//spectrum')
+# spectrum_elem = xml.add_element('spectrum')
+
+# if len(spectrum_elems) > 0:
+# spectrum_elem = spectrum_elems[0]
+# elif len(channels) > 0:
+# spectrum_elem = xml.add_element('spectrum')
+# else:
+# spectrum_elem = xml
+
+        spectrum_elem = network_elem.add_instance('spectrum', [])
+
+        channel_elems = []
+        for channel in channels:
+            channel_fields = ['channel_num', 'frequency', 'standard']
+            channel_elem = spectrum_elem.add_instance('channel', channel, channel_fields)
+            channel_elems.append(channel_elem)
+
+
+    @staticmethod
+    def get_channels(xml, filter={}):
+        # Find every <channel> matching 'filter' anywhere in the document
+        # (both default-namespaced and un-namespaced).
+        xpath = '//channel%s | //default:channel%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        channel_elems = xml.xpath(xpath)
+        return NITOSv1Channel.get_channel_objs(channel_elems)
+
+    @staticmethod
+    def get_channel_objs(channel_elems):
+        # Convert channel xml elements back into Channel dicts.
+        channels = []
+        for channel_elem in channel_elems:
+            channel = Channel(channel_elem.attrib, channel_elem)
+            channel['channel_num'] = channel_elem.attrib['channel_num']
+            channel['frequency'] = channel_elem.attrib['frequency']
+            channel['standard'] = channel_elem.attrib['standard']
+
+            channels.append(channel)
+        return channels
+
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.channel import Channel
+
+
+
+class NITOSv1Lease:
+    # Serializes and parses <lease> elements (with their <node> and
+    # <channel> children) for NITOSv1 rspecs.
+
+    @staticmethod
+    def add_leases(xml, leases, channels):
+        # NOTE: this consumes 'leases' -- the list is emptied by the
+        # grouping loop below (leases.remove).
+
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(leases) > 0:
+            network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
+            network_elem = xml.add_element('network', name = network_urn)
+        else:
+            network_elem = xml
+
+        # group the leases by slice and timeslots
+        grouped_leases = []
+
+        while leases:
+            # take the first remaining lease as the group key
+            slice_id = leases[0]['slice_id']
+            start_time = leases[0]['start_time']
+            duration = leases[0]['duration']
+            group = []
+
+            for lease in leases:
+                if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
+                    group.append(lease)
+
+            grouped_leases.append(group)
+
+            # remove the grouped leases from the work list
+            for lease1 in group:
+                leases.remove(lease1)
+
+        lease_elems = []
+        for lease in grouped_leases:
+            #lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+            lease_fields = ['slice_id', 'start_time', 'duration']
+            lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
+            lease_elems.append(lease_elem)
+
+            # add nodes of this lease
+            for node in lease:
+                lease_elem.add_instance('node', node, ['component_id'])
+
+            # add reserved channels of this lease
+            #channels = [{'channel_id': 1}, {'channel_id': 2}]
+            for channel in channels:
+                if channel['slice_id'] == lease[0]['slice_id'] and channel['start_time'] == lease[0]['start_time'] and channel['duration'] == lease[0]['duration']:
+                    lease_elem.add_instance('channel', channel, ['channel_num'])
+
+
+    @staticmethod
+    def get_leases(xml, filter={}):
+        # Find every <lease> matching 'filter' (namespaced or not).
+        xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        lease_elems = xml.xpath(xpath)
+        return NITOSv1Lease.get_lease_objs(lease_elems)
+
+    @staticmethod
+    def get_lease_objs(lease_elems):
+        # Flatten grouped lease elements back into per-node Lease dicts and
+        # per-channel Channel dicts; returns the pair (leases, channels).
+        leases = []
+        channels = []
+        for lease_elem in lease_elems:
+            #get nodes
+            node_elems = lease_elem.xpath('./default:node | ./node')
+            for node_elem in node_elems:
+                lease = Lease(lease_elem.attrib, lease_elem)
+                lease['slice_id'] = lease_elem.attrib['slice_id']
+                lease['start_time'] = lease_elem.attrib['start_time']
+                lease['duration'] = lease_elem.attrib['duration']
+                lease['component_id'] = node_elem.attrib['component_id']
+                leases.append(lease)
+            #get channels
+            channel_elems = lease_elem.xpath('./default:channel | ./channel')
+            for channel_elem in channel_elems:
+                channel = Channel(channel_elem.attrib, channel_elem)
+                channel['slice_id'] = lease_elem.attrib['slice_id']
+                channel['start_time'] = lease_elem.attrib['start_time']
+                channel['duration'] = lease_elem.attrib['duration']
+                channel['channel_num'] = channel_elem.attrib['channel_num']
+                channels.append(channel)
+
+        return (leases, channels)
+
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.position_3d import Position3D
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+
+
+class NITOSv1Node:
+    # Serializes and parses <node> elements for NITOSv1 rspecs.
+
+    @staticmethod
+    def add_nodes(xml, nodes):
+        # Add one <node> element per node dict under <network> (created if
+        # absent, named after the first node's component_manager_id).
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
+            network_urn = nodes[0]['component_manager_id']
+            network_elem = xml.add_element('network', name = Xrn(network_urn).get_hrn())
+        else:
+            network_elem = xml
+
+        # needs to be improved to retrieve the gateway addr dynamically.
+        gateway_addr = 'nitlab.inf.uth.gr'
+
+        node_elems = []
+        for node in nodes:
+            node_fields = ['component_manager_id', 'component_id', 'boot_state']
+            node_elem = network_elem.add_instance('node', node, node_fields)
+            node_elems.append(node_elem)
+
+            # determine network hrn
+            network_hrn = None
+            if 'component_manager_id' in node and node['component_manager_id']:
+                network_hrn = Xrn(node['component_manager_id']).get_hrn()
+
+            # set component_name attribute and hostname element
+            if 'component_id' in node and node['component_id']:
+                component_name = Xrn(xrn=node['component_id']).get_leaf()
+                node_elem.set('component_name', component_name)
+                hostname_elem = node_elem.add_element('hostname')
+                hostname_elem.set_text(component_name)
+
+            # set site id
+            if 'authority_id' in node and node['authority_id']:
+                node_elem.set('site_id', node['authority_id'])
+
+            # add location
+            location = node.get('location')
+            if location:
+                node_elem.add_instance('location', location, Location.fields)
+
+            # add 3D Position of the node
+            position_3d = node.get('position_3d')
+            if position_3d:
+                node_elem.add_instance('position_3d', position_3d, Position3D.fields)
+
+            # all nitos nodes are exclusive
+            exclusive_elem = node_elem.add_element('exclusive')
+            exclusive_elem.set_text('TRUE')
+
+            # In order to access nitos nodes, one need to pass through the nitos gateway
+            # here we advertise Nitos access gateway address
+            gateway_elem = node_elem.add_element('gateway')
+            gateway_elem.set_text(gateway_addr)
+
+            # add granularity of the reservation system
+            # BUGFIX: guard before subscripting -- the previous code did
+            # node.get('granularity')['grain'] first, raising TypeError for
+            # any node without a 'granularity' entry
+            granularity = node.get('granularity')
+            if granularity:
+                #node_elem.add_instance('granularity', granularity, granularity.fields)
+                granularity_elem = node_elem.add_element('granularity')
+                granularity_elem.set_text(str(granularity['grain']))
+            # add hardware type
+            #hardware_type = node.get('hardware_type')
+            #if hardware_type:
+            #    node_elem.add_instance('hardware_type', hardware_type)
+            hardware_type_elem = node_elem.add_element('hardware_type')
+            hardware_type_elem.set_text(node.get('hardware_type'))
+
+
+            if isinstance(node.get('interfaces'), list):
+                for interface in node.get('interfaces', []):
+                    node_elem.add_instance('interface', interface, ['component_id', 'client_id', 'ipv4'])
+
+            #if 'bw_unallocated' in node and node['bw_unallocated']:
+            #    bw_unallocated = etree.SubElement(node_elem, 'bw_unallocated', units='kbps').text = str(int(node['bw_unallocated'])/1000)
+
+            PGv2Services.add_services(node_elem, node.get('services', []))
+            tags = node.get('tags', [])
+            if tags:
+                for tag in tags:
+                    tag_elem = node_elem.add_element(tag['tagname'])
+                    tag_elem.set_text(tag['value'])
+            NITOSv1Sliver.add_slivers(node_elem, node.get('slivers', []))
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        # Attach each requested sliver to the first <node> whose
+        # component_id matches it.
+        component_ids = []
+        for sliver in slivers:
+            filter = {}
+            if isinstance(sliver, str):
+                filter['component_id'] = '*%s*' % sliver
+                sliver = {}
+            elif 'component_id' in sliver and sliver['component_id']:
+                filter['component_id'] = '*%s*' % sliver['component_id']
+            if not filter:
+                continue
+            nodes = NITOSv1Node.get_nodes(xml, filter)
+            if not nodes:
+                continue
+            node = nodes[0]
+            NITOSv1Sliver.add_slivers(node, sliver)
+
+    @staticmethod
+    def remove_slivers(xml, hostnames):
+        # Strip every <sliver> child from the <node>s matching each hostname.
+        for hostname in hostnames:
+            nodes = NITOSv1Node.get_nodes(xml, {'component_id': '*%s*' % hostname})
+            for node in nodes:
+                slivers = NITOSv1Sliver.get_slivers(node.element)
+                for sliver in slivers:
+                    node.element.remove(sliver.element)
+
+    @staticmethod
+    def get_nodes(xml, filter={}):
+        # Find every <node> matching 'filter' (namespaced or not).
+        xpath = '//node%s | //default:node%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        node_elems = xml.xpath(xpath)
+        return NITOSv1Node.get_node_objs(node_elems)
+
+    @staticmethod
+    def get_nodes_with_slivers(xml):
+        # Only nodes that carry at least one <sliver> child.
+        xpath = '//node[count(sliver)>0] | //default:node[count(default:sliver)>0]'
+        node_elems = xml.xpath(xpath)
+        return NITOSv1Node.get_node_objs(node_elems)
+
+
+    @staticmethod
+    def get_node_objs(node_elems):
+        # Convert node xml elements back into Node dicts (location, bwlimit,
+        # interfaces, services, slivers, tags, hardware types).
+        nodes = []
+        for node_elem in node_elems:
+            node = Node(node_elem.attrib, node_elem)
+            if 'site_id' in node_elem.attrib:
+                node['authority_id'] = node_elem.attrib['site_id']
+            # get location
+            location_elems = node_elem.xpath('./default:location | ./location')
+            locations = [loc_elem.get_instance(Location) for loc_elem in location_elems]
+            if len(locations) > 0:
+                node['location'] = locations[0]
+            # get bwlimit
+            bwlimit_elems = node_elem.xpath('./default:bw_limit | ./bw_limit')
+            bwlimits = [bwlimit_elem.get_instance(BWlimit) for bwlimit_elem in bwlimit_elems]
+            if len(bwlimits) > 0:
+                node['bwlimit'] = bwlimits[0]
+            # get interfaces
+            iface_elems = node_elem.xpath('./default:interface | ./interface')
+            ifaces = [iface_elem.get_instance(Interface) for iface_elem in iface_elems]
+            node['interfaces'] = ifaces
+            # get services
+            node['services'] = PGv2Services.get_services(node_elem)
+            # get slivers
+            node['slivers'] = NITOSv1Sliver.get_slivers(node_elem)
+            # get tags
+            node['tags'] = NITOSv1PLTag.get_pl_tags(node_elem, ignore=Node.fields+["hardware_type"])
+            # get hardware types
+            hardware_type_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
+            node['hardware_types'] = [hw_type.get_instance(HardwareType) for hw_type in hardware_type_elems]
+
+            # temporary... play nice with old slice manager rspec
+            if not node['component_name']:
+                hostname_elem = node_elem.find("hostname")
+                if hostname_elem is not None:
+                    node['component_name'] = hostname_elem.text
+
+            nodes.append(node)
+        return nodes
+
--- /dev/null
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.pltag import PLTag
+
+class NITOSv1PLTag:
+    # Serializes and parses free-form tag elements for NITOSv1 rspecs.
+
+    @staticmethod
+    def add_pl_tag(xml, name, value):
+        # Add a single <name>value</name> child to 'xml'.
+        # BUGFIX: the previous body looped over an undefined 'pl_tags'
+        # variable, raising NameError on every call.
+        pl_tag_elem = xml.add_element(name)
+        pl_tag_elem.set_text(value)
+
+    @staticmethod
+    def get_pl_tags(xml, ignore=[]):
+        # Collect every child element whose tag is not in 'ignore' as a
+        # PLTag dict {'tagname': tag, 'value': text}.
+        pl_tags = []
+        for elem in xml.iterchildren():
+            if elem.tag not in ignore:
+                pl_tag = PLTag({'tagname': elem.tag, 'value': elem.text})
+                pl_tags.append(pl_tag)
+        return pl_tags
+
--- /dev/null
+from sfa.util.xrn import Xrn
+from sfa.util.xml import XmlElement
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+
+from sfa.planetlab.plxrn import PlXrn
+
+class NITOSv1Sliver:
+    # Serializes and parses <sliver> elements for NITOSv1 rspecs.
+
+    @staticmethod
+    def add_slivers(xml, slivers):
+        # Append a <sliver> per dict in 'slivers' (a single dict is also
+        # accepted); tags become child elements, and sliver_id (when set)
+        # overrides the 'name' attribute with the PL slice name.
+        if not slivers:
+            return
+        if not isinstance(slivers, list):
+            slivers = [slivers]
+        for sliver in slivers:
+            sliver_elem = xml.add_instance('sliver', sliver, ['name'])
+            tags = sliver.get('tags', [])
+            if tags:
+                for tag in tags:
+                    NITOSv1Sliver.add_sliver_attribute(sliver_elem, tag['tagname'], tag['value'])
+            if sliver.get('sliver_id'):
+                name = PlXrn(xrn=sliver.get('sliver_id')).pl_slicename()
+                sliver_elem.set('name', name)
+
+    @staticmethod
+    def add_sliver_attribute(xml, name, value):
+        # Add one <name>value</name> child element.
+        elem = xml.add_element(name)
+        elem.set_text(value)
+
+    @staticmethod
+    def get_sliver_attributes(xml):
+        # Every child element not listed in Sliver.fields is treated as a
+        # tag and returned as an Element with 'name'/'value' set.
+        attribs = []
+        for elem in xml.iterchildren():
+            if elem.tag not in Sliver.fields:
+                xml_element = XmlElement(elem, xml.namespaces)
+                instance = Element(fields=xml_element, element=elem)
+                instance['name'] = elem.tag
+                instance['value'] = elem.text
+                attribs.append(instance)
+        return attribs
+
+    @staticmethod
+    def get_slivers(xml, filter={}):
+        # Parse direct <sliver> children of 'xml' into Sliver dicts;
+        # component_id is inherited from the parent node element.
+        # NOTE(review): 'filter' is accepted but unused here -- kept for
+        # signature parity with the other get_* helpers.
+        xpath = './default:sliver | ./sliver'
+        sliver_elems = xml.xpath(xpath)
+        slivers = []
+        for sliver_elem in sliver_elems:
+            sliver = Sliver(sliver_elem.attrib,sliver_elem)
+            if 'component_id' in xml.attrib:
+                sliver['component_id'] = xml.attrib['component_id']
+            sliver['tags'] = NITOSv1Sliver.get_sliver_attributes(sliver_elem)
+            slivers.append(sliver)
+        return slivers
+
--- /dev/null
+from sfa.util.xrn import Xrn
+from sfa.util.xml import XpathFilter
+
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
+from sfa.rspecs.elements.versions.pgv2Interface import PGv2Interface
+from sfa.rspecs.elements.lease import Lease
+
+from sfa.planetlab.plxrn import xrn_to_hostname
+
+class PGv2Lease:
+    # Serializes and parses <lease> elements (grouped by slice/timeslot,
+    # with <node> children) for PGv2 rspecs.
+    @staticmethod
+    def add_leases(xml, leases):
+        # NOTE: this consumes 'leases' -- the list is emptied by the
+        # grouping loop below (leases.remove).
+        # group the leases by slice and timeslots
+        grouped_leases = []
+
+        while leases:
+            # take the first remaining lease as the group key
+            slice_id = leases[0]['slice_id']
+            start_time = leases[0]['start_time']
+            duration = leases[0]['duration']
+            group = []
+
+            for lease in leases:
+                if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
+                    group.append(lease)
+
+            grouped_leases.append(group)
+
+            # remove the grouped leases from the work list
+            for lease1 in group:
+                leases.remove(lease1)
+
+        lease_elems = []
+        for lease in grouped_leases:
+            lease_fields = ['slice_id', 'start_time', 'duration']
+            lease_elem = xml.add_instance('lease', lease[0], lease_fields)
+            lease_elems.append(lease_elem)
+
+            # add nodes of this lease
+            for node in lease:
+                lease_elem.add_instance('node', node, ['component_id'])
+
+
+    @staticmethod
+    def get_leases(xml, filter={}):
+        # Find every <lease> matching 'filter' (namespaced or not).
+        xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        lease_elems = xml.xpath(xpath)
+        return PGv2Lease.get_lease_objs(lease_elems)
+
+
+    @staticmethod
+    def get_lease_objs(lease_elems):
+        # Flatten grouped lease elements into one Lease dict per <node> child.
+        leases = []
+        for lease_elem in lease_elems:
+            #get nodes
+            node_elems = lease_elem.xpath('./default:node | ./node')
+            for node_elem in node_elems:
+                lease = Lease(lease_elem.attrib, lease_elem)
+                lease['slice_id'] = lease_elem.attrib['slice_id']
+                lease['start_time'] = lease_elem.attrib['start_time']
+                lease['duration'] = lease_elem.attrib['duration']
+                lease['component_id'] = node_elem.attrib['component_id']
+                leases.append(lease)
+
+        return leases
from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
from sfa.rspecs.elements.versions.pgv2Interface import PGv2Interface
+from sfa.rspecs.elements.granularity import Granularity
from sfa.planetlab.plxrn import xrn_to_hostname
# set location
if node.get('location'):
node_elem.add_instance('location', node['location'], Location.fields)
+
+ # set granularity
+ if node['exclusive'] == "true":
+ granularity = node.get('granularity')
+ node_elem.add_instance('granularity', granularity, granularity.fields)
# set interfaces
PGv2Interface.add_interfaces(node_elem, node.get('interfaces'))
#if node.get('interfaces'):
for initscript in node.get('pl_initscripts', []):
slivers['tags'].append({'name': 'initscript', 'value': initscript['name']})
PGv2SliverType.add_slivers(node_elem, slivers)
-
return node_elems
@staticmethod
if len(locations) > 0:
node['location'] = locations[0]
+ # get granularity
+ granularity_elems = node_elem.xpath('./default:granularity | ./granularity')
+ if len(granularity_elems) > 0:
+ node['granularity'] = granularity_elems[0].get_instance(Granularity)
+
# get interfaces
iface_elems = node_elem.xpath('./default:interface | ./interface')
node['interfaces'] = [iface_elem.get_instance(Interface) for iface_elem in iface_elems]
for attribute in attributes:
if attribute['name'] == 'initscript':
xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
- elif tag['tagname'] == 'flack_info':
+ elif attribute['tagname'] == 'flack_info':
attrib_elem = xml.add_element('{%s}info' % self.namespaces['flack'])
attrib_dict = eval(tag['value'])
for (key, value) in attrib_dict.items():
else:
network_elem = xml
- lease_elems = []
- for lease in leases:
- lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
- lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+ # group the leases by slice and timeslots
+ grouped_leases = []
+
+ while leases:
+ slice_id = leases[0]['slice_id']
+ start_time = leases[0]['start_time']
+ duration = leases[0]['duration']
+ group = []
+
+ for lease in leases:
+ if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
+ group.append(lease)
+
+ grouped_leases.append(group)
+
+ for lease1 in group:
+ leases.remove(lease1)
+
+ lease_elems = []
+ for lease in grouped_leases:
+ #lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+ lease_fields = ['slice_id', 'start_time', 'duration']
+ lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
lease_elems.append(lease_elem)
+ # add nodes of this lease
+ for node in lease:
+ lease_elem.add_instance('node', node, ['component_id'])
+
+
+
+# lease_elems = []
+# for lease in leases:
+# lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+# lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+# lease_elems.append(lease_elem)
+
@staticmethod
def get_leases(xml, filter={}):
@staticmethod
def get_lease_objs(lease_elems):
- leases = []
+ leases = []
for lease_elem in lease_elems:
- lease = Lease(lease_elem.attrib, lease_elem)
- if lease.get('lease_id'):
- lease['lease_id'] = lease_elem.attrib['lease_id']
- lease['component_id'] = lease_elem.attrib['component_id']
- lease['slice_id'] = lease_elem.attrib['slice_id']
- lease['start_time'] = lease_elem.attrib['start_time']
- lease['duration'] = lease_elem.attrib['duration']
-
- leases.append(lease)
- return leases
+ #get nodes
+ node_elems = lease_elem.xpath('./default:node | ./node')
+ for node_elem in node_elems:
+ lease = Lease(lease_elem.attrib, lease_elem)
+ lease['slice_id'] = lease_elem.attrib['slice_id']
+ lease['start_time'] = lease_elem.attrib['start_time']
+ lease['duration'] = lease_elem.attrib['duration']
+ lease['component_id'] = node_elem.attrib['component_id']
+ leases.append(lease)
+
+ return leases
+
+
+
+
+
+# leases = []
+# for lease_elem in lease_elems:
+# lease = Lease(lease_elem.attrib, lease_elem)
+# if lease.get('lease_id'):
+# lease['lease_id'] = lease_elem.attrib['lease_id']
+# lease['component_id'] = lease_elem.attrib['component_id']
+# lease['slice_id'] = lease_elem.attrib['slice_id']
+# lease['start_time'] = lease_elem.attrib['start_time']
+# lease['duration'] = lease_elem.attrib['duration']
+
+# leases.append(lease)
+# return leases
if location:
node_elem.add_instance('location', location, Location.fields)
- # add granularity of the reservation system
- granularity = node.get('granularity')
- if granularity:
- node_elem.add_instance('granularity', granularity, granularity.fields)
+ # add exclusive tag to distinguish between Reservable and Shared nodes
+ exclusive_elem = node_elem.add_element('exclusive')
+ if node.get('exclusive') and node.get('exclusive') == 'true':
+ exclusive_elem.set_text('TRUE')
+ # add granularity of the reservation system
+ granularity = node.get('granularity')
+ if granularity:
+ node_elem.add_instance('granularity', granularity, granularity.fields)
+ else:
+ exclusive_elem.set_text('FALSE')
if isinstance(node.get('interfaces'), list):
tags = node.get('tags', [])
if tags:
for tag in tags:
- tag_elem = node_elem.add_element(tag['tagname'])
- tag_elem.set_text(tag['value'])
+ # backdoor for FITeagle
+ # Alexander Willner <alexander.willner@tu-berlin.de>
+ if tag['tagname']=="fiteagle_settings":
+ tag_elem = node_elem.add_element(tag['tagname'])
+ for subtag in tag['value']:
+ subtag_elem = tag_elem.add_element('setting')
+ subtag_elem.set('name', str(subtag['tagname']))
+ subtag_elem.set('description', str(subtag['description']))
+ subtag_elem.set_text(subtag['value'])
+ else:
+ tag_elem = node_elem.add_element(tag['tagname'])
+ tag_elem.set_text(tag['value'])
SFAv1Sliver.add_slivers(node_elem, node.get('slivers', []))
@staticmethod
if tags:
for tag in tags:
SFAv1Sliver.add_sliver_attribute(sliver_elem, tag['tagname'], tag['value'])
- if sliver.get('sliver_id'):
- name = PlXrn(xrn=sliver.get('sliver_id')).pl_slicename()
- sliver_elem.set('name', name)
@staticmethod
def add_sliver_attribute(xml, name, value):
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+
+
+#from sfa.rspecs.elements.versions.sfav1PLTag import SFAv1PLTag
+#from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+
+
+
+class Slabv1Lease:
+    """(De)serializes lease reservations for the Senslab v1 RSpec:
+    leases live under a <network> element; each lease element wraps the
+    <node> elements it reserves."""
+
+    @staticmethod
+    def add_leases(xml, leases):
+        # Reuse an existing <network> element if the rspec has one;
+        # otherwise derive one from the first lease's component_id,
+        # or fall back to attaching leases at the xml root.
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(leases) > 0:
+            network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
+            network_elem = xml.add_element('network', name = network_urn)
+        else:
+            network_elem = xml
+
+        # One <lease> element per lease dict, restricted to the fields below.
+        lease_elems = []
+        for lease in leases:
+            lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
+            lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+            lease_elems.append(lease_elem)
+
+
+    @staticmethod
+    def get_leases(xml, filter={}):
+        # Match both un-namespaced and default-namespaced <lease> elements.
+        xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        lease_elems = xml.xpath(xpath)
+        return Slabv1Lease.get_lease_objs(lease_elems)
+
+    @staticmethod
+    def get_lease_objs(lease_elems):
+        # Flatten: one Lease object per (lease, node) pair, copying the
+        # lease-level attributes and the node's component_id.
+        leases = []
+        for lease_elem in lease_elems:
+            #get nodes
+            node_elems = lease_elem.xpath('./default:node | ./node')
+            for node_elem in node_elems:
+                lease = Lease(lease_elem.attrib, lease_elem)
+                lease['slice_id'] = lease_elem.attrib['slice_id']
+                lease['start_time'] = lease_elem.attrib['start_time']
+                lease['duration'] = lease_elem.attrib['duration']
+                lease['component_id'] = node_elem.attrib['component_id']
+                leases.append(lease)
+
+        return leases
--- /dev/null
+
+from sfa.util.xrn import Xrn
+from sfa.util.xml import XpathFilter
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+from sfa.util.sfalogging import logger
+
+class SlabNode(Node):
+ #First get the fields already defined in the class Node
+ fields = list(Node.fields)
+ #Extend it with senslab's specific fields
+ fields.extend (['archi', 'radio', 'mobile','position'])
+
+
+class SlabPosition(Element):
+ fields = ['posx', 'posy','posz']
+
+class SlabLocation(Location):
+ fields = list(Location.fields)
+ fields.extend (['site'])
+
+
+
+
+class Slabv1Node:
+
+ @staticmethod
+ def add_connection_information(xml, ldap_username, sites_set):
+ """ Adds login and ssh connection info in the network item in
+ the xml. Does not create the network element, therefore
+ should be used after add_nodes, which creates the network item.
+
+ """
+ logger.debug(" add_connection_information " )
+ #Get network item in the xml
+ network_elems = xml.xpath('//network')
+ if len(network_elems) > 0:
+ network_elem = network_elems[0]
+
+ slab_network_dict = {}
+ slab_network_dict['login'] = ldap_username
+
+ slab_network_dict['ssh'] = \
+ ['ssh ' + ldap_username + '@'+site+'.senslab.info' \
+ for site in sites_set]
+ network_elem.set('ssh', \
+ unicode(slab_network_dict['ssh']))
+ network_elem.set('login', unicode( slab_network_dict['login']))
+
+
+    @staticmethod
+    def add_nodes(xml, nodes):
+        """Add a <network> element (reused when already present) and one
+        <node> child per dict in `nodes`; returns the created node elements."""
+        #Add network item in the xml
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
+            network_urn = nodes[0]['component_manager_id']
+            network_elem = xml.add_element('network', \
+                                        name = Xrn(network_urn).get_hrn())
+        else:
+            network_elem = xml
+
+        logger.debug("slabv1Node \t add_nodes nodes %s \r\n "%(nodes[0]))
+        node_elems = []
+        #Then add nodes items to the network item in the xml
+        for node in nodes:
+            #Attach this node to the network element
+            node_fields = ['component_manager_id', 'component_id', 'exclusive',\
+                                                    'boot_state', 'mobile']
+            node_elem = network_elem.add_instance('node', node, node_fields)
+            node_elems.append(node_elem)
+
+            #Set the attibutes of this node element
+            # bugfix: compare dict keys with '==' (value equality); the
+            # previous 'is' identity tests only worked through CPython
+            # string interning and are not guaranteed to match.
+            for attribute in node:
+                # set component name
+                if attribute == 'component_id':
+                    component_name = node['component_name']
+                    node_elem.set('component_name', component_name)
+
+                # set hardware types, extend fields to add Senslab's architecture
+                #and radio type
+
+                if attribute == 'hardware_types':
+                    for hardware_type in node.get('hardware_types', []):
+                        fields = HardwareType.fields
+                        fields.extend(['archi','radio'])
+                        node_elem.add_instance('hardware_types', node, fields)
+
+                # set location
+                if attribute == 'location':
+                    node_elem.add_instance('location', node['location'], \
+                                            SlabLocation.fields)
+                # add granularity of the reservation system
+                #TODO put the granularity in network instead SA 18/07/12
+                if attribute == 'granularity' :
+                    granularity = node['granularity']
+                    if granularity:
+                        node_elem.add_instance('granularity', \
+                                    granularity, granularity.fields)
+
+
+                # set available element: 'alive' nodes advertise now='true'
+                if attribute == 'boot_state':
+                    if node.get('boot_state').lower() == 'alive':
+                        available_elem = node_elem.add_element('available', \
+                                                                now='true')
+                    else:
+                        available_elem = node_elem.add_element('available', \
+                                                                now='false')
+
+                #set position
+                if attribute == 'position':
+                    node_elem.add_instance('position', node['position'], \
+                                        SlabPosition.fields)
+                ## add services
+                #PGv2Services.add_services(node_elem, node.get('services', []))
+                # add slivers
+                if attribute == 'slivers':
+                    slivers = node.get('slivers', [])
+                    if not slivers:
+                        # we must still advertise the available sliver types
+                        slivers = Sliver({'type': 'slab-node'})
+                        # we must also advertise the available initscripts
+                        #slivers['tags'] = []
+                        #if node.get('pl_initscripts'):
+                            #for initscript in node.get('pl_initscripts', []):
+                                #slivers['tags'].append({'name': 'initscript', \
+                                                #'value': initscript['name']})
+
+                    Slabv1Sliver.add_slivers(node_elem, slivers)
+        return node_elems
+
+
+
+ @staticmethod
+ def get_nodes(xml, filter={}):
+ xpath = '//node%s | //default:node%s' % (XpathFilter.xpath(filter), \
+ XpathFilter.xpath(filter))
+ node_elems = xml.xpath(xpath)
+ return Slabv1Node.get_node_objs(node_elems)
+
+ @staticmethod
+ def get_nodes_with_slivers(xml, sliver_filter={}):
+
+ xpath = '//node[count(sliver)>0] | \
+ //default:node[count(default:sliver) > 0]'
+ node_elems = xml.xpath(xpath)
+ logger.debug("SLABV1NODE \tget_nodes_with_slivers \
+ node_elems %s"%(node_elems))
+ return Slabv1Node.get_node_objs(node_elems)
+
+    @staticmethod
+    def get_node_objs(node_elems):
+        """Build Node objects from <node> elements, extracting authority,
+        hardware types, location, interfaces, slivers and availability."""
+        nodes = []
+        for node_elem in node_elems:
+            node = Node(node_elem.attrib, node_elem)
+            nodes.append(node)
+            if 'component_id' in node_elem.attrib:
+                node['authority_id'] = \
+                    Xrn(node_elem.attrib['component_id']).get_authority_urn()
+
+            # get hardware types
+            hardware_type_elems = node_elem.xpath('./default:hardware_type | \
+                                                            ./hardware_type')
+            node['hardware_types'] = [hw_type.get_instance(HardwareType) \
+                                            for hw_type in hardware_type_elems]
+
+            # get location (keep the first one found, if any)
+            location_elems = node_elem.xpath('./default:location | ./location')
+            locations = [location_elem.get_instance(Location) \
+                                        for location_elem in location_elems]
+            if len(locations) > 0:
+                node['location'] = locations[0]
+
+
+            # get interfaces
+            iface_elems = node_elem.xpath('./default:interface | ./interface')
+            node['interfaces'] = [iface_elem.get_instance(Interface) \
+                                            for iface_elem in iface_elems]
+
+            # get services
+            #node['services'] = PGv2Services.get_services(node_elem)
+
+            # get slivers
+            node['slivers'] = Slabv1Sliver.get_slivers(node_elem)
+            available_elems = node_elem.xpath('./default:available | \
+                                                ./available')
+            # bugfix: <available> carries a 'now' attribute (see add_nodes),
+            # not 'name'; testing for 'name' meant boot_state was never set.
+            if len(available_elems) > 0 and 'now' in available_elems[0].attrib:
+                if available_elems[0].attrib.get('now', '').lower() == 'true':
+                    node['boot_state'] = 'boot'
+                else:
+                    node['boot_state'] = 'disabled'
+
+        logger.debug("SLABV1NODE \tget_nodes_objs \
+                        #nodes %s"%(nodes))
+        return nodes
+
+
+ @staticmethod
+ def add_slivers(xml, slivers):
+ logger.debug("SLABv1NODE \tadd_slivers ")
+ component_ids = []
+ for sliver in slivers:
+ filter_sliver = {}
+ if isinstance(sliver, str):
+ filter_sliver['component_id'] = '*%s*' % sliver
+ sliver = {}
+ elif 'component_id' in sliver and sliver['component_id']:
+ filter_sliver['component_id'] = '*%s*' % sliver['component_id']
+ if not filter_sliver:
+ continue
+ nodes = Slabv1Node.get_nodes(xml, filter_sliver)
+ if not nodes:
+ continue
+ node = nodes[0]
+ Slabv1Sliver.add_slivers(node, sliver)
+
+ @staticmethod
+ def remove_slivers(xml, hostnames):
+ for hostname in hostnames:
+ nodes = Slabv1Node.get_nodes(xml, \
+ {'component_id': '*%s*' % hostname})
+ for node in nodes:
+ slivers = Slabv1Sliver.get_slivers(node.element)
+ for sliver in slivers:
+ node.element.remove(sliver.element)
+
+
+
--- /dev/null
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.sliver import Sliver
+
+#from sfa.rspecs.elements.versions.pgv2DiskImage import PGv2DiskImage
+import sys
+class Slabv1Sliver:
+
+ @staticmethod
+ def add_slivers(xml, slivers):
+ if not slivers:
+ return
+ if not isinstance(slivers, list):
+ slivers = [slivers]
+ for sliver in slivers:
+ #sliver_elem = xml.add_element('sliver_type')
+ sliver_elem = xml.add_element('sliver')
+ if sliver.get('type'):
+ sliver_elem.set('name', sliver['type'])
+ if sliver.get('client_id'):
+ sliver_elem.set('client_id', sliver['client_id'])
+ #images = sliver.get('disk_images')
+ #if images and isinstance(images, list):
+ #Slabv1DiskImage.add_images(sliver_elem, images)
+ Slabv1Sliver.add_sliver_attributes(sliver_elem, sliver.get('tags', []))
+
+    @staticmethod
+    def add_sliver_attributes(xml, attributes):
+        """Add one extension element per sliver tag; currently handles the
+        'initscript' and 'flack_info' tags only."""
+        if attributes:
+            for attribute in attributes:
+                if attribute['name'] == 'initscript':
+                    xml.add_element('{%s}initscript' % xml.namespaces['planetlab'], name=attribute['value'])
+                elif attribute['name'] == 'flack_info':
+                    # bugfix: original branch referenced undefined names
+                    # 'tag' and 'self' inside this staticmethod, raising
+                    # NameError whenever a flack_info tag was present.
+                    attrib_elem = xml.add_element('{%s}info' % xml.namespaces['flack'])
+                    # SECURITY: eval() on a stored tag value is only safe if
+                    # tag values are trusted; consider ast.literal_eval here.
+                    attrib_dict = eval(attribute['value'])
+                    for (key, value) in attrib_dict.items():
+                        attrib_elem.set(key, value)
+ @staticmethod
+ def get_slivers(xml, filter={}):
+ xpath = './default:sliver | ./sliver'
+
+ sliver_elems = xml.xpath(xpath)
+ slivers = []
+ for sliver_elem in sliver_elems:
+ sliver = Sliver(sliver_elem.attrib,sliver_elem)
+
+ if 'component_id' in xml.attrib:
+ sliver['component_id'] = xml.attrib['component_id']
+ if 'name' in sliver_elem.attrib:
+ sliver['type'] = sliver_elem.attrib['name']
+ #sliver['images'] = Slabv1DiskImage.get_images(sliver_elem)
+
+ print>>sys.stderr, "\r\n \r\n SLABV1SLIVER.PY \t\t\t get_slivers sliver %s " %( sliver)
+ slivers.append(sliver)
+ return slivers
+
+ @staticmethod
+ def get_sliver_attributes(xml, filter={}):
+ return []
\ No newline at end of file
self.user_options = user_options
self.elements = {}
if rspec:
- self.parse_xml(rspec)
+ if version:
+ self.version = self.version_manager.get_version(version)
+ self.parse_xml(rspec, version)
+ else:
+ self.parse_xml(rspec)
elif version:
self.create(version)
else:
"""
self.version = self.version_manager.get_version(version)
self.namespaces = self.version.namespaces
- self.parse_xml(self.version.template)
+ self.parse_xml(self.version.template, self.version)
# eg. 2011-03-23T19:53:28Z
date_format = '%Y-%m-%dT%H:%M:%SZ'
now = datetime.utcnow()
self.xml.set('generated', generated_ts)
- def parse_xml(self, xml):
+ def parse_xml(self, xml, version=None):
self.xml.parse_xml(xml)
- self.version = None
- if self.xml.schema:
- self.version = self.version_manager.get_version_by_schema(self.xml.schema)
- else:
- #raise InvalidRSpec('unknown rspec schema: %s' % schema)
- # TODO: Should start raising an exception once SFA defines a schema.
- # for now we just default to sfa
- self.version = self.version_manager.get_version({'type':'sfa','version': '1'})
+ if not version:
+ if self.xml.schema:
+ self.version = self.version_manager.get_version_by_schema(self.xml.schema)
+ else:
+ #raise InvalidRSpec('unknown rspec schema: %s' % schema)
+ # TODO: Should start raising an exception once SFA defines a schema.
+ # for now we just default to sfa
+ self.version = self.version_manager.get_version({'type':'sfa','version': '1'})
self.version.xml = self.xml
self.namespaces = self.xml.namespaces
SLIVER_TYPE='SLIVER_TYPE',
LEASE='LEASE',
GRANULARITY='GRANULARITY',
+ SPECTRUM='SPECTRUM',
+ CHANNEL='CHANNEL',
+ POSITION_3D ='POSITION_3D',
)
class RSpecElement:
--- /dev/null
+from copy import deepcopy
+from lxml import etree
+
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn
+from sfa.rspecs.version import RSpecVersion
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
+from sfa.rspecs.elements.versions.nitosv1Node import NITOSv1Node
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1Lease import NITOSv1Lease
+from sfa.rspecs.elements.versions.nitosv1Channel import NITOSv1Channel
+
+class NITOSv1(RSpecVersion):
+ enabled = True
+ type = 'NITOS'
+ content_type = '*'
+ version = '1'
+ schema = None
+ namespace = None
+ extensions = {}
+ namespaces = None
+ template = '<RSpec type="%s"></RSpec>' % type
+
+ # Network
+ def get_networks(self):
+ network_elems = self.xml.xpath('//network')
+ networks = [network_elem.get_instance(fields=['name', 'slice']) for \
+ network_elem in network_elems]
+ return networks
+
+
+ def add_network(self, network):
+ network_tags = self.xml.xpath('//network[@name="%s"]' % network)
+ if not network_tags:
+ network_tag = self.xml.add_element('network', name=network)
+ else:
+ network_tag = network_tags[0]
+ return network_tag
+
+
+ # Nodes
+
+ def get_nodes(self, filter=None):
+ return NITOSv1Node.get_nodes(self.xml, filter)
+
+ def get_nodes_with_slivers(self):
+ return NITOSv1Node.get_nodes_with_slivers(self.xml)
+
+ def add_nodes(self, nodes, network = None, no_dupes=False):
+ NITOSv1Node.add_nodes(self.xml, nodes)
+
+ def merge_node(self, source_node_tag, network, no_dupes=False):
+ if no_dupes and self.get_node_element(node['hostname']):
+ # node already exists
+ return
+
+ network_tag = self.add_network(network)
+ network_tag.append(deepcopy(source_node_tag))
+
+ # Slivers
+
+ def add_slivers(self, hostnames, attributes=[], sliver_urn=None, append=False):
+ # add slice name to network tag
+ network_tags = self.xml.xpath('//network')
+ if network_tags:
+ network_tag = network_tags[0]
+ network_tag.set('slice', urn_to_hrn(sliver_urn)[0])
+
+ # add slivers
+ sliver = {'name':sliver_urn,
+ 'pl_tags': attributes}
+ for hostname in hostnames:
+ if sliver_urn:
+ sliver['name'] = sliver_urn
+ node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
+ if not node_elems:
+ continue
+ node_elem = node_elems[0]
+ NITOSv1Sliver.add_slivers(node_elem.element, sliver)
+
+ # remove all nodes without slivers
+ if not append:
+ for node_elem in self.get_nodes():
+ if not node_elem['slivers']:
+ parent = node_elem.element.getparent()
+ parent.remove(node_elem.element)
+
+
+ def remove_slivers(self, slivers, network=None, no_dupes=False):
+ NITOSv1Node.remove_slivers(self.xml, slivers)
+
+ def get_slice_attributes(self, network=None):
+ attributes = []
+ nodes_with_slivers = self.get_nodes_with_slivers()
+ for default_attribute in self.get_default_sliver_attributes(network):
+ attribute = default_attribute.copy()
+ attribute['node_id'] = None
+ attributes.append(attribute)
+ for node in nodes_with_slivers:
+ nodename=node['component_name']
+ sliver_attributes = self.get_sliver_attributes(nodename, network)
+ for sliver_attribute in sliver_attributes:
+ sliver_attribute['node_id'] = nodename
+ attributes.append(sliver_attribute)
+ return attributes
+
+
+ def add_sliver_attribute(self, component_id, name, value, network=None):
+ nodes = self.get_nodes({'component_id': '*%s*' % component_id})
+ if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
+ node = nodes[0]
+ slivers = NITOSv1Sliver.get_slivers(node)
+ if slivers:
+ sliver = slivers[0]
+ NITOSv1Sliver.add_sliver_attribute(sliver, name, value)
+ else:
+ # should this be an assert / raise an exception?
+ logger.error("WARNING: failed to find component_id %s" % component_id)
+
+ def get_sliver_attributes(self, component_id, network=None):
+ nodes = self.get_nodes({'component_id': '*%s*' % component_id})
+ attribs = []
+ if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
+ node = nodes[0]
+ slivers = NITOSv1Sliver.get_slivers(node.element)
+ if slivers is not None and isinstance(slivers, list) and len(slivers) > 0:
+ sliver = slivers[0]
+ attribs = NITOSv1Sliver.get_sliver_attributes(sliver.element)
+ return attribs
+
+ def remove_sliver_attribute(self, component_id, name, value, network=None):
+ attribs = self.get_sliver_attributes(component_id)
+ for attrib in attribs:
+ if attrib['name'] == name and attrib['value'] == value:
+ #attrib.element.delete()
+ parent = attrib.element.getparent()
+ parent.remove(attrib.element)
+
+ def add_default_sliver_attribute(self, name, value, network=None):
+ if network:
+ defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network)
+ else:
+ defaults = self.xml.xpath("//sliver_defaults")
+ if not defaults:
+ if network:
+ network_tag = self.xml.xpath("//network[@name='%s']" % network)
+ else:
+ network_tag = self.xml.xpath("//network")
+ if isinstance(network_tag, list):
+ network_tag = network_tag[0]
+ defaults = network_tag.add_element('sliver_defaults')
+ elif isinstance(defaults, list):
+ defaults = defaults[0]
+ NITOSv1Sliver.add_sliver_attribute(defaults, name, value)
+
+ def get_default_sliver_attributes(self, network=None):
+ if network:
+ defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network)
+ else:
+ defaults = self.xml.xpath("//sliver_defaults")
+ if not defaults: return []
+ return NITOSv1Sliver.get_sliver_attributes(defaults[0])
+
+ def remove_default_sliver_attribute(self, name, value, network=None):
+ attribs = self.get_default_sliver_attributes(network)
+ for attrib in attribs:
+ if attrib['name'] == name and attrib['value'] == value:
+ #attrib.element.delete()
+ parent = attrib.element.getparent()
+ parent.remove(attrib.element)
+
+ # Links
+
+ def get_links(self, network=None):
+ return PGv2Link.get_links(self.xml)
+
+ def get_link_requests(self):
+ return PGv2Link.get_link_requests(self.xml)
+
+ def add_links(self, links):
+ networks = self.get_networks()
+ if len(networks) > 0:
+ xml = networks[0].element
+ else:
+ xml = self.xml
+ PGv2Link.add_links(xml, links)
+
+ def add_link_requests(self, links):
+ PGv2Link.add_link_requests(self.xml, links)
+
+ # utility
+
+ def merge(self, in_rspec):
+ """
+ Merge contents for specified rspec with current rspec
+ """
+
+ if not in_rspec:
+ return
+
+ from sfa.rspecs.rspec import RSpec
+ if isinstance(in_rspec, RSpec):
+ rspec = in_rspec
+ else:
+ rspec = RSpec(in_rspec)
+ if rspec.version.type.lower() == 'protogeni':
+ from sfa.rspecs.rspec_converter import RSpecConverter
+ in_rspec = RSpecConverter.to_sfa_rspec(rspec.toxml())
+ rspec = RSpec(in_rspec)
+
+ # just copy over all networks
+ current_networks = self.get_networks()
+ networks = rspec.version.get_networks()
+ for network in networks:
+ current_network = network.get('name')
+ if current_network and current_network not in current_networks:
+ self.xml.append(network.element)
+ current_networks.append(current_network)
+
+ # Leases
+
+ def get_leases(self, filter=None):
+ return NITOSv1Lease.get_leases(self.xml, filter)
+
+ def add_leases(self, leases, channels, network = None, no_dupes=False):
+ NITOSv1Lease.add_leases(self.xml, leases, channels)
+
+ # Spectrum
+
+ def get_channels(self, filter=None):
+ return NITOSv1Channel.get_channels(self.xml, filter)
+
+ def add_channels(self, channels, network = None, no_dupes=False):
+ NITOSv1Channel.add_channels(self.xml, channels)
+
+
+
+# ad-hoc manual smoke test
+if __name__ == '__main__':
+    from sfa.rspecs.rspec import RSpec
+    from sfa.rspecs.rspec_elements import *
+    r = RSpec('/tmp/resources.rspec')
+    # NOTE(review): SFAv1 is not defined or imported in this module (the
+    # module defines NITOSv1) -- this line raises NameError if run;
+    # confirm which class was intended (copy/paste leftover from sfav1).
+    r.load_rspec_elements(SFAv1.elements)
+    print r.get(RSpecElements.NODE)
from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
from sfa.rspecs.elements.versions.pgv2Node import PGv2Node
from sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
+from sfa.rspecs.elements.versions.pgv2Lease import PGv2Lease
class PGv2(RSpecVersion):
type = 'ProtoGENI'
# set the sliver id
#slice_id = sliver_info.get('slice_id', -1)
#node_id = sliver_info.get('node_id', -1)
+ #sliver_id = Xrn(xrn=sliver_urn, type='slice', id=str(node_id)).get_urn()
#node_elem.set('sliver_id', sliver_id)
# add the sliver type elemnt
# Leases
def get_leases(self, filter=None):
- return []
+ return PGv2Lease.get_leases(self.xml, filter)
def add_leases(self, leases, network = None, no_dupes=False):
- return None
+ PGv2Lease.add_leases(self.xml, leases)
# Utility
nodes = in_rspec.version.get_nodes()
# protogeni rspecs need to advertise the availabel sliver types
+ main_nodes = []
for node in nodes:
+ if not node['component_name']:
+ # this node element is part of a lease
+ continue
if not node.has_key('sliver') or not node['sliver']:
node['sliver'] = {'name': 'plab-vserver'}
-
- self.add_nodes(nodes)
+ main_nodes.append(node)
+ self.add_nodes(main_nodes)
self.add_links(in_rspec.version.get_links())
+ # Leases
+ leases = in_rspec.version.get_leases()
+ self.add_leases(leases)
#
#rspec = RSpec(in_rspec)
#for child in rspec.xml.iterchildren():
--- /dev/null
+from copy import deepcopy
+
+
+from sfa.rspecs.version import RSpecVersion
+import sys
+from sfa.rspecs.elements.versions.slabv1Lease import Slabv1Lease
+from sfa.rspecs.elements.versions.slabv1Node import Slabv1Node
+from sfa.rspecs.elements.versions.slabv1Sliver import Slabv1Sliver
+
+
+from sfa.rspecs.elements.versions.sfav1Lease import SFAv1Lease
+
+from sfa.util.sfalogging import logger
+
+class Slabv1(RSpecVersion):
+ #enabled = True
+ type = 'Slab'
+ content_type = 'ad'
+ version = '1'
+ #template = '<RSpec type="%s"></RSpec>' % type
+
+ schema = 'http://senslab.info/resources/rspec/1/ad.xsd'
+ namespace = 'http://www.geni.net/resources/rspec/3'
+ extensions = {
+ 'flack': "http://www.protogeni.net/resources/rspec/ext/flack/1",
+ 'planetlab': "http://www.planet-lab.org/resources/sfa/ext/planetlab/1",
+ }
+ namespaces = dict(extensions.items() + [('default', namespace)])
+ elements = []
+
+ # Network
+ def get_networks(self):
+ #WARNING Added //default:network to the xpath
+ #otherwise network element not detected 16/07/12 SA
+
+ network_elems = self.xml.xpath('//network | //default:network')
+ networks = [network_elem.get_instance(fields=['name', 'slice']) for \
+ network_elem in network_elems]
+ return networks
+
+
+ def add_network(self, network):
+ network_tags = self.xml.xpath('//network[@name="%s"]' % network)
+ if not network_tags:
+ network_tag = self.xml.add_element('network', name=network)
+ else:
+ network_tag = network_tags[0]
+ return network_tag
+
+
+ # Nodes
+
+ def get_nodes(self, filter=None):
+ return Slabv1Node.get_nodes(self.xml, filter)
+
+ def get_nodes_with_slivers(self):
+ return Slabv1Node.get_nodes_with_slivers(self.xml)
+
+    def get_slice_timeslot(self ):
+        # NOTE(review): Slabv1Timeslot is neither defined nor imported in
+        # this module -- calling this method raises NameError; confirm the
+        # module it should be imported from.
+        return Slabv1Timeslot.get_slice_timeslot(self.xml)
+
+ def add_connection_information(self, ldap_username, sites_set):
+ return Slabv1Node.add_connection_information(self.xml,ldap_username, sites_set)
+
+ def add_nodes(self, nodes, check_for_dupes=False):
+ return Slabv1Node.add_nodes(self.xml,nodes )
+
+ def merge_node(self, source_node_tag, network, no_dupes = False):
+ logger.debug("SLABV1 merge_node")
+ #if no_dupes and self.get_node_element(node['hostname']):
+ ## node already exists
+ #return
+ network_tag = self.add_network(network)
+ network_tag.append(deepcopy(source_node_tag))
+
+ # Slivers
+
+ def get_sliver_attributes(self, hostname, node, network=None):
+ print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY get_sliver_attributes hostname %s " %(hostname)
+ nodes = self.get_nodes({'component_id': '*%s*' %hostname})
+ attribs = []
+ print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY get_sliver_attributes-----------------nodes %s " %(nodes)
+ if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
+ node = nodes[0]
+ #if node :
+ #sliver = node.xpath('./default:sliver | ./sliver')
+ #sliver = node.xpath('./default:sliver', namespaces=self.namespaces)
+ sliver = node['slivers']
+
+ if sliver is not None and isinstance(sliver, list) and len(sliver) > 0:
+ sliver = sliver[0]
+ attribs = sliver
+ #attribs = self.attributes_list(sliver)
+ print>>sys.stderr, "\r\n \r\n \r\n \t\t SLABV1.PY get_sliver_attributes----------NN------- sliver %s self.namespaces %s attribs %s " %(sliver, self.namespaces,attribs)
+ return attribs
+
+ def get_slice_attributes(self, network=None):
+
+ slice_attributes = []
+
+ nodes_with_slivers = self.get_nodes_with_slivers()
+
+ # TODO: default sliver attributes in the PG rspec?
+ default_ns_prefix = self.namespaces['default']
+ for node in nodes_with_slivers:
+ sliver_attributes = self.get_sliver_attributes(node['component_id'],node, network)
+ for sliver_attribute in sliver_attributes:
+ name = str(sliver_attribute[0])
+ text = str(sliver_attribute[1])
+ attribs = sliver_attribute[2]
+            # we currently only support the <initscript> and <flack> attributes
+ #if 'info' in name:
+ #attribute = {'name': 'flack_info', 'value': str(attribs), 'node_id': node}
+ #slice_attributes.append(attribute)
+ #elif 'initscript' in name:
+ if 'initscript' in name:
+ if attribs is not None and 'name' in attribs:
+ value = attribs['name']
+ else:
+ value = text
+ attribute = {'name': 'initscript', 'value': value, 'node_id': node}
+ slice_attributes.append(attribute)
+
+
+ return slice_attributes
+
+ def attributes_list(self, elem):
+ opts = []
+ if elem is not None:
+ for e in elem:
+ opts.append((e.tag, str(e.text).strip(), e.attrib))
+ return opts
+
+ def get_default_sliver_attributes(self, network=None):
+ return []
+
+ def add_default_sliver_attribute(self, name, value, network=None):
+ pass
+
+ def add_slivers(self, hostnames, attributes=[], sliver_urn=None, append=False):
+        # all nodes should already be present in the rspec. Remove all
+        # nodes that don't have slivers
+ print>>sys.stderr, "\r\n \r\n \r\n \t\t\t SLABv1.PY add_slivers ----->get_node "
+ for hostname in hostnames:
+ node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
+ if not node_elems:
+ continue
+ node_elem = node_elems[0]
+
+ # determine sliver types for this node
+ #TODO : add_slivers valid type of sliver needs to be changed 13/07/12 SA
+ valid_sliver_types = ['slab-node', 'emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
+ #valid_sliver_types = ['emulab-openvz', 'raw-pc', 'plab-vserver', 'plab-vnode']
+ requested_sliver_type = None
+ for sliver_type in node_elem.get('slivers', []):
+ if sliver_type.get('type') in valid_sliver_types:
+ requested_sliver_type = sliver_type['type']
+
+ if not requested_sliver_type:
+ continue
+ sliver = {'type': requested_sliver_type,
+ 'pl_tags': attributes}
+ print>>sys.stderr, "\r\n \r\n \r\n \t\t\t SLABv1.PY add_slivers node_elem %s sliver_type %s \r\n \r\n " %(node_elem, sliver_type)
+ # remove available element
+ for available_elem in node_elem.xpath('./default:available | ./available'):
+ node_elem.remove(available_elem)
+
+ # remove interface elements
+ for interface_elem in node_elem.xpath('./default:interface | ./interface'):
+ node_elem.remove(interface_elem)
+
+ # remove existing sliver_type elements
+ for sliver_type in node_elem.get('slivers', []):
+ node_elem.element.remove(sliver_type.element)
+
+ # set the client id
+ node_elem.element.set('client_id', hostname)
+ if sliver_urn:
+ pass
+ # TODO
+ # set the sliver id
+ #slice_id = sliver_info.get('slice_id', -1)
+ #node_id = sliver_info.get('node_id', -1)
+ #sliver_id = urn_to_sliver_id(sliver_urn, slice_id, node_id)
+ #node_elem.set('sliver_id', sliver_id)
+
+ # add the sliver type elemnt
+ Slabv1Sliver.add_slivers(node_elem.element, sliver)
+ #Slabv1SliverType.add_slivers(node_elem.element, sliver)
+
+ # remove all nodes without slivers
+ if not append:
+ for node_elem in self.get_nodes():
+ if not node_elem['client_id']:
+ parent = node_elem.element.getparent()
+ parent.remove(node_elem.element)
+
+ def remove_slivers(self, slivers, network=None, no_dupes=False):
+ Slabv1Node.remove_slivers(self.xml, slivers)
+
+
+ # Utility
+
+ def merge(self, in_rspec):
+ """
+ Merge contents for specified rspec with current rspec
+ """
+
+ if not in_rspec:
+ return
+
+ from sfa.rspecs.rspec import RSpec
+
+ if isinstance(in_rspec, RSpec):
+ rspec = in_rspec
+ else:
+ rspec = RSpec(in_rspec)
+ if rspec.version.type.lower() == 'protogeni':
+ from sfa.rspecs.rspec_converter import RSpecConverter
+ in_rspec = RSpecConverter.to_sfa_rspec(rspec.toxml())
+ rspec = RSpec(in_rspec)
+ # just copy over all networks
+ #Attention special get_networks using //default:network xpath
+ current_networks = self.get_networks()
+ networks = rspec.version.get_networks()
+ for network in networks:
+ current_network = network.get('name')
+ if current_network and current_network not in current_networks:
+ self.xml.append(network.element)
+ current_networks.append(current_network)
+
+
+
+
+
+ # Leases
+
+ def get_leases(self, lease_filter=None):
+ return SFAv1Lease.get_leases(self.xml, lease_filter)
+ #return Slabv1Lease.get_leases(self.xml, lease_filter)
+
+ def add_leases(self, leases, network = None, no_dupes=False):
+ SFAv1Lease.add_leases(self.xml, leases)
+ #Slabv1Lease.add_leases(self.xml, leases)
+
+ def cleanup(self):
+ # remove unncecessary elements, attributes
+ if self.type in ['request', 'manifest']:
+ # remove 'available' element from remaining node elements
+ self.xml.remove_element('//default:available | //available')
+
+
+class Slabv1Ad(Slabv1):
+ enabled = True
+ content_type = 'ad'
+ schema = 'http://senslab.info/resources/rspec/1/ad.xsd'
+ #http://www.geni.net/resources/rspec/3/ad.xsd'
+ template = '<rspec type="advertisement" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
+class Slabv1Request(Slabv1):
+ enabled = True
+ content_type = 'request'
+ schema = 'http://senslab.info/resources/rspec/1/request.xsd'
+ #http://www.geni.net/resources/rspec/3/request.xsd
+ template = '<rspec type="request" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
+class Slabv1Manifest(Slabv1):
+ enabled = True
+ content_type = 'manifest'
+ schema = 'http://senslab.info/resources/rspec/1/manifest.xsd'
+ #http://www.geni.net/resources/rspec/3/manifest.xsd
+ template = '<rspec type="manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://senslab.info/resources/rspec/1" xmlns:flack="http://senslab.info/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://senslab.info/resources/rspec/1 http://senslab.info/resources/rspec/1/manifest.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+
+
if __name__ == '__main__':
    # Manual smoke test: parse an RSpec file with this version's element
    # definitions and dump its nodes. Not exercised in production.
    from sfa.rspecs.rspec import RSpec
    from sfa.rspecs.rspec_elements import *
    r = RSpec('/tmp/slab.rspec')
    r.load_rspec_elements(Slabv1.elements)
    r.namespaces = Slabv1.namespaces
    print r.get(RSpecElements.NODE)
--- /dev/null
+import random
+from passlib.hash import ldap_salted_sha1 as lssha
+from sfa.util.xrn import get_authority
+import ldap
+from sfa.util.config import Config
+
+
+import ldap.modlist as modlist
+from sfa.util.sfalogging import logger
+import os.path
+
+#API for OpenLDAP
+
+
class LdapConfig():
    """Loads LDAP connection settings by exec'ing a python config file
    into this instance's __dict__ (so e.g. LDAP_IP_ADDRESS defined in the
    file becomes an attribute of the LdapConfig object).
    """
    def __init__(self, config_file = '/etc/sfa/ldap_config.py'):

        try:
            execfile(config_file, self.__dict__)

            self.config_file = config_file
            # path to configuration data
            self.config_path = os.path.dirname(config_file)
        except IOError:
            raise IOError, "Could not find or load the configuration file: %s" \
                            % config_file
+
+
+class ldap_co:
+ """ Set admin login and server configuration variables."""
+
+ def __init__(self):
+ #Senslab PROD LDAP parameters
+ self.ldapserv = None
+ ldap_config = LdapConfig()
+ self.config = ldap_config
+ self.ldapHost = ldap_config.LDAP_IP_ADDRESS
+ self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN
+ self.ldapGroupDN = ldap_config.LDAP_GROUP_DN
+ self.ldapAdminDN = ldap_config.LDAP_WEB_DN
+ self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD
+
+
+ self.ldapPort = ldap.PORT
+ self.ldapVersion = ldap.VERSION3
+ self.ldapSearchScope = ldap.SCOPE_SUBTREE
+
+
+ def connect(self, bind = True):
+ """Enables connection to the LDAP server.
+ Set the bind parameter to True if a bind is needed
+ (for add/modify/delete operations).
+ Set to False otherwise.
+
+ """
+ try:
+ self.ldapserv = ldap.open(self.ldapHost)
+ except ldap.LDAPError, error:
+ return {'bool' : False, 'message' : error }
+
+ # Bind with authentification
+ if(bind):
+ return self.bind()
+
+ else:
+ return {'bool': True}
+
+ def bind(self):
+ """ Binding method. """
+ try:
+ # Opens a connection after a call to ldap.open in connect:
+ self.ldapserv = ldap.initialize("ldap://" + self.ldapHost)
+
+ # Bind/authenticate with a user with apropriate
+ #rights to add objects
+ self.ldapserv.simple_bind_s(self.ldapAdminDN, \
+ self.ldapAdminPassword)
+
+ except ldap.LDAPError, error:
+ return {'bool' : False, 'message' : error }
+
+ return {'bool': True}
+
+ def close(self):
+ """ Close the LDAP connection """
+ try:
+ self.ldapserv.unbind_s()
+ except ldap.LDAPError, error:
+ return {'bool' : False, 'message' : error }
+
+
+class LDAPapi :
    def __init__(self):
        """Load SFA and LDAP configuration and set password/login policy
        (8-char passwords from a fixed alphabet, /bin/bash login shell).
        """
        logger.setLevelDebug()
        #SFA related config

        config = Config()

        # Root authority name; used to build/validate user hrns.
        self.authname = config.SFA_REGISTRY_ROOT_AUTH

        self.conn = ldap_co()
        self.ldapUserQuotaNFS = self.conn.config.LDAP_USER_QUOTA_NFS
        self.ldapUserUidNumberMin = self.conn.config.LDAP_USER_UID_NUMBER_MIN
        self.ldapUserGidNumber = self.conn.config.LDAP_USER_GID_NUMBER
        self.ldapUserHomePath = self.conn.config.LDAP_USER_HOME_PATH

        # Generated passwords are 8 characters long.
        self.lengthPassword = 8
        # All user entries live under the "people" DN.
        self.baseDN = self.conn.ldapPeopleDN

        # Alphabet used by generate_password.
        self.charsPassword = [ '!', '$', '(',')', '*', '+', ',', '-', '.', \
                                '0', '1', '2', '3', '4', '5', '6', '7', '8', \
                                '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', \
                                'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', \
                                'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', \
                                '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', \
                                'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p' ,'q', \
                                'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', \
                                '\'']

        self.ldapShell = '/bin/bash'
+
+
+ def generate_login(self, record):
+ """Generate login for adding a new user in LDAP Directory
+ (four characters minimum length)
+ Record contains first name and last name.
+
+ """
+ if 'first_name' in record and 'last_name' in record:
+ #Remove all special characters from first_name/last name
+ lower_first_name = record['first_name'].replace('-','')\
+ .replace('_','').replace('[','')\
+ .replace(']','').replace(' ','')\
+ .lower()
+ lower_last_name = record['last_name'].replace('-','')\
+ .replace('_','').replace('[','')\
+ .replace(']','').replace(' ','')\
+ .lower()
+
+
+ #No first name and last name
+ #check email
+ else:
+ #For compatibility with other ldap func
+ if 'mail' in record and 'email' not in record:
+ record['email'] = record['mail']
+ email = record['email']
+ email = email.split('@')[0].lower()
+ lower_first_name = None
+ lower_last_name = None
+ #Assume there is first name and last name in email
+ #if there is a separator
+ separator_list = ['.', '_', '-']
+ for sep in separator_list:
+ if sep in email:
+ mail = email.split(sep)
+ lower_first_name = mail[0]
+ lower_last_name = mail[1]
+ break
+ #Otherwise just take the part before the @ as the
+ #lower_first_name and lower_last_name
+ if lower_first_name is None:
+ lower_first_name = email
+ lower_last_name = email
+
+ length_last_name = len(lower_last_name)
+ login_max_length = 8
+
+ #Try generating a unique login based on first name and last name
+ getAttrs = ['uid']
+ if length_last_name >= login_max_length :
+ login = lower_last_name[0:login_max_length]
+ index = 0
+ logger.debug("login : %s index : %s" %(login, index))
+ elif length_last_name >= 4 :
+ login = lower_last_name
+ index = 0
+ logger.debug("login : %s index : %s" %(login, index))
+ elif length_last_name == 3 :
+ login = lower_first_name[0:1] + lower_last_name
+ index = 1
+ logger.debug("login : %s index : %s" %(login, index))
+ elif length_last_name == 2:
+ if len ( lower_first_name) >=2:
+ login = lower_first_name[0:2] + lower_last_name
+ index = 2
+ logger.debug("login : %s index : %s" %(login, index))
+ else:
+ logger.error("LoginException : \
+ Generation login error with \
+ minimum four characters")
+
+
+ else :
+ logger.error("LDAP generate_login failed : \
+ impossible to generate unique login for %s %s" \
+ %(lower_first_name,lower_last_name))
+
+ login_filter = '(uid=' + login + ')'
+
+ try :
+ #Check if login already in use
+ while (len(self.LdapSearch(login_filter, getAttrs)) is not 0 ):
+
+ index += 1
+ if index >= 9:
+ logger.error("LoginException : Generation login error \
+ with minimum four characters")
+ else:
+ try:
+ login = lower_first_name[0:index] + \
+ lower_last_name[0:login_max_length-index]
+ login_filter = '(uid='+ login+ ')'
+ except KeyError:
+ print "lower_first_name - lower_last_name too short"
+
+ logger.debug("LDAP.API \t generate_login login %s" %(login))
+ return login
+
+ except ldap.LDAPError, error :
+ logger.log_exc("LDAP generate_login Error %s" %error)
+ return None
+
+
+
+ def generate_password(self):
+
+ """Generate password for adding a new user in LDAP Directory
+ (8 characters length) return password
+
+ """
+ password = str()
+ length = len(self.charsPassword)
+ for index in range(self.lengthPassword):
+ char_index = random.randint(0, length-1)
+ password += self.charsPassword[char_index]
+
+ return password
+
    @staticmethod
    def encrypt_password( password):
        """ Use passlib library to make a RFC2307 LDAP encrypted password
        (ldap_salted_sha1: '{SSHA}' prefix, sha-1 digest, 8-byte salt).
        Returns the encrypted password string.

        """
        #Keep consistency with Java Senslab's LDAP API
        #RFC2307SSHAPasswordEncryptor so set the salt size to 8 bytes
        return lssha.encrypt(password, salt_size = 8)
+
+
+
+ def find_max_uidNumber(self):
+
+ """Find the LDAP max uidNumber (POSIX uid attribute) .
+ Used when adding a new user in LDAP Directory
+ returns string max uidNumber + 1
+
+ """
+ #First, get all the users in the LDAP
+ getAttrs = "(uidNumber=*)"
+ login_filter = ['uidNumber']
+
+ result_data = self.LdapSearch(getAttrs, login_filter)
+ #It there is no user in LDAP yet, First LDAP user
+ if result_data == []:
+ max_uidnumber = self.ldapUserUidNumberMin
+ #Otherwise, get the highest uidNumber
+ else:
+
+ uidNumberList = [int(r[1]['uidNumber'][0])for r in result_data ]
+ logger.debug("LDAPapi.py \tfind_max_uidNumber \
+ uidNumberList %s " %(uidNumberList))
+ max_uidnumber = max(uidNumberList) + 1
+
+ return str(max_uidnumber)
+
+
    def get_ssh_pkey(self, record):
        """TODO ; Get ssh public key from sfa record
        To be filled by N. Turro ? or using GID pl way?

        NOTE(review): stub — currently ignores the record and returns the
        French placeholder 'A REMPLIR ' ("to be filled in"); callers store
        this literal as the sshPublicKey attribute.
        """
        return 'A REMPLIR '
+
+ @staticmethod
+ def make_ldap_filters_from_record( record=None):
+ """TODO Handle OR filtering in the ldap query when
+ dealing with a list of records instead of doing a for loop in GetPersons
+ Helper function to make LDAP filter requests out of SFA records.
+ """
+ req_ldap = ''
+ req_ldapdict = {}
+ if record :
+ if 'first_name' in record and 'last_name' in record:
+ req_ldapdict['cn'] = str(record['first_name'])+" "\
+ + str(record['last_name'])
+ if 'email' in record :
+ req_ldapdict['mail'] = record['email']
+ if 'mail' in record:
+ req_ldapdict['mail'] = record['mail']
+ if 'enabled' in record:
+ if record['enabled'] == True :
+ req_ldapdict['shadowExpire'] = '-1'
+ else:
+ req_ldapdict['shadowExpire'] = '0'
+
+ #Hrn should not be part of the filter because the hrn
+ #presented by a certificate of a SFA user not imported in
+ #Senslab does not include the senslab login in it
+ #Plus, the SFA user may already have an account with senslab
+ #using another login.
+
+
+
+ logger.debug("\r\n \t LDAP.PY make_ldap_filters_from_record \
+ record %s req_ldapdict %s" \
+ %(record, req_ldapdict))
+
+ for k in req_ldapdict:
+ req_ldap += '('+ str(k)+ '=' + str(req_ldapdict[k]) + ')'
+ if len(req_ldapdict.keys()) >1 :
+ req_ldap = req_ldap[:0]+"(&"+req_ldap[0:]
+ size = len(req_ldap)
+ req_ldap = req_ldap[:(size-1)] +')'+ req_ldap[(size-1):]
+ else:
+ req_ldap = "(cn=*)"
+
+ return req_ldap
+
    def make_ldap_attributes_from_record(self, record):
        """When adding a new user to Senslab's LDAP, creates an attributes
        dictionary from the SFA record: posix account fields, auto-generated
        login/uidNumber/password, ssh key, and Senslab bookkeeping fields.

        """

        attrs = {}
        attrs['objectClass'] = ["top", "person", "inetOrgPerson", \
                                    "organizationalPerson", "posixAccount", \
                                    "shadowAccount", "systemQuotas", \
                                    "ldapPublicKey"]


        attrs['uid'] = self.generate_login(record)
        try:
            attrs['givenName'] = str(record['first_name']).lower().capitalize()
            attrs['sn'] = str(record['last_name']).lower().capitalize()
            attrs['cn'] = attrs['givenName'] + ' ' + attrs['sn']
            attrs['gecos'] = attrs['givenName'] + ' ' + attrs['sn']

        except KeyError:
            # No first/last name: reuse the login everywhere.
            attrs['givenName'] = attrs['uid']
            attrs['sn'] = attrs['uid']
            attrs['cn'] = attrs['uid']
            attrs['gecos'] = attrs['uid']


        attrs['quota'] = self.ldapUserQuotaNFS
        attrs['homeDirectory'] = self.ldapUserHomePath + attrs['uid']
        attrs['loginShell'] = self.ldapShell
        attrs['gidNumber'] = self.ldapUserGidNumber
        attrs['uidNumber'] = self.find_max_uidNumber()
        # NOTE(review): raises KeyError when the record only carries
        # 'email' and not 'mail' — confirm callers always set 'mail'.
        attrs['mail'] = record['mail'].lower()
        try:
            attrs['sshPublicKey'] = record['pkey']
        except KeyError:
            attrs['sshPublicKey'] = self.get_ssh_pkey(record)


        #Password is automatically generated because SFA user don't go
        #through the Senslab website used to register new users,
        #There is no place in SFA where users can enter such information
        #yet.
        #If the user wants to set his own password , he must go to the Senslab
        #website.
        password = self.generate_password()
        attrs['userPassword'] = self.encrypt_password(password)

        #Account automatically validated (no mail request to admins)
        #Set to 0 to disable the account, -1 to enable it,
        attrs['shadowExpire'] = '-1'

        #Motivation field in Senslab
        attrs['description'] = 'SFA USER FROM OUTSIDE SENSLAB'

        attrs['ou'] = 'SFA' #Optional: organizational unit
        #No info about those here:
        attrs['l'] = 'To be defined'#Optional: Locality.
        attrs['st'] = 'To be defined' #Optional: state or province (country).

        return attrs
+
+
+
    def LdapAddUser(self, record) :
        """Add SFA user to LDAP if it is not in LDAP yet.

        Builds the LDAP attributes from the SFA record, skips the add when
        an entry already matches (same cn/mail), otherwise binds and adds
        the new entry under baseDN. Returns {'bool': True, 'uid': login}
        on success, a failure status dict otherwise.
        """
        logger.debug(" \r\n \t LDAP LdapAddUser \r\n\r\n =====================================================\r\n ")
        user_ldap_attrs = self.make_ldap_attributes_from_record(record)


        #Check if user already in LDAP wih email, first name and last name
        filter_by = self.make_ldap_filters_from_record(user_ldap_attrs)
        user_exist = self.LdapSearch(filter_by)
        if user_exist:
            logger.warning(" \r\n \t LDAP LdapAddUser user %s %s \
                        already exists" %(user_ldap_attrs['sn'], \
                        user_ldap_attrs['mail']))
            return {'bool': False}

        #Bind to the server
        result = self.conn.connect()

        if(result['bool']):

            # A dict to help build the "body" of the object

            logger.debug(" \r\n \t LDAP LdapAddUser attrs %s " %user_ldap_attrs)

            # The dn of our new entry/object
            dn = 'uid=' + user_ldap_attrs['uid'] + "," + self.baseDN

            try:
                # addModlist turns the attribute dict into the add-list
                # format expected by add_s.
                ldif = modlist.addModlist(user_ldap_attrs)
                logger.debug("LDAPapi.py add attrs %s \r\n  ldif %s"\
                                %(user_ldap_attrs, ldif) )
                self.conn.ldapserv.add_s(dn, ldif)

                logger.info("Adding user %s login %s in LDAP" \
                        %(user_ldap_attrs['cn'] , user_ldap_attrs['uid']))


            except ldap.LDAPError, error:
                logger.log_exc("LDAP Add Error %s" %error)
                return {'bool' : False, 'message' : error }

            self.conn.close()
            return {'bool': True, 'uid':user_ldap_attrs['uid']}
        else:
            return result
+
+
+ def LdapDelete(self, person_dn):
+ """
+ Deletes a person in LDAP. Uses the dn of the user.
+ """
+ #Connect and bind
+ result = self.conn.connect()
+ if(result['bool']):
+ try:
+ self.conn.ldapserv.delete_s(person_dn)
+ self.conn.close()
+ return {'bool': True}
+
+ except ldap.LDAPError, error:
+ logger.log_exc("LDAP Delete Error %s" %error)
+ return {'bool': False}
+
+
    def LdapDeleteUser(self, record_filter):
        """
        Deletes a SFA person in LDAP, based on the user's hrn.
        Looks the user up, builds its dn and delegates to LdapDelete.
        Returns the LdapDelete status dict, or {'bool': False} when the
        user was not found.
        """
        #Find uid of the person
        # NOTE(review): [] is passed positionally as LdapFindUser's
        # is_user_enabled argument (falsy, so harmless), while
        # expected_fields stays at its default — confirm [] was meant
        # for expected_fields.
        person = self.LdapFindUser(record_filter, [])
        logger.debug("LDAPapi.py \t LdapDeleteUser record %s person %s" \
        %(record_filter, person))

        if person:
            dn = 'uid=' + person['uid'] + "," + self.baseDN
        else:
            return {'bool': False}

        result = self.LdapDelete(dn)
        return result
+
+
+ def LdapModify(self, dn, old_attributes_dict, new_attributes_dict):
+ """ Modifies a LDAP entry """
+
+ ldif = modlist.modifyModlist(old_attributes_dict, new_attributes_dict)
+ # Connect and bind/authenticate
+ result = self.conn.connect()
+ if (result['bool']):
+ try:
+ self.conn.ldapserv.modify_s(dn, ldif)
+ self.conn.close()
+ return {'bool' : True }
+ except ldap.LDAPError, error:
+ logger.log_exc("LDAP LdapModify Error %s" %error)
+ return {'bool' : False }
+
+
    def LdapModifyUser(self, user_record, new_attributes_dict):
        """
        Gets the record from one user_uid_login based on record_filter
        and changes the attributes according to the specified new_attributes.
        Does not use this if we need to modify the uid. Use a ModRDN
        #operation instead ( modify relative DN )
        Returns the LdapModify status dict, or {'bool': False} on lookup
        failure.
        """
        if user_record is None:
            logger.error("LDAP \t LdapModifyUser Need user record ")
            return {'bool': False}

        #Get all the attributes of the user_uid_login
        #person = self.LdapFindUser(record_filter,[])
        req_ldap = self.make_ldap_filters_from_record(user_record)
        person_list = self.LdapSearch(req_ldap, [])
        logger.debug("LDAPapi.py \t LdapModifyUser person_list : %s" \
                                                        %(person_list))
        # The filter must match exactly one entry.
        if person_list and len(person_list) > 1 :
            logger.error("LDAP \t LdapModifyUser Too many users returned")
            return {'bool': False}
        if person_list is None :
            logger.error("LDAP \t LdapModifyUser User %s doesn't exist "\
                        %(user_record))
            return {'bool': False}

        # The dn of our existing entry/object
        #One result only from ldapSearch
        person = person_list[0][1]
        dn  = 'uid=' + person['uid'][0] + "," + self.baseDN

        if new_attributes_dict:
            # Build the "old" snapshot modifyModlist diffs against: ''
            # for attributes the entry does not have yet.
            old = {}
            for k in new_attributes_dict:
                if k not in person:
                    old[k] =  ''
                else :
                    old[k] = person[k]
            logger.debug(" LDAPapi.py \t LdapModifyUser  new_attributes %s"\
                                %( new_attributes_dict))
            result = self.LdapModify(dn, old, new_attributes_dict)
            return result
        else:
            logger.error("LDAP \t LdapModifyUser  No new attributes given. ")
            return {'bool': False}
+
+
+
+
+ def LdapMarkUserAsDeleted(self, record):
+
+
+ new_attrs = {}
+ #Disable account
+ new_attrs['shadowExpire'] = '0'
+ logger.debug(" LDAPapi.py \t LdapMarkUserAsDeleted ")
+ ret = self.LdapModifyUser(record, new_attrs)
+ return ret
+
+
+ def LdapResetPassword(self, record):
+ """
+ Resets password for the user whose record is the parameter and changes
+ the corresponding entry in the LDAP.
+
+ """
+ password = self.generate_password()
+ attrs = {}
+ attrs['userPassword'] = self.encrypt_password(password)
+ logger.debug("LDAP LdapResetPassword encrypt_password %s"\
+ %(attrs['userPassword']))
+ result = self.LdapModifyUser(record, attrs)
+ return result
+
+
    def LdapSearch (self, req_ldap = None, expected_fields = None ):
        """
        Used to search directly in LDAP, by using ldap filters and
        return fields.
        When req_ldap is None, returns all the entries in the LDAP.

        Returns the raw python-ldap result list [(dn, attrs_dict), ...],
        [] on LDAP error, or None when the connection failed (see NOTE
        below).
        """
        result = self.conn.connect(bind = False)
        if (result['bool']) :

            return_fields_list = []
            if expected_fields == None :
                return_fields_list = ['mail', 'givenName', 'sn', 'uid', \
                                        'sshPublicKey', 'shadowExpire']
            else :
                return_fields_list = expected_fields
            #No specifc request specified, get the whole LDAP
            if req_ldap == None:
                req_ldap = '(cn=*)'

            logger.debug("LDAP.PY \t LdapSearch  req_ldap %s \
                                    return_fields_list %s" \
                                    %(req_ldap, return_fields_list))

            try:
                msg_id = self.conn.ldapserv.search(
                                            self.baseDN,ldap.SCOPE_SUBTREE,\
                                            req_ldap, return_fields_list)
                #Get all the results matching the search from ldap in one
                #shot (1 value)
                result_type, result_data = \
                                        self.conn.ldapserv.result(msg_id, 1)

                self.conn.close()

                logger.debug("LDAP.PY \t LdapSearch  result_data %s"\
                            %(result_data))

                return result_data

            except  ldap.LDAPError, error :
                logger.log_exc("LDAP LdapSearch Error %s" %error)
                return []

        else:
            # NOTE(review): returns None here but [] on LDAP error above —
            # callers doing len() on the result will crash on a failed
            # connection. Confirm before changing.
            logger.error("LDAP.PY \t Connection Failed" )
            return
+
    def LdapFindUser(self, record = None, is_user_enabled=None, \
            expected_fields = None):
        """
        Search a SFA user with a hrn. User should be already registered
        in Senslab LDAP.
        With a record, returns ONE dict describing the matching user
        (or None); without a record, returns a list of dicts for every
        LDAP user.
        """
        custom_record = {}
        if is_user_enabled:
            # Restrict the search to enabled (shadowExpire = -1) accounts.
            custom_record['enabled'] = is_user_enabled
        if record:
            custom_record.update(record)


        req_ldap = self.make_ldap_filters_from_record(custom_record)
        return_fields_list = []
        if expected_fields == None :
            return_fields_list = ['mail', 'givenName', 'sn', 'uid', \
                                    'sshPublicKey']
        else :
            return_fields_list = expected_fields

        result_data = self.LdapSearch(req_ldap, return_fields_list )
        logger.debug("LDAP.PY \t LdapFindUser  result_data %s" %(result_data))

        # NOTE(review): 'is 0' relies on CPython's small-int cache; should
        # be '== 0'. Also crashes (TypeError) when LdapSearch returned
        # None on a failed connection.
        if len(result_data) is 0:
            return None
        #Asked for a specific user
        if record :
            #try:
            ldapentry = result_data[0][1]
            logger.debug("LDAP.PY \t LdapFindUser ldapentry %s" %(ldapentry))
            tmpname = ldapentry['uid'][0]

            tmpemail = ldapentry['mail'][0]
            if ldapentry['mail'][0] == "unknown":
                tmpemail = None

            parent_hrn = None
            peer_authority = None
            if 'hrn' in record:
                hrn = record['hrn']
                parent_hrn = get_authority(hrn)
                if parent_hrn != self.authname:
                    peer_authority = parent_hrn
                #In case the user was not imported from Senslab LDAP
                #but from another federated site, has an account in
                #senslab but currently using his hrn from federated site
                #then the login is different from the one found in its hrn
                if tmpname != hrn.split('.')[1]:
                    hrn = None
            else:
                hrn = None



            results =  {
                        'type': 'user',
                        'pkey': ldapentry['sshPublicKey'][0],
                        #'uid': ldapentry[1]['uid'][0],
                        'uid': tmpname ,
                        'email':tmpemail,
                        #'email': ldapentry[1]['mail'][0],
                        'first_name': ldapentry['givenName'][0],
                        'last_name': ldapentry['sn'][0],
                        #'phone': 'none',
                        'serial': 'none',
                        'authority': parent_hrn,
                        'peer_authority': peer_authority,
                        'pointer' : -1,
                        'hrn': hrn,
                        }
            #except KeyError,error:
                #logger.log_exc("LDAPapi \t LdaFindUser KEyError %s" \
                                #%error )
                #return
        else:
            #Asked for all users in ldap
            results = []
            for ldapentry in result_data:
                logger.debug(" LDAP.py LdapFindUser ldapentry name : %s " \
                                %(ldapentry[1]['uid'][0]))
                tmpname = ldapentry[1]['uid'][0]
                # hrn of a local user is authority.login
                hrn = self.authname + "." + tmpname

                tmpemail = ldapentry[1]['mail'][0]
                if ldapentry[1]['mail'][0] == "unknown":
                    tmpemail = None


                parent_hrn = get_authority(hrn)

                try:
                    results.append(  {
                            'type': 'user',
                            'pkey': ldapentry[1]['sshPublicKey'][0],
                            #'uid': ldapentry[1]['uid'][0],
                            'uid': tmpname ,
                            'email':tmpemail,
                            #'email': ldapentry[1]['mail'][0],
                            'first_name': ldapentry[1]['givenName'][0],
                            'last_name': ldapentry[1]['sn'][0],
                            #'phone': 'none',
                            'serial': 'none',
                            'authority': self.authname,
                            'peer_authority': '',
                            'pointer' : -1,
                            'hrn': hrn,
                            } )
                except KeyError, error:
                    # NOTE(review): a single entry missing a field (e.g.
                    # sshPublicKey) aborts the whole listing and returns
                    # None — confirm this is intended.
                    logger.log_exc("LDAPapi.PY \t LdapFindUser EXCEPTION %s" \
                                %(error))
                    return
        return results
+
--- /dev/null
+#import sys
+from httplib import HTTPConnection, HTTPException, NotConnected
+import json
+#import datetime
+#from time import gmtime, strftime
+import os.path
+import sys
+#import urllib
+#import urllib2
+from sfa.util.config import Config
+#from sfa.util.xrn import hrn_to_urn, get_authority, Xrn, get_leaf
+
+from sfa.util.sfalogging import logger
+
+
# OAR REST API POST endpoints used by POSTRequestToOARRestAPI; the literal
# "id" in a uri is replaced by the actual job id at request time.
OAR_REQUEST_POST_URI_DICT = {'POST_job':{'uri': '/oarapi/jobs.json'},
                            'DELETE_jobs_id':{'uri':'/oarapi/jobs/id.json'},
                            }

# Content-type header value and serializer module per POST body format.
POST_FORMAT = {'json' : {'content':"application/json", 'object':json},}
+
+#OARpostdatareqfields = {'resource' :"/nodes=", 'command':"sleep", \
+ #'workdir':"/home/", 'walltime':""}
+
+
+
class JsonPage:
    """Class used to manipulate json pages given by OAR.

    Tracks pagination state across OAR answers: the current raw page,
    whether more pages remain (next_page/next_offset) and whether all
    pages have been fetched (end) and should be concatenated.
    """
    def __init__(self):
        #All are boolean variables
        self.concatenate = False
        #Indicates end of data, no more pages to be loaded.
        self.end = False
        self.next_page = False
        #Next query address
        self.next_offset = None
        #Json page
        self.raw_json = None

    def FindNextPage(self):
        """ Gets next data page from OAR when the query's results
        are too big to be  transmitted in a single page.
        Uses the "links' item in the json returned to check if
        an additionnal page has to be loaded.
        Returns : next page , next offset query
        """
        if "links" in self.raw_json:
            for page in self.raw_json['links']:
                if page['rel'] == 'next':
                    self.concatenate = True
                    self.next_page = True
                    # Keep only the query string ("?offset=...") part.
                    self.next_offset = "?" + page['href'].split("?")[1]
                    print>>sys.stderr, "\r\n \t FindNextPage NEXT LINK"
                    return

            # Had links but no 'next': this was the last page.
            if self.concatenate :
                self.end = True
                self.next_page = False
                self.next_offset = None

            return

        #Otherwise, no next page and no concatenate, must be a single page
        #Concatenate the single page and get out of here.
        else:
            self.next_page = False
            self.concatenate = True
            self.next_offset = None
            return

    @staticmethod
    def ConcatenateJsonPages(saved_json_list):
        """Merge the 'items' lists of the saved pages into one dict with a
        single combined 'items' list."""
        #reset items list

        tmp = {}
        tmp['items'] = []

        for page in saved_json_list:
            tmp['items'].extend(page['items'])
        return tmp


    def ResetNextPage(self):
        """Re-arm the pagination state before a fresh multi-page query."""
        self.next_page = True
        self.next_offset = None
        self.concatenate = False
        self.end = False
+
+
+class OARrestapi:
    def __init__(self, config_file = '/etc/sfa/oar_config.py'):
        """Load OAR server ip/port by exec'ing the config file into this
        instance's __dict__ (defines self.OAR_IP / self.OAR_PORT), and set
        up the json parser for OAR answers.
        """
        self.oarserver = {}


        self.oarserver['uri'] = None
        self.oarserver['postformat'] = 'json'

        try:
            execfile(config_file, self.__dict__)

            self.config_file = config_file
            # path to configuration data
            self.config_path = os.path.dirname(config_file)

        except IOError:
            raise IOError, "Could not find or load the configuration file: %s" \
                            % config_file
        #logger.setLevelDebug()
        self.oarserver['ip'] = self.OAR_IP
        self.oarserver['port'] = self.OAR_PORT
        # All job states OAR may report.
        self.jobstates  = ['Terminated', 'Hold', 'Waiting', 'toLaunch', \
                            'toError', 'toAckReservation', 'Launching', \
                            'Finishing', 'Running', 'Suspended', 'Resuming',\
                            'Error']

        self.parser = OARGETParser(self)
+
+
+ def GETRequestToOARRestAPI(self, request, strval=None, next_page=None, username = None ):
+ self.oarserver['uri'] = \
+ OARGETParser.OARrequests_uri_dict[request]['uri']
+ #Get job details with username
+ if 'owner' in OARGETParser.OARrequests_uri_dict[request] and username:
+ self.oarserver['uri'] += OARGETParser.OARrequests_uri_dict[request]['owner'] + username
+ headers = {}
+ data = json.dumps({})
+ logger.debug("OARrestapi \tGETRequestToOARRestAPI %s" %(request))
+ if strval:
+ self.oarserver['uri'] = self.oarserver['uri'].\
+ replace("id",str(strval))
+
+ if next_page:
+ self.oarserver['uri'] += next_page
+
+ if username:
+ headers['X-REMOTE_IDENT'] = username
+
+ logger.debug("OARrestapi: \t GETRequestToOARRestAPI \
+ self.oarserver['uri'] %s strval %s" \
+ %(self.oarserver['uri'], strval))
+ try :
+ #seems that it does not work if we don't add this
+ headers['content-length'] = '0'
+
+ conn = HTTPConnection(self.oarserver['ip'], \
+ self.oarserver['port'])
+ conn.request("GET", self.oarserver['uri'], data, headers)
+ resp = ( conn.getresponse()).read()
+ conn.close()
+
+ except HTTPException, error :
+ logger.log_exc("GET_OAR_SRVR : Problem with OAR server : %s " \
+ %(error))
+ #raise ServerError("GET_OAR_SRVR : Could not reach OARserver")
+ try:
+ js_dict = json.loads(resp)
+ #print "\r\n \t\t\t js_dict keys" , js_dict.keys(), " \r\n", js_dict
+ return js_dict
+
+ except ValueError, error:
+ logger.log_exc("Failed to parse Server Response: %s ERROR %s"\
+ %(js_dict, error))
+ #raise ServerError("Failed to parse Server Response:" + js)
+
+
+ def POSTRequestToOARRestAPI(self, request, datadict, username=None):
+ """ Used to post a job on OAR , along with data associated
+ with the job.
+
+ """
+
+ #first check that all params for are OK
+ try:
+ self.oarserver['uri'] = OAR_REQUEST_POST_URI_DICT[request]['uri']
+
+ except KeyError:
+ logger.log_exc("OARrestapi \tPOSTRequestToOARRestAPI request not \
+ valid")
+ return
+ if datadict and 'strval' in datadict:
+ self.oarserver['uri'] = self.oarserver['uri'].replace("id", \
+ str(datadict['strval']))
+ del datadict['strval']
+
+ data = json.dumps(datadict)
+ headers = {'X-REMOTE_IDENT':username, \
+ 'content-type': POST_FORMAT['json']['content'], \
+ 'content-length':str(len(data))}
+ try :
+
+ conn = HTTPConnection(self.oarserver['ip'], \
+ self.oarserver['port'])
+ conn.request("POST", self.oarserver['uri'], data, headers)
+ resp = (conn.getresponse()).read()
+ conn.close()
+ except NotConnected:
+ logger.log_exc("POSTRequestToOARRestAPI NotConnected ERROR: \
+ data %s \r\n \t\n \t\t headers %s uri %s" \
+ %(data,headers,self.oarserver['uri']))
+
+ #raise ServerError("POST_OAR_SRVR : error")
+
+ try:
+ answer = json.loads(resp)
+ logger.debug("POSTRequestToOARRestAPI : answer %s" %(answer))
+ return answer
+
+ except ValueError, error:
+ logger.log_exc("Failed to parse Server Response: error %s \
+ %s" %(error))
+ #raise ServerError("Failed to parse Server Response:" + answer)
+
+
+
def AddOarNodeId(tuplelist, value):
    """ Adds Oar internal node id to the nodes attributes """
    tuplelist += [('oar_id', int(value))]
+
+
def AddNodeNetworkAddr(dictnode, value):
    """Insert a new node entry keyed by its network address; the entry
    starts with node_id and hostname attributes (both set to the address).
    Returns the key."""
    key = value
    dictnode[key] = [('node_id', key), ('hostname', key)]
    return key
+
def AddNodeSite(tuplelist, value):
    """Record the node's site name (stringified)."""
    tuplelist += [('site', str(value))]
+
def AddNodeRadio(tuplelist, value):
    """Record the node's radio type (stringified)."""
    tuplelist += [('radio', str(value))]
+
+
def AddMobility(tuplelist, value):
    """Record the node's mobility flag: 0 means fixed ('False'),
    anything else mobile ('True')."""
    # bugfix: use '==' (value equality) instead of 'is' (object identity),
    # which only works for ints covered by CPython's small-int cache.
    if value == 0:
        tuplelist.append(('mobile', 'False'))
    else :
        tuplelist.append(('mobile', 'True'))
+
def AddPosX(tuplelist, value):
    """Record the node's x coordinate, unchanged."""
    tuplelist += [('posx', value)]
+
def AddPosY(tuplelist, value):
    """Record the node's y coordinate, unchanged."""
    tuplelist += [('posy', value)]
+
def AddPosZ(tuplelist, value):
    """Record the node's z coordinate, unchanged."""
    tuplelist += [('posz', value)]
+
def AddBootState(tuplelist, value):
    """Record the node's boot state (stringified)."""
    tuplelist += [('boot_state', str(value))]
+
def AddNodeId(dictnode, value):
    """Insert a new node entry keyed by its integer node id; the entry
    starts with the node_id attribute. Returns the id."""
    key = int(value)
    dictnode[key] = [('node_id', key)]
    return key
+
def AddHardwareType(tuplelist, value):
    """Split an 'archi:radio' string and record both parts, archi first."""
    parts = value.split(':')
    tuplelist.extend([('archi', parts[0]), ('radio', parts[1])])
+
+
+class OARGETParser:
    # Dispatch table for ParseResources/ParseNodes: maps each field of an
    # OAR "resources full" json item to the module-level helper that
    # records it into a node's attribute list (or, for 'network_address',
    # creates the node entry).
    resources_fulljson_dict = {
        'network_address' : AddNodeNetworkAddr,
        'site': AddNodeSite,
        'radio': AddNodeRadio,
        'mobile': AddMobility,
        'x': AddPosX,
        'y': AddPosY,
        'z':AddPosZ,
        'archi':AddHardwareType,
        'state':AddBootState,
        'id' : AddOarNodeId,
        }
+
+
    def __init__(self, srv) :
        """Set up parsing state for the OARrestapi server srv and probe the
        OAR API version with an initial GET_version request.
        """
        self.version_json_dict = {
            'api_version' : None , 'apilib_version' :None,\
            'api_timezone': None, 'api_timestamp': None, 'oar_version': None ,}
        self.config = Config()
        self.interface_hrn = self.config.SFA_INTERFACE_HRN
        self.timezone_json_dict = {
            'timezone': None, 'api_timestamp': None, }
        #self.jobs_json_dict = {
            #'total' : None, 'links' : [],\
            #'offset':None , 'items' : [], }
        #self.jobs_table_json_dict = self.jobs_json_dict
        #self.jobs_details_json_dict = self.jobs_json_dict
        self.server = srv
        # Accumulates parsed node attributes, keyed by node id.
        self.node_dictlist = {}

        # Pagination helper shared by all requests.
        self.json_page = JsonPage()

        self.site_dict = {}
        self.SendRequest("GET_version")
+
+
+
+
+
    def ParseVersion(self) :
        """Fill version_json_dict from the GET_version answer.

        Handles both the current field names ('oar_version', ...) and the
        older short names ('oar', 'api', 'apilib') used by previous OAR
        API releases.
        """
        #print self.json_page.raw_json
        #print >>sys.stderr, self.json_page.raw_json
        if 'oar_version' in self.json_page.raw_json :
            self.version_json_dict.update(api_version = \
                                self.json_page.raw_json['api_version'],
                    apilib_version = self.json_page.raw_json['apilib_version'],
                    api_timezone = self.json_page.raw_json['api_timezone'],
                    api_timestamp = self.json_page.raw_json['api_timestamp'],
                    oar_version = self.json_page.raw_json['oar_version'] )
        else :
            self.version_json_dict.update(api_version = \
                                self.json_page.raw_json['api'] ,
                    apilib_version = self.json_page.raw_json['apilib'],
                    api_timezone = self.json_page.raw_json['api_timezone'],
                    api_timestamp = self.json_page.raw_json['api_timestamp'],
                    oar_version = self.json_page.raw_json['oar'] )

        print self.version_json_dict['apilib_version']
+
+
+ def ParseTimezone(self) :
+ api_timestamp = self.json_page.raw_json['api_timestamp']
+ api_tz = self.json_page.raw_json['timezone']
+ return api_timestamp, api_tz
+
    def ParseJobs(self) :
        """Return the raw json of a jobs listing (no real parsing yet);
        also resets self.jobs_list as a side effect."""
        self.jobs_list = []
        print " ParseJobs "
        return self.json_page.raw_json
+
    def ParseJobsTable(self) :
        """Placeholder — jobs table answers are not parsed yet."""
        print "ParseJobsTable"
+
    def ParseJobsDetails (self):
        """Return the raw json of a job-details answer.

        Currently, this function is not used a lot, so it is unclear what
        would be useful to parse; returning the full json. NT
        """
        #logger.debug("ParseJobsDetails %s " %(self.json_page.raw_json))
        return self.json_page.raw_json
+
+
+ def ParseJobsIds(self):
+
+ job_resources = ['wanted_resources', 'name', 'id', 'start_time', \
+ 'state','owner','walltime','message']
+
+
+ job_resources_full = ['launching_directory', 'links', \
+ 'resubmit_job_id', 'owner', 'events', 'message', \
+ 'scheduled_start', 'id', 'array_id', 'exit_code', \
+ 'properties', 'state','array_index', 'walltime', \
+ 'type', 'initial_request', 'stop_time', 'project',\
+ 'start_time', 'dependencies','api_timestamp','submission_time', \
+ 'reservation', 'stdout_file', 'types', 'cpuset_name', \
+ 'name', 'wanted_resources','queue','stderr_file','command']
+
+
+ job_info = self.json_page.raw_json
+ #logger.debug("OARESTAPI ParseJobsIds %s" %(self.json_page.raw_json))
+ values = []
+ try:
+ for k in job_resources:
+ values.append(job_info[k])
+ return dict(zip(job_resources, values))
+
+ except KeyError:
+ logger.log_exc("ParseJobsIds KeyError ")
+
+
+ def ParseJobsIdResources(self):
+ """ Parses the json produced by the request
+ /oarapi/jobs/id/resources.json.
+ Returns a list of oar node ids that are scheduled for the
+ given job id.
+
+ """
+ job_resources = []
+ for resource in self.json_page.raw_json['items']:
+ job_resources.append(resource['id'])
+
+ #logger.debug("OARESTAPI \tParseJobsIdResources %s" %(self.json_page.raw_json))
+ return job_resources
+
    def ParseResources(self) :
        """ Parses the json produced by a get_resources request on oar.
        Unwraps the 'items' list into raw_json, then delegates the
        per-node attribute parsing to ParseNodes."""

        #logger.debug("OARESTAPI \tParseResources " )
        #resources are listed inside the 'items' list from the json
        self.json_page.raw_json = self.json_page.raw_json['items']
        self.ParseNodes()
+
+    def ParseReservedNodes(self):
+        """ Returns an array containing the list of the reserved nodes.
+        Each entry is a dict with t_from/t_until (epoch seconds, or the
+        literal string "As soon as possible" for asap jobs), the list of
+        reserved oar resource ids, and the job's state/id/owner.
+        """
+        #resources are listed inside the 'items' list from the json
+        reservation_list = []
+        job = {}
+        #Parse resources info
+        for json_element in self.json_page.raw_json['items']:
+            #In case it is a real reservation (not asap case)
+            if json_element['scheduled_start']:
+                job['t_from'] = json_element['scheduled_start']
+                # End time = scheduled start + requested walltime.
+                job['t_until'] = int(json_element['scheduled_start']) + \
+                    int(json_element['walltime'])
+                #Get resources id list for the job
+                job['resource_ids'] = \
+                    [ node_dict['id'] for node_dict in json_element['resources']]
+            else:
+                job['t_from'] = "As soon as possible"
+                job['t_until'] = "As soon as possible"
+                job['resource_ids'] = ["Undefined"]
+
+
+            job['state'] = json_element['state']
+            job['lease_id'] = json_element['id']
+
+
+            job['user'] = json_element['owner']
+            #logger.debug("OARRestapi \tParseReservedNodes job %s" %(job))
+            reservation_list.append(job)
+            #reset dict
+            job = {}
+        return reservation_list
+
+    def ParseRunningJobs(self):
+        """ Gets the list of nodes currently in use from the attributes of the
+        running jobs.
+        Returns the network addresses of every node of every running job
+        (duplicates possible if a node appears in several jobs).
+        """
+        logger.debug("OARESTAPI \tParseRunningJobs__________________________ ")
+        #resources are listed inside the 'items' list from the json
+        nodes = []
+        for job in self.json_page.raw_json['items']:
+            for node in job['nodes']:
+                nodes.append(node['network_address'])
+        return nodes
+
+
+
+    def ParseDeleteJobs(self):
+        """ No need to parse anything in this function. A POST
+        is done to delete the job.
+
+        """
+        # Intentional no-op: job deletion has no useful answer body.
+        return
+
+    def ParseResourcesFull(self) :
+        """ This method is responsible for parsing all the attributes
+        of all the nodes returned by OAR when issuing a get resources full.
+        The information from the nodes and the sites are separated.
+        Updates the node_dictlist so that the dictionnary of the platform's
+        nodes is available afterwards.
+
+        """
+        logger.debug("OARRESTAPI ParseResourcesFull________________________ ")
+        #print self.json_page.raw_json[1]
+        #resources are listed inside the 'items' list from the json
+        # OAR apilib 0.2.10 returns the resource list directly, newer
+        # versions wrap it in 'items'.
+        if self.version_json_dict['apilib_version'] != "0.2.10" :
+            self.json_page.raw_json = self.json_page.raw_json['items']
+        self.ParseNodes()
+        self.ParseSites()
+        return self.node_dictlist
+
+    def ParseResourcesFullSites(self) :
+        """ UNUSED. Originally used to get information from the sites.
+        ParseResourcesFull is used instead.
+
+        """
+        # Same apilib-version handling as ParseResourcesFull, but returns
+        # the site dictionary instead of the node list.
+        if self.version_json_dict['apilib_version'] != "0.2.10" :
+            self.json_page.raw_json = self.json_page.raw_json['items']
+        self.ParseNodes()
+        self.ParseSites()
+        return self.site_dict
+
+
+
+    def ParseNodes(self):
+        """ Parse nodes properties from OAR
+        Put them into a dictionary with key = node id and value is a dictionary
+        of the node properties and properties'values.
+
+        """
+        node_id = None
+        # NOTE(review): resources_fulljson_dict appears to map OAR property
+        # names to handler callables (called below with the target dict and
+        # the raw value) -- confirm against its definition.
+        keys = self.resources_fulljson_dict.keys()
+        keys.sort()
+
+        for dictline in self.json_page.raw_json:
+            node_id = None
+            # dictionary is empty and/or a new node has to be inserted
+            node_id = self.resources_fulljson_dict['network_address'](\
+                self.node_dictlist, dictline['network_address'])
+            for k in keys:
+                if k in dictline:
+                    # 'network_address' was already consumed above to
+                    # obtain node_id; skip it here.
+                    if k == 'network_address':
+                        continue
+
+                    self.resources_fulljson_dict[k](\
+                        self.node_dictlist[node_id], dictline[k])
+
+            #The last property has been inserted in the property tuple list,
+            #reset node_id
+            #Turn the property tuple list (=dict value) into a dictionary
+            self.node_dictlist[node_id] = dict(self.node_dictlist[node_id])
+            node_id = None
+
+    @staticmethod
+    def slab_hostname_to_hrn( root_auth, hostname):
+        # Build a node hrn by prefixing the hostname with the root authority.
+        return root_auth + '.'+ hostname
+
+
+
+    def ParseSites(self):
+        """ Returns a list of dictionnaries containing the sites' attributes.
+        Also updates every node in self.node_dictlist with its hrn, and
+        fills self.site_dict keyed by site name.
+        """
+        nodes_per_site = {}
+        config = Config()
+        #logger.debug(" OARrestapi.py \tParseSites self.node_dictlist %s"\
+        #%(self.node_dictlist))
+        # Create a list of nodes per site_id
+        for node_id in self.node_dictlist:
+            node = self.node_dictlist[node_id]
+
+            if node['site'] not in nodes_per_site:
+                nodes_per_site[node['site']] = []
+                nodes_per_site[node['site']].append(node['node_id'])
+            else:
+                if node['node_id'] not in nodes_per_site[node['site']]:
+                    nodes_per_site[node['site']].append(node['node_id'])
+
+        #Create a site dictionary whose key is site_login_base (name of the site)
+        # and value is a dictionary of properties, including the list
+        #of the node_ids
+        for node_id in self.node_dictlist:
+            node = self.node_dictlist[node_id]
+            #node.update({'hrn':self.slab_hostname_to_hrn(self.interface_hrn, \
+            #node['site'],node['hostname'])})
+            node.update({'hrn':self.slab_hostname_to_hrn(self.interface_hrn, node['hostname'])})
+            self.node_dictlist.update({node_id:node})
+
+            if node['site'] not in self.site_dict:
+                # Most of these fields are hard-coded placeholders kept for
+                # PLC-style site-record compatibility.
+                # NOTE(review): "http,//www.senslab.info" and "- 2.10336"
+                # look like typos for "http://..." and "-2.10336" -- confirm
+                # before changing these runtime values.
+                self.site_dict[node['site']] = {
+                    'site':node['site'],
+                    'node_ids':nodes_per_site[node['site']],
+                    'latitude':"48.83726",
+                    'longitude':"- 2.10336",'name':config.SFA_REGISTRY_ROOT_AUTH,
+                    'pcu_ids':[], 'max_slices':None, 'ext_consortium_id':None,
+                    'max_slivers':None, 'is_public':True, 'peer_site_id': None,
+                    'abbreviated_name':"senslab", 'address_ids': [],
+                    'url':"http,//www.senslab.info", 'person_ids':[],
+                    'site_tag_ids':[], 'enabled': True, 'slice_ids':[],
+                    'date_created': None, 'peer_id': None }
+            #if node['site_login_base'] not in self.site_dict.keys():
+                #self.site_dict[node['site_login_base']] = {'login_base':node['site_login_base'],
+                #'node_ids':nodes_per_site[node['site_login_base']],
+                #'latitude':"48.83726",
+                #'longitude':"- 2.10336",'name':"senslab",
+                #'pcu_ids':[], 'max_slices':None, 'ext_consortium_id':None,
+                #'max_slivers':None, 'is_public':True, 'peer_site_id': None,
+                #'abbreviated_name':"senslab", 'address_ids': [],
+                #'url':"http,//www.senslab.info", 'person_ids':[],
+                #'site_tag_ids':[], 'enabled': True, 'slice_ids':[],
+                #'date_created': None, 'peer_id': None }
+
+
+
+
+    # Dispatch table used by SendRequest: maps a symbolic request name to
+    # the OAR REST uri to fetch and the parse method to run on the answer.
+    OARrequests_uri_dict = {
+        'GET_version':
+        {'uri':'/oarapi/version.json', 'parse_func': ParseVersion},
+        'GET_timezone':
+        {'uri':'/oarapi/timezone.json' ,'parse_func': ParseTimezone },
+        'GET_jobs':
+        {'uri':'/oarapi/jobs.json','parse_func': ParseJobs},
+        'GET_jobs_id':
+        {'uri':'/oarapi/jobs/id.json','parse_func': ParseJobsIds},
+        'GET_jobs_id_resources':
+        {'uri':'/oarapi/jobs/id/resources.json',\
+        'parse_func': ParseJobsIdResources},
+        'GET_jobs_table':
+        {'uri':'/oarapi/jobs/table.json','parse_func': ParseJobsTable},
+        'GET_jobs_details':
+        {'uri':'/oarapi/jobs/details.json',\
+        'parse_func': ParseJobsDetails},
+        'GET_reserved_nodes':
+        {'uri':
+        '/oarapi/jobs/details.json?state=Running,Waiting,Launching',\
+        'owner':'&user=',
+        'parse_func':ParseReservedNodes},
+
+
+        'GET_running_jobs':
+        {'uri':'/oarapi/jobs/details.json?state=Running',\
+        'parse_func':ParseRunningJobs},
+        'GET_resources_full':
+        {'uri':'/oarapi/resources/full.json',\
+        'parse_func': ParseResourcesFull},
+        'GET_sites':
+        {'uri':'/oarapi/resources/full.json',\
+        'parse_func': ParseResourcesFullSites},
+        'GET_resources':
+        {'uri':'/oarapi/resources.json' ,'parse_func': ParseResources},
+        'DELETE_jobs_id':
+        {'uri':'/oarapi/jobs/id.json' ,'parse_func': ParseDeleteJobs}
+        }
+
+
+
+
+ def SendRequest(self, request, strval = None , username = None):
+ """ Connects to OAR , sends the valid GET requests and uses
+ the appropriate json parsing functions.
+
+ """
+ save_json = None
+
+ self.json_page.ResetNextPage()
+ save_json = []
+
+ if request in self.OARrequests_uri_dict :
+ while self.json_page.next_page:
+ self.json_page.raw_json = self.server.GETRequestToOARRestAPI(\
+ request, \
+ strval, \
+ self.json_page.next_offset, \
+ username)
+ self.json_page.FindNextPage()
+ if self.json_page.concatenate:
+ save_json.append(self.json_page.raw_json)
+
+ if self.json_page.concatenate and self.json_page.end :
+ self.json_page.raw_json = \
+ self.json_page.ConcatenateJsonPages(save_json)
+
+ return self.OARrequests_uri_dict[request]['parse_func'](self)
+ else:
+ logger.error("OARRESTAPI OARGetParse __init__ : ERROR_REQUEST " \
+ %(request))
+
--- /dev/null
+import time
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn, get_authority
+
+from sfa.rspecs.rspec import RSpec
+#from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.login import Login
+from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.granularity import Granularity
+from sfa.rspecs.version_manager import VersionManager
+
+
+from sfa.rspecs.elements.versions.slabv1Node import SlabPosition, SlabNode, \
+ SlabLocation
+from sfa.util.sfalogging import logger
+
+from sfa.util.xrn import Xrn
+
+def slab_xrn_to_hostname(xrn):
+    """Return the hostname encoded in a node xrn (the unescaped leaf)."""
+    return Xrn.unescape(Xrn(xrn=xrn, type='node').get_leaf())
+
+def slab_xrn_object(root_auth, hostname):
+    """Build a node Xrn from the root authority and a hostname.
+    Attributes are urn and hrn.
+    Get the hostname using slab_xrn_to_hostname on the urn.
+
+    """
+    return Xrn('.'.join( [root_auth, Xrn.escape(hostname)]), type='node')
+
+class SlabAggregate:
+    """Builds RSpecs (advertisement and manifest) for the senslab testbed."""
+
+    # NOTE(review): these are class-level (shared) mutable attributes,
+    # not per-instance state -- confirm that sharing is intended.
+    sites = {}
+    nodes = {}
+    api = None
+    interfaces = {}
+    links = {}
+    node_tags = {}
+
+    prepared = False
+
+    user_options = {}
+
+    def __init__(self, driver):
+        # driver: the SlabDriver giving access to the testbed API.
+        self.driver = driver
+
+    def get_slice_and_slivers(self, slice_xrn, login=None):
+        """
+        Returns a dict of slivers keyed on the sliver's node_id,
+        together with the list of slice records matching slice_xrn.
+        Returns (None, {}) when slice_xrn is empty or no slice is found.
+        """
+        slivers = {}
+        sfa_slice = None
+        if not slice_xrn:
+            return (sfa_slice, slivers)
+        slice_urn = hrn_to_urn(slice_xrn, 'slice')
+        slice_hrn, _ = urn_to_hrn(slice_xrn)
+        slice_name = slice_hrn
+
+        slices = self.driver.slab_api.GetSlices(slice_filter= str(slice_name), \
+            slice_filter_type = 'slice_hrn', \
+            login=login)
+
+        logger.debug("Slabaggregate api \tget_slice_and_slivers \
+                        sfa_slice %s \r\n slices %s self.driver.hrn %s" \
+                        %(sfa_slice, slices, self.driver.hrn))
+        if not slices:
+            return (sfa_slice, slivers)
+        #if isinstance(sfa_slice, list):
+            #sfa_slice = slices[0]
+        #else:
+            #sfa_slice = slices
+
+        # sort slivers by node id , if there is a job
+        #and therfore, node allocated to this slice
+        for sfa_slice in slices:
+            try:
+                node_ids_list = sfa_slice['node_ids']
+            except KeyError:
+                logger.log_exc("SLABAGGREGATE \t \
+                                        get_slice_and_slivers KeyError ")
+                continue
+
+            # One sliver per node allocated to the slice's job.
+            for node in node_ids_list:
+                sliver_xrn = Xrn(slice_urn, type='sliver', id=node)
+                sliver_xrn.set_authority(self.driver.hrn)
+                sliver = Sliver({'sliver_id':sliver_xrn.urn,
+                                'name': sfa_slice['hrn'],
+                                'type': 'slab-node',
+                                'tags': []})
+
+                slivers[node] = sliver
+
+
+        #Add default sliver attribute :
+        #connection information for senslab
+        # NOTE(review): assumes local slice hrns look like
+        # "<auth>.<user>_slice" so the ldap login is the part before '_'.
+        if get_authority (sfa_slice['hrn']) == self.driver.slab_api.root_auth:
+            tmp = sfa_slice['hrn'].split('.')
+            ldap_username = tmp[1].split('_')[0]
+            ssh_access = None
+            slivers['default_sliver'] = {'ssh': ssh_access , \
+                'login': ldap_username}
+
+        #TODO get_slice_and_slivers Find the login of the external user
+
+        logger.debug("SLABAGGREGATE api get_slice_and_slivers slivers %s "\
+            %(slivers))
+        return (slices, slivers)
+
+
+
+    # NOTE(review): mutable default arguments (slivers=[], options={}) --
+    # harmless here since they are never mutated, but confirm.
+    def get_nodes(self, slices=None, slivers=[], options={}):
+        """Build the list of rspec node elements for the testbed.
+        All nodes are returned when no slice restricts the selection;
+        otherwise only nodes belonging to the given slices. Sliver and
+        ssh-login information is attached to nodes present in `slivers`.
+        """
+        # NT: the semantic of this function is not clear to me :
+        # if slice is not defined, then all the nodes should be returned
+        # if slice is defined, we should return only the nodes that
+        # are part of this slice
+        # but what is the role of the slivers parameter ?
+        # So i assume that slice['node_ids'] will be the same as slivers for us
+        #filter_dict = {}
+        #if slice_xrn:
+            #if not slices or not slices['node_ids']:
+                #return ([],[])
+        #tags_filter = {}
+
+        # get the granularity in second for the reservation system
+        grain = self.driver.slab_api.GetLeaseGranularity()
+
+        # Commenting this part since all nodes should be returned,
+        # even if a slice is provided
+        #if slice :
+        #    if 'node_ids' in slice and slice['node_ids']:
+        #        #first case, a non empty slice was provided
+        #        filter['hostname'] = slice['node_ids']
+        #        tags_filter=filter.copy()
+        #        nodes = self.driver.slab_api.GetNodes(filter['hostname'])
+        #    else :
+        #        #second case, a slice was provided, but is empty
+        #        nodes={}
+        #else :
+        #    #third case, no slice was provided
+        #    nodes = self.driver.slab_api.GetNodes()
+        nodes = self.driver.slab_api.GetNodes()
+        #geni_available = options.get('geni_available')
+        #if geni_available:
+            #filter['boot_state'] = 'boot'
+
+        #filter.update({'peer_id': None})
+        #nodes = self.driver.slab_api.GetNodes(filter['hostname'])
+
+        #site_ids = []
+        #interface_ids = []
+        #tag_ids = []
+        nodes_dict = {}
+        # Index node records by node_id (currently only used for lookup
+        # convenience below).
+        for node in nodes:
+
+            nodes_dict[node['node_id']] = node
+        #logger.debug("SLABAGGREGATE api get_nodes nodes %s "\
+            #%(nodes ))
+        # get sites
+        #sites_dict  = self.get_sites({'site_id': site_ids})
+        # get interfaces
+        #interfaces = self.get_interfaces({'interface_id':interface_ids})
+        # get tags
+        #node_tags = self.get_node_tags(tags_filter)
+
+        #if slices, this means we got to list all the nodes given to this slice
+        # Make a list of all the nodes in the slice before getting their attributes
+        rspec_nodes = []
+        slice_nodes_list = []
+        logger.debug("SLABAGGREGATE api get_nodes slice_nodes_list %s "\
+            %(slices ))
+        if slices:
+            for one_slice in slices:
+                try:
+                    slice_nodes_list = one_slice['node_ids']
+                except KeyError:
+                    pass
+                #for node in one_slice['node_ids']:
+                    #slice_nodes_list.append(node)
+
+        reserved_nodes = self.driver.slab_api.GetNodesCurrentlyInUse()
+        logger.debug("SLABAGGREGATE api get_nodes slice_nodes_list %s "\
+            %(slice_nodes_list))
+        for node in nodes:
+            # skip whitelisted nodes
+            #if node['slice_ids_whitelist']:
+                #if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
+                    #continue
+            #rspec_node = Node()
+            #logger.debug("SLABAGGREGATE api get_nodes node  %s "\
+                #%(node))
+            # Empty slice_nodes_list means "no slice restriction":
+            # advertise every node.
+            if slice_nodes_list == [] or node['hostname'] in slice_nodes_list:
+
+                rspec_node = SlabNode()
+                # xxx how to retrieve site['login_base']
+                #site_id=node['site_id']
+                #site=sites_dict[site_id]
+
+                rspec_node['mobile'] = node['mobile']
+                rspec_node['archi'] = node['archi']
+                rspec_node['radio'] = node['radio']
+
+                slab_xrn = slab_xrn_object(self.driver.slab_api.root_auth, \
+                    node['hostname'])
+                rspec_node['component_id'] = slab_xrn.urn
+                rspec_node['component_name'] = node['hostname']
+                rspec_node['component_manager_id'] = \
+                    hrn_to_urn(self.driver.slab_api.root_auth, 'authority+sa')
+
+                # Senslab's nodes are federated : there is only one authority
+                # for all Senslab sites, registered in SFA.
+                # Removing the part including the site
+                # in authority_id SA 27/07/12
+                rspec_node['authority_id'] = rspec_node['component_manager_id']
+
+                # do not include boot state (<available> element)
+                #in the manifest rspec
+
+
+                rspec_node['boot_state'] = node['boot_state']
+                if node['hostname'] in reserved_nodes:
+                    rspec_node['boot_state'] = "Reserved"
+                rspec_node['exclusive'] = 'true'
+                rspec_node['hardware_types'] = [HardwareType({'name': \
+                    'slab-node'})]
+
+                # only doing this because protogeni rspec needs
+                # to advertise available initscripts
+                # add site/interface info to nodes.
+                # assumes that sites, interfaces and tags have already been
+                #prepared.
+
+                location = SlabLocation({'country':'France','site': \
+                    node['site']})
+                rspec_node['location'] = location
+
+
+                # Copy positioning fields (whatever SlabPosition declares)
+                # from the node record when present.
+                position = SlabPosition()
+                for field in position :
+                    try:
+                        position[field] = node[field]
+                    except KeyError, error :
+                        logger.log_exc("SLABAGGREGATE\t get_nodes \
+                            position %s "%(error))
+
+                rspec_node['position'] = position
+                #rspec_node['interfaces'] = []
+
+                # Granularity
+                granularity = Granularity({'grain': grain})
+                rspec_node['granularity'] = granularity
+                rspec_node['tags'] = []
+                if node['hostname'] in slivers:
+                    # add sliver info
+                    sliver = slivers[node['hostname']]
+                    rspec_node['sliver_id'] = sliver['sliver_id']
+                    rspec_node['client_id'] = node['hostname']
+                    rspec_node['slivers'] = [sliver]
+
+                    # slivers always provide the ssh service
+                    login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
+                    service = Services({'login': login})
+                    rspec_node['services'] = [service]
+                rspec_nodes.append(rspec_node)
+
+        return (rspec_nodes)
+
+    def get_leases(self, slice_record = None, options = {}):
+        """Build the list of rspec Lease elements from the testbed leases:
+        one Lease per reserved node per job, with start time and a
+        duration expressed in grains.
+        """
+        now = int(time.time())
+        lease_filter = {'clip': now }
+
+        #if slice_record:
+            #lease_filter.update({'name': slice_record['name']})
+        # NOTE(review): return_fields and site_ids are currently unused.
+        return_fields = ['lease_id', 'hostname', 'site_id', \
+            'name', 'start_time', 'duration']
+        #leases = self.driver.slab_api.GetLeases(lease_filter)
+        leases = self.driver.slab_api.GetLeases()
+        grain = self.driver.slab_api.GetLeaseGranularity()
+        site_ids = []
+        rspec_leases = []
+        for lease in leases:
+            #as many leases as there are nodes in the job
+            for node in lease['reserved_nodes']:
+                rspec_lease = Lease()
+                rspec_lease['lease_id'] = lease['lease_id']
+                #site = node['site_id']
+                slab_xrn = slab_xrn_object(self.driver.slab_api.root_auth, node)
+                rspec_lease['component_id'] = slab_xrn.urn
+                #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, \
+                                        #site, node['hostname'])
+                try:
+                    rspec_lease['slice_id'] = lease['slice_id']
+                except KeyError:
+                    #No info on the slice used in slab_xp table
+                    pass
+                rspec_lease['start_time'] = lease['t_from']
+                rspec_lease['duration'] = (lease['t_until'] - lease['t_from']) \
+                     / grain
+                rspec_leases.append(rspec_lease)
+        return rspec_leases
+
+
+ #rspec_leases = []
+ #for lease in leases:
+
+ #rspec_lease = Lease()
+
+ ## xxx how to retrieve site['login_base']
+
+ #rspec_lease['lease_id'] = lease['lease_id']
+ #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, \
+ #site['login_base'], lease['hostname'])
+ #slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
+ #slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ #rspec_lease['slice_id'] = slice_urn
+ #rspec_lease['t_from'] = lease['t_from']
+ #rspec_lease['t_until'] = lease['t_until']
+ #rspec_leases.append(rspec_lease)
+ #return rspec_leases
+#from plc/aggregate.py
+    def get_rspec(self, slice_xrn=None, login=None, version = None, options={}):
+        """Build and return the rspec xml string.
+        Produces an 'ad' rspec when no slice_xrn is given, a 'manifest'
+        otherwise. options['list_leases'] selects what is included:
+        'resources', 'leases' or 'all' (the default when unspecified).
+        """
+        rspec = None
+        version_manager = VersionManager()
+        version = version_manager.get_version(version)
+        logger.debug("SlabAggregate \t get_rspec ***version %s \
+                    version.type %s  version.version %s options %s \r\n" \
+                    %(version,version.type,version.version,options))
+
+        if not slice_xrn:
+            rspec_version = version_manager._get_version(version.type, \
+                version.version, 'ad')
+
+        else:
+            rspec_version = version_manager._get_version(version.type, \
+                version.version, 'manifest')
+
+        slices, slivers = self.get_slice_and_slivers(slice_xrn, login)
+        #at this point sliver may be empty if no senslab job
+        #is running for this user/slice.
+        rspec = RSpec(version=rspec_version, user_options=options)
+
+
+        #if slice and 'expires' in slice:
+           #rspec.xml.set('expires',  datetime_to_epoch(slice['expires']))
+        # add sliver defaults
+        #nodes, links = self.get_nodes(slice, slivers)
+        logger.debug("\r\n \r\n SlabAggregate \tget_rspec *** slice_xrn %s slices  %s\r\n \r\n"\
+            %(slice_xrn, slices))
+
+        try:
+            lease_option = options['list_leases']
+        except KeyError:
+            #If no options are specified, at least print the resources
+            lease_option = 'all'
+           #if slice_xrn :
+               #lease_option = 'all'
+            pass
+
+        if lease_option in ['all', 'resources']:
+        #if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
+            nodes = self.get_nodes(slices, slivers)
+            logger.debug("\r\n \r\n SlabAggregate \ lease_option %s  get rspec  ******* nodes %s"\
+                %(lease_option, nodes[0]))
+
+            sites_set = set([node['location']['site'] for node in nodes] )
+
+            #In case creating a job,  slice_xrn is not set to None
+            rspec.version.add_nodes(nodes)
+            if slice_xrn :
+                #Get user associated with this slice
+                #user = dbsession.query(RegRecord).filter_by(record_id = \
+                                        #slices['record_id_user']).first()
+
+                #ldap_username = (user.hrn).split('.')[1]
+
+
+                #for one_slice in slices :
+                # NOTE(review): assumes slice hrn "<auth>.<user>_slice";
+                # the ldap login is the part before '_'.
+                ldap_username = slices[0]['hrn']
+                tmp = ldap_username.split('.')
+                ldap_username = tmp[1].split('_')[0]
+
+                if version.type == "Slab":
+                    rspec.version.add_connection_information(ldap_username, sites_set)
+
+            default_sliver = slivers.get('default_sliver', [])
+            if default_sliver:
+                #default_sliver_attribs = default_sliver.get('tags', [])
+                logger.debug("SlabAggregate \tget_rspec **** \
+                        default_sliver%s \r\n" %(default_sliver))
+                for attrib in default_sliver:
+                    rspec.version.add_default_sliver_attribute(attrib, \
+                        default_sliver[attrib])
+        if lease_option in ['all','leases']:
+        #if options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
+            leases = self.get_leases(slices)
+            rspec.version.add_leases(leases)
+
+        #logger.debug("SlabAggregate \tget_rspec ******* rspec_toxml %s \r\n"\
+            #%(rspec.toxml()))
+        return rspec.toxml()
--- /dev/null
+import subprocess
+import os
+
+from datetime import datetime
+
+from sfa.util.faults import SliverDoesNotExist, UnknownSfaType
+from sfa.util.sfalogging import logger
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegUser, RegSlice, RegKey
+from sqlalchemy.orm import joinedload
+
+from sfa.trust.certificate import Keypair, convert_public_key
+from sfa.trust.gid import create_uuid
+from sfa.trust.hierarchy import Hierarchy
+
+from sfa.managers.driver import Driver
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+from sfa.util.xrn import Xrn, hrn_to_urn, get_authority
+
+
+## thierry: everything that is API-related (i.e. handling incoming requests)
+# is taken care of
+# SlabDriver should be really only about talking to the senslab testbed
+
+
+from sfa.senslab.OARrestapi import OARrestapi
+from sfa.senslab.LDAPapi import LDAPapi
+
+from sfa.senslab.slabpostgres import SlabDB, slab_dbsession, SenslabXP
+
+
+from sfa.senslab.slabaggregate import SlabAggregate, slab_xrn_to_hostname, \
+ slab_xrn_object
+from sfa.senslab.slabslices import SlabSlices
+
+
+
+# thierry : note
+# this inheritance scheme is so that the driver object can receive
+# GetNodes or GetSites sorts of calls directly
+# and thus minimize the differences in the managers with the pl version
+
+
+class SlabTestbedAPI():
+
+    def __init__(self, config):
+        # Handles to the two senslab backends: OAR (job scheduler REST API)
+        # and LDAP (user accounts).
+        self.oar = OARrestapi()
+        self.ldap = LDAPapi()
+        self.time_format = "%Y-%m-%d %H:%M:%S"
+        self.root_auth = config.SFA_REGISTRY_ROOT_AUTH
+        self.grain = 600 # 10 mins lease
+        return
+
+
+
+ #TODO clean GetPeers. 05/07/12SA
+    @staticmethod
+    def GetPeers ( auth = None, peer_filter=None, return_fields_list=None):
+        """Return the authority records known to the SFA registry db.
+        peer_filter selects a single authority hrn; otherwise all
+        authority records are returned.
+        NOTE(review): return_fields_list is accepted but not applied --
+        the full records are always returned; confirm callers' needs.
+        """
+        existing_records = {}
+        existing_hrns_by_types = {}
+        logger.debug("SLABDRIVER \tGetPeers auth = %s, peer_filter %s, \
+                    return_field %s " %(auth , peer_filter, return_fields_list))
+        all_records = dbsession.query(RegRecord).filter(RegRecord.type.like('%authority%')).all()
+
+        # Index records by (hrn, type) and group hrns by record type.
+        for record in all_records:
+            existing_records[(record.hrn, record.type)] = record
+            if record.type not in existing_hrns_by_types:
+                existing_hrns_by_types[record.type] = [record.hrn]
+            else:
+                existing_hrns_by_types[record.type].append(record.hrn)
+
+
+        logger.debug("SLABDRIVER \tGetPeer\texisting_hrns_by_types %s "\
+            %( existing_hrns_by_types))
+        records_list = []
+
+        try:
+            if peer_filter:
+                records_list.append(existing_records[(peer_filter,'authority')])
+            else :
+                for hrn in existing_hrns_by_types['authority']:
+                    records_list.append(existing_records[(hrn,'authority')])
+
+            logger.debug("SLABDRIVER \tGetPeer \trecords_list  %s " \
+                %(records_list))
+
+        except KeyError:
+            pass
+
+        return_records = records_list
+        if not peer_filter and not return_fields_list:
+            return records_list
+
+
+        logger.debug("SLABDRIVER \tGetPeer return_records %s " \
+            %(return_records))
+        return return_records
+
+
+
+ #TODO : Handling OR request in make_ldap_filters_from_records
+ #instead of the for loop
+ #over the records' list
+ def GetPersons(self, person_filter=None):
+ """
+ person_filter should be a list of dictionnaries when not set to None.
+ Returns a list of users whose accounts are enabled found in ldap.
+
+ """
+ logger.debug("SLABDRIVER \tGetPersons person_filter %s" \
+ %(person_filter))
+ person_list = []
+ if person_filter and isinstance(person_filter, list):
+ #If we are looking for a list of users (list of dict records)
+ #Usually the list contains only one user record
+ for searched_attributes in person_filter:
+
+ #Get only enabled user accounts in senslab LDAP :
+ #add a filter for make_ldap_filters_from_record
+ person = self.ldap.LdapFindUser(searched_attributes, \
+ is_user_enabled=True)
+ #If a person was found, append it to the list
+ if person:
+ person_list.append(person)
+
+ #If the list is empty, return None
+ if len(person_list) is 0:
+ person_list = None
+
+ else:
+ #Get only enabled user accounts in senslab LDAP :
+ #add a filter for make_ldap_filters_from_record
+ person_list = self.ldap.LdapFindUser(is_user_enabled=True)
+
+ return person_list
+
+    def GetTimezone(self):
+        """ Get the OAR server time and timezone.
+        Returns the (timestamp, timezone) pair from the OAR REST API.
+        Unused SA 16/11/12"""
+        server_timestamp, server_tz = self.oar.parser.\
+            SendRequest("GET_timezone")
+        return server_timestamp, server_tz
+
+
+ def DeleteJobs(self, job_id, username):
+ logger.debug("SLABDRIVER \tDeleteJobs jobid %s username %s " %(job_id, username))
+ if not job_id or job_id is -1:
+ return
+ #username = slice_hrn.split(".")[-1].rstrip("_slice")
+ reqdict = {}
+ reqdict['method'] = "delete"
+ reqdict['strval'] = str(job_id)
+
+
+ answer = self.oar.POSTRequestToOARRestAPI('DELETE_jobs_id', \
+ reqdict,username)
+ logger.debug("SLABDRIVER \tDeleteJobs jobid %s \r\n answer %s \
+ username %s" %(job_id, answer, username))
+ return answer
+
+
+
+ ##TODO : Unused GetJobsId ? SA 05/07/12
+ #def GetJobsId(self, job_id, username = None ):
+ #"""
+ #Details about a specific job.
+ #Includes details about submission time, jot type, state, events,
+ #owner, assigned ressources, walltime etc...
+
+ #"""
+ #req = "GET_jobs_id"
+ #node_list_k = 'assigned_network_address'
+ ##Get job info from OAR
+ #job_info = self.oar.parser.SendRequest(req, job_id, username)
+
+ #logger.debug("SLABDRIVER \t GetJobsId %s " %(job_info))
+ #try:
+ #if job_info['state'] == 'Terminated':
+ #logger.debug("SLABDRIVER \t GetJobsId job %s TERMINATED"\
+ #%(job_id))
+ #return None
+ #if job_info['state'] == 'Error':
+ #logger.debug("SLABDRIVER \t GetJobsId ERROR message %s "\
+ #%(job_info))
+ #return None
+
+ #except KeyError:
+ #logger.error("SLABDRIVER \tGetJobsId KeyError")
+ #return None
+
+ #parsed_job_info = self.get_info_on_reserved_nodes(job_info, \
+ #node_list_k)
+ ##Replaces the previous entry
+ ##"assigned_network_address" / "reserved_resources"
+ ##with "node_ids"
+ #job_info.update({'node_ids':parsed_job_info[node_list_k]})
+ #del job_info[node_list_k]
+ #logger.debug(" \r\nSLABDRIVER \t GetJobsId job_info %s " %(job_info))
+ #return job_info
+
+
+    def GetJobsResources(self, job_id, username = None):
+        """Return {'node_ids': hostnames} for the nodes scheduled for the
+        given oar job id (oar resource ids translated to hostnames).
+        """
+        #job_resources=['reserved_resources', 'assigned_resources',\
+            #'job_id', 'job_uri', 'assigned_nodes',\
+            #'api_timestamp']
+        #assigned_res = ['resource_id', 'resource_uri']
+        #assigned_n = ['node', 'node_uri']
+
+        req = "GET_jobs_id_resources"
+
+
+        #Get job resources list from OAR
+        node_id_list = self.oar.parser.SendRequest(req, job_id, username)
+        logger.debug("SLABDRIVER \t GetJobsResources  %s " %(node_id_list))
+
+        hostname_list = \
+            self.__get_hostnames_from_oar_node_ids(node_id_list)
+
+
+        #Replaces the previous entry "assigned_network_address" /
+        #"reserved_resources"
+        #with "node_ids"
+        job_info = {'node_ids': hostname_list}
+
+        return job_info
+
+
+ def get_info_on_reserved_nodes(self, job_info, node_list_name):
+ #Get the list of the testbed nodes records and make a
+ #dictionnary keyed on the hostname out of it
+ node_list_dict = self.GetNodes()
+ #node_hostname_list = []
+ node_hostname_list = [node['hostname'] for node in node_list_dict]
+ #for node in node_list_dict:
+ #node_hostname_list.append(node['hostname'])
+ node_dict = dict(zip(node_hostname_list, node_list_dict))
+ try :
+ reserved_node_hostname_list = []
+ for index in range(len(job_info[node_list_name])):
+ #job_info[node_list_name][k] =
+ reserved_node_hostname_list[index] = \
+ node_dict[job_info[node_list_name][index]]['hostname']
+
+ logger.debug("SLABDRIVER \t get_info_on_reserved_nodes \
+ reserved_node_hostname_list %s" \
+ %(reserved_node_hostname_list))
+ except KeyError:
+ logger.error("SLABDRIVER \t get_info_on_reserved_nodes KEYERROR " )
+
+ return reserved_node_hostname_list
+
+    def GetNodesCurrentlyInUse(self):
+        """Returns a list of all the nodes already involved in an oar job"""
+        # Delegates entirely to the OAR parser (GET_running_jobs).
+        return self.oar.parser.SendRequest("GET_running_jobs")
+
+ def __get_hostnames_from_oar_node_ids(self, resource_id_list ):
+ full_nodes_dict_list = self.GetNodes()
+ #Put the full node list into a dictionary keyed by oar node id
+ oar_id_node_dict = {}
+ for node in full_nodes_dict_list:
+ oar_id_node_dict[node['oar_id']] = node
+
+ #logger.debug("SLABDRIVER \t __get_hostnames_from_oar_node_ids\
+ #oar_id_node_dict %s" %(oar_id_node_dict))
+
+ hostname_dict_list = []
+ for resource_id in resource_id_list:
+ #Because jobs requested "asap" do not have defined resources
+ if resource_id is not "Undefined":
+ hostname_dict_list.append(\
+ oar_id_node_dict[resource_id]['hostname'])
+
+ #hostname_list.append(oar_id_node_dict[resource_id]['hostname'])
+ return hostname_dict_list
+
+    def GetReservedNodes(self, username = None):
+        """Return the reservation records (running/waiting/launching jobs),
+        each augmented with the hostnames of its reserved nodes.
+        """
+        #Get the nodes in use and the reserved nodes
+        reservation_dict_list = \
+            self.oar.parser.SendRequest("GET_reserved_nodes", \
+            username = username)
+
+
+        for resa in reservation_dict_list:
+            logger.debug ("GetReservedNodes resa %s"%(resa))
+            #dict list of hostnames and their site
+            resa['reserved_nodes'] = \
+                self.__get_hostnames_from_oar_node_ids(resa['resource_ids'])
+
+        #del resa['resource_ids']
+        return reservation_dict_list
+
+    def GetNodes(self, node_filter_dict = None, return_fields_list = None):
+        """Return the testbed node records from OAR.
+        node_filter_dict : dictionnary of lists -- each key selects nodes
+        whose value for that key is in the associated list.
+        return_fields_list restricts the fields kept in each record.
+        Without any filter the full record list is returned as-is.
+        """
+        node_dict_by_id = self.oar.parser.SendRequest("GET_resources_full")
+        node_dict_list = node_dict_by_id.values()
+        logger.debug (" SLABDRIVER GetNodes  node_filter_dict %s \
+            return_fields_list %s "%(node_filter_dict, return_fields_list))
+        #No filtering needed return the list directly
+        if not (node_filter_dict or return_fields_list):
+            return node_dict_list
+
+        return_node_list = []
+        if node_filter_dict:
+            for filter_key in node_filter_dict:
+                try:
+                    #Filter the node_dict_list by each value contained in the
+                    #list node_filter_dict[filter_key]
+                    for value in node_filter_dict[filter_key]:
+                        for node in node_dict_list:
+                            if node[filter_key] == value:
+                                if return_fields_list :
+                                    tmp = {}
+                                    for k in return_fields_list:
+                                        tmp[k] = node[k]
+                                    return_node_list.append(tmp)
+                                else:
+                                    return_node_list.append(node)
+                except KeyError:
+                    # A bad filter key aborts the whole query (returns None).
+                    logger.log_exc("GetNodes KeyError")
+                    return
+
+
+        return return_node_list
+    @staticmethod
+    def AddSlice(slice_record, user_record):
+        """Add slice to the sfa tables. Called by verify_slice
+        during lease/sliver creation.
+        Creates the RegSlice row, commits it, then records the
+        slice -> researcher dependency in a second commit.
+        """
+        sfa_record = RegSlice(hrn=slice_record['hrn'],
+                            gid=slice_record['gid'],
+                            pointer=slice_record['slice_id'],
+                            authority=slice_record['authority'])
+
+        logger.debug("SLABDRIVER.PY AddSlice  sfa_record %s user_record %s" \
+            %(sfa_record, user_record))
+        sfa_record.just_created()
+        dbsession.add(sfa_record)
+        dbsession.commit()
+        #Update the reg-researcher dependance table
+        sfa_record.reg_researchers =  [user_record]
+        dbsession.commit()
+
+        #Update the senslab table with the new slice
+        #slab_slice = SenslabXP( slice_hrn = slice_record['slice_hrn'], \
+                        #record_id_slice = sfa_record.record_id , \
+                        #record_id_user = slice_record['record_id_user'], \
+                        #peer_authority = slice_record['peer_authority'])
+
+        #logger.debug("SLABDRIVER.PY \tAddSlice slice_record %s \
+                        #slab_slice %s sfa_record %s" \
+                        #%(slice_record,slab_slice, sfa_record))
+        #slab_dbsession.add(slab_slice)
+        #slab_dbsession.commit()
+        return
+
+ def GetSites(self, site_filter_name_list = None, return_fields_list = None):
+ site_dict = self.oar.parser.SendRequest("GET_sites")
+ #site_dict : dict where the key is the sit ename
+ return_site_list = []
+ if not ( site_filter_name_list or return_fields_list):
+ return_site_list = site_dict.values()
+ return return_site_list
+
+ for site_filter_name in site_filter_name_list:
+ if site_filter_name in site_dict:
+ if return_fields_list:
+ for field in return_fields_list:
+ tmp = {}
+ try:
+ tmp[field] = site_dict[site_filter_name][field]
+ except KeyError:
+ logger.error("GetSites KeyError %s "%(field))
+ return None
+ return_site_list.append(tmp)
+ else:
+ return_site_list.append( site_dict[site_filter_name])
+
+
+ return return_site_list
+
+
+
+
+
+    #TODO : Check rights to delete person
+    def DeletePerson(self, person_record):
+        """ Disable an existing account in senslab LDAP.
+        Users and techs can only delete themselves. PIs can only
+        delete themselves and other non-PIs at their sites.
+        Admins can delete anyone.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Disable user account in senslab LDAP
+        #(the entry is marked deleted, not removed; return value comes
+        #straight from the LDAP layer)
+        ret = self.ldap.LdapMarkUserAsDeleted(person_record)
+        logger.warning("SLABDRIVER DeletePerson %s " %(person_record))
+        return ret
+
+    #TODO Check DeleteSlice, check rights 05/07/2012 SA
+    def DeleteSlice(self, slice_record):
+        """ Deletes the specified slice.
+        Senslab : Kill the job associated with the slice if there is one
+        using DeleteSliceFromNodes.
+        Updates the slice record in slab db to remove the slice nodes.
+
+        Users may only delete slices of which they are members. PIs may
+        delete any of the slices at their sites, or any slices of which
+        they are members. Admins may delete any slice.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Kill the OAR job(s) attached to the slice.
+        #NOTE(review): despite the docstring, this returns None, not 1.
+        self.DeleteSliceFromNodes(slice_record)
+        logger.warning("SLABDRIVER DeleteSlice %s "%(slice_record))
+        return
+
+ @staticmethod
+ def __add_person_to_db(user_dict):
+
+ check_if_exists = dbsession.query(RegUser).filter_by(email = user_dict['email']).first()
+ #user doesn't exists
+ if not check_if_exists:
+ logger.debug("__add_person_to_db \t Adding %s \r\n \r\n \
+ _________________________________________________________________________\
+ " %(user_dict))
+ hrn = user_dict['hrn']
+ person_urn = hrn_to_urn(hrn, 'user')
+ pubkey = user_dict['pkey']
+ try:
+ pkey = convert_public_key(pubkey)
+ except TypeError:
+ #key not good. create another pkey
+ self.logger.warn('__add_person_to_db: unable to convert public \
+ key for %s' %(hrn ))
+ pkey = Keypair(create=True)
+
+
+ if pubkey is not None and pkey is not None :
+ hierarchy = Hierarchy()
+ person_gid = hierarchy.create_gid(person_urn, create_uuid(), pkey)
+ if user_dict['email']:
+ logger.debug("__add_person_to_db \r\n \r\n SLAB IMPORTER PERSON EMAIL OK email %s " %(user_dict['email']))
+ person_gid.set_email(user_dict['email'])
+
+ user_record = RegUser(hrn=hrn , pointer= '-1', authority=get_authority(hrn), \
+ email=user_dict['email'], gid = person_gid)
+ user_record.reg_keys = [RegKey(user_dict['pkey'])]
+ user_record.just_created()
+ dbsession.add (user_record)
+ dbsession.commit()
+ return
+
+    #TODO AddPerson 04/07/2012 SA
+    #def AddPerson(self, auth, person_fields=None):
+    def AddPerson(self, record):#TODO fixing 28/08//2012 SA
+        """Adds a new account. Any fields specified in records are used,
+        otherwise defaults are used.
+        Accounts are disabled by default. To enable an account,
+        use UpdatePerson().
+        Returns the new person_id (> 0) if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Create the user in LDAP first; LdapAddUser returns the new uid
+        ret = self.ldap.LdapAddUser(record)
+
+        #Derive the hrn from the root authority and the LDAP uid, then
+        #mirror the user into the sfa db
+        record['hrn'] = self.root_auth + '.' + ret['uid']
+        logger.debug("SLABDRIVER AddPerson return code %s record %s \r\n "%(ret,record))
+        self.__add_person_to_db(record)
+        return ret['uid']
+
+    #TODO AddPersonToSite 04/07/2012 SA
+    def AddPersonToSite (self, auth, person_id_or_email, \
+                                                site_id_or_login_base=None):
+        """ Adds the specified person to the specified site. If the person is
+        already a member of the site, no errors are returned. Does not change
+        the person's primary site.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Stub kept for PLCAPI compatibility: senslab does not manage
+        #site membership this way.
+        logger.warning("SLABDRIVER AddPersonToSite EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO AddRoleToPerson : Not sure if needed in senslab 04/07/2012 SA
+    def AddRoleToPerson(self, auth, role_id_or_name, person_id_or_email):
+        """Grants the specified role to the person.
+        PIs can only grant the tech and user roles to users and techs at their
+        sites. Admins can grant any role to any user.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Stub kept for PLCAPI compatibility: roles are not used in senslab.
+        logger.warning("SLABDRIVER AddRoleToPerson EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO AddPersonKey 04/07/2012 SA
+    def AddPersonKey(self, auth, person_id_or_email, key_fields=None):
+        """Adds a new key to the specified account.
+        Non-admins can only modify their own keys.
+        Returns the new key_id (> 0) if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Stub kept for PLCAPI compatibility: keys live in LDAP for senslab.
+        logger.warning("SLABDRIVER AddPersonKey EMPTY - DO NOTHING \r\n ")
+        return
+
+    def DeleteLeases(self, leases_id_list, slice_hrn ):
+        """Delete a list of OAR jobs (leases) belonging to slice_hrn.
+
+        leases_id_list : list of OAR job ids to delete.
+        slice_hrn : hrn of the owning slice, forwarded to DeleteJobs.
+        """
+        logger.debug("SLABDRIVER DeleteLeases leases_id_list %s slice_hrn %s \
+        \r\n " %(leases_id_list, slice_hrn))
+        for job_id in leases_id_list:
+            self.DeleteJobs(job_id, slice_hrn)
+
+
+        return
+
+
+
+
+ def LaunchExperimentOnOAR(self, added_nodes, slice_name, \
+ lease_start_time, lease_duration, slice_user=None):
+ lease_dict = {}
+ lease_dict['lease_start_time'] = lease_start_time
+ lease_dict['lease_duration'] = lease_duration
+ lease_dict['added_nodes'] = added_nodes
+ lease_dict['slice_name'] = slice_name
+ lease_dict['slice_user'] = slice_user
+ lease_dict['grain'] = self.GetLeaseGranularity()
+ lease_dict['time_format'] = self.time_format
+
+
+ def __create_job_structure_request_for_OAR(lease_dict):
+ """ Creates the structure needed for a correct POST on OAR.
+ Makes the timestamp transformation into the appropriate format.
+ Sends the POST request to create the job with the resources in
+ added_nodes.
+
+ """
+
+ nodeid_list = []
+ reqdict = {}
+
+
+ reqdict['workdir'] = '/tmp'
+ reqdict['resource'] = "{network_address in ("
+
+ for node in lease_dict['added_nodes']:
+ logger.debug("\r\n \r\n OARrestapi \t \
+ __create_job_structure_request_for_OAR node %s" %(node))
+
+ # Get the ID of the node
+ nodeid = node
+ reqdict['resource'] += "'" + nodeid + "', "
+ nodeid_list.append(nodeid)
+
+ custom_length = len(reqdict['resource'])- 2
+ reqdict['resource'] = reqdict['resource'][0:custom_length] + \
+ ")}/nodes=" + str(len(nodeid_list))
+
+ def __process_walltime(duration):
+ """ Calculates the walltime in seconds from the duration in H:M:S
+ specified in the RSpec.
+
+ """
+ if duration:
+ # Fixing the walltime by adding a few delays.
+ # First put the walltime in seconds oarAdditionalDelay = 20;
+ # additional delay for /bin/sleep command to
+ # take in account prologue and epilogue scripts execution
+ # int walltimeAdditionalDelay = 240; additional delay
+ desired_walltime = duration
+ total_walltime = desired_walltime + 240 #+4 min Update SA 23/10/12
+ sleep_walltime = desired_walltime # 0 sec added Update SA 23/10/12
+ walltime = []
+ #Put the walltime back in str form
+ #First get the hours
+ walltime.append(str(total_walltime / 3600))
+ total_walltime = total_walltime - 3600 * int(walltime[0])
+ #Get the remaining minutes
+ walltime.append(str(total_walltime / 60))
+ total_walltime = total_walltime - 60 * int(walltime[1])
+ #Get the seconds
+ walltime.append(str(total_walltime))
+
+ else:
+ logger.log_exc(" __process_walltime duration null")
+
+ return walltime, sleep_walltime
+
+
+ walltime, sleep_walltime = \
+ __process_walltime(int(lease_dict['lease_duration'])*lease_dict['grain'])
+
+
+ reqdict['resource'] += ",walltime=" + str(walltime[0]) + \
+ ":" + str(walltime[1]) + ":" + str(walltime[2])
+ reqdict['script_path'] = "/bin/sleep " + str(sleep_walltime)
+
+ #In case of a scheduled experiment (not immediate)
+ #To run an XP immediately, don't specify date and time in RSpec
+ #They will be set to None.
+ if lease_dict['lease_start_time'] is not '0':
+ #Readable time accepted by OAR
+ start_time = datetime.fromtimestamp(int(lease_dict['lease_start_time'])).\
+ strftime(lease_dict['time_format'])
+ reqdict['reservation'] = start_time
+ #If there is not start time, Immediate XP. No need to add special
+ # OAR parameters
+
+
+ reqdict['type'] = "deploy"
+ reqdict['directory'] = ""
+ reqdict['name'] = "SFA_" + lease_dict['slice_user']
+
+ return reqdict
+
+ logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR slice_user %s\
+ \r\n " %(slice_user))
+ #Create the request for OAR
+ reqdict = __create_job_structure_request_for_OAR(lease_dict)
+ # first step : start the OAR job and update the job
+ logger.debug("SLABDRIVER.PY \tLaunchExperimentOnOAR reqdict %s\
+ \r\n " %(reqdict))
+
+ answer = self.oar.POSTRequestToOARRestAPI('POST_job', \
+ reqdict, slice_user)
+ logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s " %(answer))
+ try:
+ jobid = answer['id']
+ except KeyError:
+ logger.log_exc("SLABDRIVER \tLaunchExperimentOnOAR \
+ Impossible to create job %s " %(answer))
+ return None
+
+
+
+
+ if jobid :
+ logger.debug("SLABDRIVER \tLaunchExperimentOnOAR jobid %s \
+ added_nodes %s slice_user %s" %(jobid, added_nodes, slice_user))
+
+
+ return jobid
+
+
+    def AddLeases(self, hostname_list, slice_record, \
+                                        lease_start_time, lease_duration):
+        """Create a lease: launch the OAR job and record the
+        (slice_hrn, job_id, end_time) association in the slab db.
+
+        hostname_list : nodes to reserve.
+        slice_record : slice dict; the 'login' and 'hrn' keys are used.
+        lease_start_time : unix timestamp of the start of the lease.
+        lease_duration : duration in grain units.
+        """
+        logger.debug("SLABDRIVER \r\n \r\n \t AddLeases hostname_list %s \
+        slice_record %s lease_start_time %s lease_duration %s "\
+        %( hostname_list, slice_record , lease_start_time, \
+        lease_duration))
+
+        #tmp = slice_record['reg-researchers'][0].split(".")
+        username = slice_record['login']
+        #username = tmp[(len(tmp)-1)]
+        job_id = self.LaunchExperimentOnOAR(hostname_list, slice_record['hrn'], \
+                                    lease_start_time, lease_duration, username)
+        start_time = datetime.fromtimestamp(int(lease_start_time)).strftime(self.time_format)
+        end_time = lease_start_time + lease_duration
+
+        import logging, logging.handlers
+        from sfa.util.sfalogging import _SfaLogger
+        logger.debug("SLABDRIVER \r\n \r\n \t AddLeases TURN ON LOGGING SQL %s %s %s "%(slice_record['hrn'], job_id, end_time))
+        #NOTE(review): sql_logger is never used afterwards — creating the
+        #_SfaLogger only switches on sqlalchemy.engine debug logging as a
+        #side effect.
+        sql_logger = _SfaLogger(loggername = 'sqlalchemy.engine', level=logging.DEBUG)
+        logger.debug("SLABDRIVER \r\n \r\n \t AddLeases %s %s %s " %(type(slice_record['hrn']), type(job_id), type(end_time)))
+
+        slab_ex_row = SenslabXP(slice_hrn = slice_record['hrn'], \
+                job_id = job_id, end_time= end_time)
+
+        logger.debug("SLABDRIVER \r\n \r\n \t AddLeases slab_ex_row %s" \
+                %(slab_ex_row))
+        slab_dbsession.add(slab_ex_row)
+        slab_dbsession.commit()
+
+        logger.debug("SLABDRIVER \t AddLeases hostname_list start_time %s " %(start_time))
+
+        return
+
+
+    #Delete the jobs from job_senslab table
+    def DeleteSliceFromNodes(self, slice_record):
+        """Kill the OAR job(s) of a slice, freeing its nodes.
+
+        slice_record : dict with 'oar_job_id' (single id or list of ids)
+            and 'user' keys.
+        """
+        logger.debug("SLABDRIVER \t DeleteSliceFromNodese %s " %(slice_record))
+        if isinstance(slice_record['oar_job_id'],list):
+            for job_id in slice_record['oar_job_id']:
+                self.DeleteJobs(job_id, slice_record['user'])
+        else:
+            self.DeleteJobs(slice_record['oar_job_id'],slice_record['user'])
+        return
+
+
+    def GetLeaseGranularity(self):
+        """ Returns the granularity of Senslab testbed.
+        OAR returns seconds for experiments duration.
+        Defined in seconds.
+        Experiments which last less than 10 min are invalid"""
+
+        # self.grain is set elsewhere in this class (not visible here)
+        return self.grain
+
+
+    @staticmethod
+    def update_jobs_in_slabdb( job_oar_list, jobs_psql):
+        """Synchronize the slab db with OAR: delete SenslabXP rows whose
+        job id is no longer known to OAR.
+
+        job_oar_list : job ids currently known to OAR.
+        jobs_psql : job ids currently stored in the slab db.
+        """
+        #Get all the entries in slab_xp table
+
+
+        jobs_psql = set(jobs_psql)
+        kept_jobs = set(job_oar_list).intersection(jobs_psql)
+        logger.debug ( "\r\n \t\ update_jobs_in_slabdb jobs_psql %s \r\n \t \
+        job_oar_list %s kept_jobs %s "%(jobs_psql, job_oar_list, kept_jobs))
+        #Jobs in the db but not in OAR are stale: purge them in one query
+        deleted_jobs = set(jobs_psql).difference(kept_jobs)
+        deleted_jobs = list(deleted_jobs)
+        if len(deleted_jobs) > 0:
+            slab_dbsession.query(SenslabXP).filter(SenslabXP.job_id.in_(deleted_jobs)).delete(synchronize_session='fetch')
+            slab_dbsession.commit()
+
+        return
+
+
+
+    def GetLeases(self, lease_filter_dict=None, login=None):
+        """Return the current OAR reservations as a list of lease dicts,
+        each enriched with the slice hrn/urn and the reserved nodes'
+        component urns.
+
+        lease_filter_dict : when given, only leases whose 'slice_hrn'
+            equals lease_filter_dict['name'] are returned.
+        login : senslab ldap uid forwarded to GetReservedNodes.
+        Side effect: purges stale job entries from the slab db via
+        update_jobs_in_slabdb.
+        """
+
+        unfiltered_reservation_list = self.GetReservedNodes(login)
+
+        reservation_list = []
+        #Find the slice associated with this user senslab ldap uid
+        logger.debug(" SLABDRIVER.PY \tGetLeases login %s\
+        unfiltered_reservation_list %s " %(login, unfiltered_reservation_list))
+        #Create user dict first to avoid looking several times for
+        #the same user in LDAP SA 27/07/12
+        resa_user_dict = {}
+        job_oar_list = []
+
+        #Map job_id -> slab db row for quick lookup below
+        jobs_psql_query = slab_dbsession.query(SenslabXP).all()
+        jobs_psql_dict = [ (row.job_id, row.__dict__ )for row in jobs_psql_query ]
+        jobs_psql_dict = dict(jobs_psql_dict)
+        logger.debug("SLABDRIVER \tGetLeases jobs_psql_dict %s"\
+            %(jobs_psql_dict))
+        jobs_psql_id_list = [ row.job_id for row in jobs_psql_query ]
+
+
+
+        for resa in unfiltered_reservation_list:
+            logger.debug("SLABDRIVER \tGetLeases USER %s"\
+                %(resa['user']))
+            #Construct list of jobs (runing, waiting..) in oar
+            job_oar_list.append(resa['lease_id'])
+            #If there is information on the job in SLAB DB (slice used and job id)
+            if resa['lease_id'] in jobs_psql_dict:
+                job_info = jobs_psql_dict[resa['lease_id']]
+                logger.debug("SLABDRIVER \tGetLeases resa_user_dict %s"\
+                    %(resa_user_dict))
+                resa['slice_hrn'] = job_info['slice_hrn']
+                resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
+
+            #Assume it is a senslab slice:
+            else:
+                resa['slice_id'] = hrn_to_urn(self.root_auth+'.'+ resa['user'] +"_slice" , 'slice')
+                #if resa['user'] not in resa_user_dict:
+                    #logger.debug("SLABDRIVER \tGetLeases userNOTIN ")
+                    #ldap_info = self.ldap.LdapSearch('(uid='+resa['user']+')')
+                    #if ldap_info:
+                        #ldap_info = ldap_info[0][1]
+                        ##Get the backref :relationship table reg-researchers
+                        #user = dbsession.query(RegUser).options(joinedload('reg_slices_as_researcher')).filter_by(email = \
+                        #ldap_info['mail'][0])
+                        #if user:
+                            #user = user.first()
+                            #user = user.__dict__
+                            #slice_info = user['reg_slices_as_researcher'][0].__dict__
+                        ##Separated in case user not in database :
+                        ##record_id not defined SA 17/07//12
+
+                        ##query_slice_info = slab_dbsession.query(SenslabXP).filter_by(record_id_user = user.record_id)
+                        ##if query_slice_info:
+                            ##slice_info = query_slice_info.first()
+                        ##else:
+                            ##slice_info = None
+
+                    #resa_user_dict[resa['user']] = {}
+                    #resa_user_dict[resa['user']]['ldap_info'] = user
+                    #resa_user_dict[resa['user']]['slice_info'] = slice_info
+
+                #resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info']['hrn']
+                #resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
+
+            resa['slice_hrn'] = Xrn(resa['slice_id']).get_hrn()
+
+            resa['component_id_list'] = []
+            #Transform the hostnames into urns (component ids)
+            for node in resa['reserved_nodes']:
+                #resa['component_id_list'].append(hostname_to_urn(self.hrn, \
+                        #self.root_auth, node['hostname']))
+                slab_xrn = slab_xrn_object(self.root_auth, node)
+                resa['component_id_list'].append(slab_xrn.urn)
+
+            if lease_filter_dict:
+                logger.debug("SLABDRIVER \tGetLeases resa_ %s \r\n leasefilter %s"\
+                        %(resa,lease_filter_dict))
+
+                if lease_filter_dict['name'] == resa['slice_hrn']:
+                    reservation_list.append(resa)
+
+        #No filter: every reservation is returned
+        if lease_filter_dict is None:
+            reservation_list = unfiltered_reservation_list
+            #else:
+                #del unfiltered_reservation_list[unfiltered_reservation_list.index(resa)]
+
+
+        self.update_jobs_in_slabdb(job_oar_list, jobs_psql_id_list)
+
+        #for resa in unfiltered_reservation_list:
+
+
+            ##Put the slice_urn
+            #if resa['user'] in resa_user_dict:
+                #resa['slice_hrn'] = resa_user_dict[resa['user']]['slice_info']['hrn']
+                #resa['slice_id'] = hrn_to_urn(resa['slice_hrn'], 'slice')
+                ##Put the slice_urn
+                ##resa['slice_id'] = hrn_to_urn(slice_info.slice_hrn, 'slice')
+                #resa['component_id_list'] = []
+                ##Transform the hostnames into urns (component ids)
+                #for node in resa['reserved_nodes']:
+                    ##resa['component_id_list'].append(hostname_to_urn(self.hrn, \
+                            ##self.root_auth, node['hostname']))
+                    #slab_xrn = slab_xrn_object(self.root_auth, node)
+                    #resa['component_id_list'].append(slab_xrn.urn)
+
+        ##Filter the reservation list if necessary
+        ##Returns all the leases associated with a given slice
+        #if lease_filter_dict:
+            #logger.debug("SLABDRIVER \tGetLeases lease_filter_dict %s"\
+                    #%(lease_filter_dict))
+            #for resa in unfiltered_reservation_list:
+                #if lease_filter_dict['name'] == resa['slice_hrn']:
+                    #reservation_list.append(resa)
+        #else:
+            #reservation_list = unfiltered_reservation_list
+
+        logger.debug(" SLABDRIVER.PY \tGetLeases reservation_list %s"\
+            %(reservation_list))
+        return reservation_list
+
+
+
+
+#TODO FUNCTIONS SECTION 04/07/2012 SA
+
+    #TODO : Is UnBindObjectFromPeer still necessary ? Currently does nothing
+    #04/07/2012 SA
+    @staticmethod
+    def UnBindObjectFromPeer( auth, object_type, object_id, shortname):
+        """ This method is a hopefully temporary hack to let the sfa correctly
+        detach the objects it creates from a remote peer object. This is
+        needed so that the sfa federation link can work in parallel with
+        RefreshPeer, as RefreshPeer depends on remote objects being correctly
+        marked.
+        Parameters:
+        auth : struct, API authentication structure
+            AuthMethod : string, Authentication method to use
+        object_type : string, Object type, among 'site','person','slice',
+        'node','key'
+        object_id : int, object_id
+        shortname : string, peer shortname
+        FROM PLC DOC
+
+        """
+        #Stub kept for API compatibility with PLC.
+        logger.warning("SLABDRIVER \tUnBindObjectFromPeer EMPTY-\
+        DO NOTHING \r\n ")
+        return
+
+    #TODO Is BindObjectToPeer still necessary ? Currently does nothing
+    #04/07/2012 SA
+    def BindObjectToPeer(self, auth, object_type, object_id, shortname=None, \
+                                                    remote_object_id=None):
+        """This method is a hopefully temporary hack to let the sfa correctly
+        attach the objects it creates to a remote peer object. This is needed
+        so that the sfa federation link can work in parallel with RefreshPeer,
+        as RefreshPeer depends on remote objects being correctly marked.
+        Parameters:
+        shortname : string, peer shortname
+        remote_object_id : int, remote object_id, set to 0 if unknown
+        FROM PLC API DOC
+
+        """
+        #Stub kept for API compatibility with PLC.
+        logger.warning("SLABDRIVER \tBindObjectToPeer EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO UpdateSlice 04/07/2012 SA
+    #Function should delete and create another job since in senslab slice=job
+    def UpdateSlice(self, auth, slice_id_or_name, slice_fields=None):
+        """Updates the parameters of an existing slice with the values in
+        slice_fields.
+        Users may only update slices of which they are members.
+        PIs may update any of the slices at their sites, or any slices of
+        which they are members. Admins may update any slice.
+        Only PIs and admins may update max_nodes. Slices cannot be renewed
+        (by updating the expires parameter) more than 8 weeks into the future.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Stub: not implemented for senslab.
+        logger.warning("SLABDRIVER UpdateSlice EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO UpdatePerson 04/07/2012 SA
+    def UpdatePerson(self, slab_hrn, federated_hrn, person_fields=None):
+        """Updates a person. Only the fields specified in person_fields
+        are updated, all other fields are left untouched.
+        Users and techs can only update themselves. PIs can only update
+        themselves and other non-PIs at their sites.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Stub: the federation mapping below was never enabled.
+        #new_row = FederatedToSenslab(slab_hrn, federated_hrn)
+        #slab_dbsession.add(new_row)
+        #slab_dbsession.commit()
+
+        logger.debug("SLABDRIVER UpdatePerson EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO GetKeys 04/07/2012 SA
+    def GetKeys(self, auth, key_filter=None, return_fields=None):
+        """Returns an array of structs containing details about keys.
+        If key_filter is specified and is an array of key identifiers,
+        or a struct of key attributes, only keys matching the filter
+        will be returned. If return_fields is specified, only the
+        specified details will be returned.
+
+        Admin may query all keys. Non-admins may only query their own keys.
+        FROM PLC API DOC
+
+        """
+        #Stub: keys are handled through LDAP in senslab.
+        logger.warning("SLABDRIVER GetKeys EMPTY - DO NOTHING \r\n ")
+        return
+
+    #TODO DeleteKey 04/07/2012 SA
+    def DeleteKey(self, key_id):
+        """ Deletes a key.
+        Non-admins may only delete their own keys.
+        Returns 1 if successful, faults otherwise.
+        FROM PLC API DOC
+
+        """
+        #Stub: keys are handled through LDAP in senslab.
+        logger.warning("SLABDRIVER DeleteKey EMPTY - DO NOTHING \r\n ")
+        return
+
+
+
+
+    @staticmethod
+    def _sql_get_slice_info( slice_filter ):
+        """Look up a slice by hrn in the sfa db, eagerly loading its
+        researchers. Returns the record's __dict__, or None when no
+        slice matches.
+        """
+        #DO NOT USE RegSlice - reg_researchers to get the hrn
+        #of the user otherwise will mess up the RegRecord in
+        #Resolve, don't know why - SA 08/08/2012
+
+        #Only one entry for one user = one slice in slab_xp table
+        #slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
+        raw_slicerec = dbsession.query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn = slice_filter).first()
+        #raw_slicerec = dbsession.query(RegRecord).filter_by(hrn = slice_filter).first()
+        if raw_slicerec:
+            #load_reg_researcher
+            #raw_slicerec.reg_researchers
+            raw_slicerec = raw_slicerec.__dict__
+            logger.debug(" SLABDRIVER \t get_slice_info slice_filter %s \
+                            raw_slicerec %s"%(slice_filter, raw_slicerec))
+            slicerec = raw_slicerec
+            #only one researcher per slice so take the first one
+            #slicerec['reg_researchers'] = raw_slicerec['reg_researchers']
+            #del slicerec['reg_researchers']['_sa_instance_state']
+            return slicerec
+
+        else :
+            return None
+
+    @staticmethod
+    def _sql_get_slice_info_from_user(slice_filter ):
+        """Look up, by user record id, the first slice the user
+        researches. Returns a slice dict with a nested
+        'reg_researchers' user dict, or None when the user is unknown.
+        """
+        #slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
+        raw_slicerec = dbsession.query(RegUser).options(joinedload('reg_slices_as_researcher')).filter_by(record_id = slice_filter).first()
+        #raw_slicerec = dbsession.query(RegRecord).filter_by(record_id = slice_filter).first()
+        #Put it in correct order
+        user_needed_fields = ['peer_authority', 'hrn', 'last_updated', 'classtype', 'authority', 'gid', 'record_id', 'date_created', 'type', 'email', 'pointer']
+        slice_needed_fields = ['peer_authority', 'hrn', 'last_updated', 'classtype', 'authority', 'gid', 'record_id', 'date_created', 'type', 'pointer']
+        if raw_slicerec:
+            #raw_slicerec.reg_slices_as_researcher
+            raw_slicerec = raw_slicerec.__dict__
+            slicerec = {}
+            slicerec = \
+            dict([(k, raw_slicerec['reg_slices_as_researcher'][0].__dict__[k]) \
+                        for k in slice_needed_fields])
+            slicerec['reg_researchers'] = dict([(k, raw_slicerec[k]) \
+                            for k in user_needed_fields])
+             #TODO Handle multiple slices for one user SA 10/12/12
+                        #for now only take the first slice record associated to the rec user
+                        ##slicerec  = raw_slicerec['reg_slices_as_researcher'][0].__dict__
+                        #del raw_slicerec['reg_slices_as_researcher']
+                        #slicerec['reg_researchers'] = raw_slicerec
+                        ##del slicerec['_sa_instance_state']
+
+            return slicerec
+
+        else:
+            return None
+
+ def _get_slice_records(self, slice_filter = None, \
+ slice_filter_type = None):
+
+ #login = None
+
+ #Get list of slices based on the slice hrn
+ if slice_filter_type == 'slice_hrn':
+
+ #if get_authority(slice_filter) == self.root_auth:
+ #login = slice_filter.split(".")[1].split("_")[0]
+
+ slicerec = self._sql_get_slice_info(slice_filter)
+
+ if slicerec is None:
+ return None
+ #return login, None
+
+ #Get slice based on user id
+ if slice_filter_type == 'record_id_user':
+
+ slicerec = self._sql_get_slice_info_from_user(slice_filter)
+
+ if slicerec:
+ fixed_slicerec_dict = slicerec
+ #At this point if the there is no login it means
+ #record_id_user filter has been used for filtering
+ #if login is None :
+ ##If theslice record is from senslab
+ #if fixed_slicerec_dict['peer_authority'] is None:
+ #login = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
+ #return login, fixed_slicerec_dict
+ return fixed_slicerec_dict
+
+
+
+    def GetSlices(self, slice_filter = None, slice_filter_type = None, login=None):
+        """ Get the slice records from the slab db.
+        Returns a slice dict if slice_filter and slice_filter_type
+        are specified.
+        Returns a list of slice dictionaries if there are no filters
+        specified.
+
+        slice_filter : slice hrn or user record id, interpreted
+            according to slice_filter_type ('slice_hrn' or
+            'record_id_user').
+        login : ldap uid forwarded to GetLeases.
+        Each returned dict may carry lease data ('oar_job_id',
+        'node_ids', 'list_node_ids') merged from the running OAR jobs.
+        """
+        #login = None
+        authorized_filter_types_list = ['slice_hrn', 'record_id_user']
+        return_slicerec_dictlist = []
+
+        #First try to get information on the slice based on the filter provided
+        if slice_filter_type in authorized_filter_types_list:
+            fixed_slicerec_dict = \
+                            self._get_slice_records(slice_filter, slice_filter_type)
+            #login, fixed_slicerec_dict = \
+                            #self._get_slice_records(slice_filter, slice_filter_type)
+            logger.debug(" SLABDRIVER \tGetSlices login %s \
+                            slice record %s slice_filter %s slice_filter_type %s "\
+                            %(login, fixed_slicerec_dict,slice_filter, slice_filter_type))
+
+
+            #Now we have the slice record fixed_slicerec_dict, get the
+            #jobs associated to this slice
+            #leases_list = self.GetReservedNodes(username = login)
+            leases_list = self.GetLeases(login = login)
+            #If no job is running or no job scheduled
+            #return only the slice record
+            if leases_list == [] and fixed_slicerec_dict:
+                return_slicerec_dictlist.append(fixed_slicerec_dict)
+
+            #If several jobs for one slice , put the slice record into
+            # each lease information dict
+
+
+            for lease in leases_list :
+                slicerec_dict = {}
+                logger.debug("SLABDRIVER.PY \tGetSlices slice_filter %s \
+                        \ lease['slice_hrn'] %s" \
+                        %(slice_filter, lease['slice_hrn']))
+                if slice_filter_type =='slice_hrn' and lease['slice_hrn'] == slice_filter:
+                    reserved_list = lease['reserved_nodes']
+                    slicerec_dict['slice_hrn'] = lease['slice_hrn']
+                    slicerec_dict['hrn'] = lease['slice_hrn']
+                    slicerec_dict['user'] = lease['user']
+                    slicerec_dict['oar_job_id'] = lease['lease_id']
+                    slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
+                    slicerec_dict.update({'node_ids':lease['reserved_nodes']})
+
+                    #Update lease dict with the slice record
+                    if fixed_slicerec_dict:
+                        fixed_slicerec_dict['oar_job_id'] = []
+                        fixed_slicerec_dict['oar_job_id'].append(slicerec_dict['oar_job_id'])
+                        slicerec_dict.update(fixed_slicerec_dict)
+                        #slicerec_dict.update({'hrn':\
+                            #str(fixed_slicerec_dict['slice_hrn'])})
+
+                    return_slicerec_dictlist.append(slicerec_dict)
+                    logger.debug("SLABDRIVER.PY \tGetSlices \
+                            OHOHOHOH %s" %(return_slicerec_dictlist ))
+
+                logger.debug("SLABDRIVER.PY \tGetSlices \
+                        slicerec_dict %s return_slicerec_dictlist %s \
+                        lease['reserved_nodes'] \
+                        %s" %(slicerec_dict, return_slicerec_dictlist, \
+                        lease['reserved_nodes'] ))
+
+            logger.debug("SLABDRIVER.PY \tGetSlices RETURN \
+                        return_slicerec_dictlist %s" \
+                        %(return_slicerec_dictlist))
+
+            return return_slicerec_dictlist
+
+
+        else:
+            #Get all slices from the senslab sfa database ,
+            #put them in dict format
+            #query_slice_list = dbsession.query(RegRecord).all()
+            query_slice_list = dbsession.query(RegSlice).options(joinedload('reg_researchers')).all()
+            #query_slice_list = dbsession.query(RegRecord).filter_by(type='slice').all()
+            #query_slice_list = slab_dbsession.query(SenslabXP).all()
+            return_slicerec_dictlist = []
+            for record in query_slice_list:
+                tmp = record.__dict__
+                tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
+                #del tmp['reg_researchers']['_sa_instance_state']
+                return_slicerec_dictlist.append(tmp)
+                #return_slicerec_dictlist.append(record.__dict__)
+
+            #Get all the jobs reserved nodes
+            leases_list = self.GetReservedNodes()
+
+
+            for fixed_slicerec_dict in return_slicerec_dictlist:
+                slicerec_dict = {}
+                #Check if the slice belongs to a senslab user
+                if fixed_slicerec_dict['peer_authority'] is None:
+                    owner = fixed_slicerec_dict['hrn'].split(".")[1].split("_")[0]
+                else:
+                    owner = None
+                for lease in leases_list:
+                    if owner == lease['user']:
+                        slicerec_dict['oar_job_id'] = lease['lease_id']
+
+                        #for reserved_node in lease['reserved_nodes']:
+                        logger.debug("SLABDRIVER.PY \tGetSlices lease %s "\
+                                %(lease ))
+
+                        reserved_list = lease['reserved_nodes']
+
+                        slicerec_dict.update({'node_ids':lease['reserved_nodes']})
+                        slicerec_dict.update({'list_node_ids':{'hostname':reserved_list}})
+                        slicerec_dict.update(fixed_slicerec_dict)
+                        #slicerec_dict.update({'hrn':\
+                            #str(fixed_slicerec_dict['slice_hrn'])})
+                        #return_slicerec_dictlist.append(slicerec_dict)
+                    fixed_slicerec_dict.update(slicerec_dict)
+
+        logger.debug("SLABDRIVER.PY \tGetSlices RETURN \
+                        return_slicerec_dictlist %s \slice_filter %s " \
+                        %(return_slicerec_dictlist, slice_filter))
+
+        return return_slicerec_dictlist
+
+
+
+
+    ##
+    # Convert SFA fields to PLC fields for use when registering or updating
+    # registry record in the PLC database
+    #
+    # @param type type of record (user, slice, ...)
+    # @param hrn human readable name
+    # @param sfa_fields dictionary of SFA fields
+    # @param slab_fields dictionary of PLC fields (output)
+    @staticmethod
+    def sfa_fields_to_slab_fields(sfa_type, hrn, record):
+        """Build a slab-style record dict from an SFA record.
+        Only 'slice' records are handled today; other types fall
+        through and an empty dict is returned.
+        """
+
+        slab_record = {}
+        #for field in record:
+        #    slab_record[field] = record[field]
+
+        if sfa_type == "slice":
+            #instantion used in get_slivers ?
+            if not "instantiation" in slab_record:
+                slab_record["instantiation"] = "senslab-instantiated"
+            #slab_record["hrn"] = hrn_to_pl_slicename(hrn)
+            #Unused hrn_to_pl_slicename because Slab's hrn already
+            #in the appropriate form SA 23/07/12
+            slab_record["hrn"] = hrn
+            logger.debug("SLABDRIVER.PY sfa_fields_to_slab_fields \
+                        slab_record %s " %(slab_record['hrn']))
+            if "url" in record:
+                slab_record["url"] = record["url"]
+            if "description" in record:
+                slab_record["description"] = record["description"]
+            if "expires" in record:
+                slab_record["expires"] = int(record["expires"])
+
+        #nodes added by OAR only and then imported to SFA
+        #elif type == "node":
+            #if not "hostname" in slab_record:
+                #if not "hostname" in record:
+                    #raise MissingSfaInfo("hostname")
+                #slab_record["hostname"] = record["hostname"]
+            #if not "model" in slab_record:
+                #slab_record["model"] = "geni"
+
+        #One authority only
+        #elif type == "authority":
+            #slab_record["login_base"] = hrn_to_slab_login_base(hrn)
+
+            #if not "name" in slab_record:
+                #slab_record["name"] = hrn
+
+            #if not "abbreviated_name" in slab_record:
+                #slab_record["abbreviated_name"] = hrn
+
+            #if not "enabled" in slab_record:
+                #slab_record["enabled"] = True
+
+            #if not "is_public" in slab_record:
+                #slab_record["is_public"] = True
+
+        return slab_record
+
+
+
+
+    def __transforms_timestamp_into_date(self, xp_utc_timestamp = None):
+        """ Transforms unix timestamp into valid OAR date format.
+        Returns the formatted date string, or None when no timestamp
+        (or a falsy one) is given.
+        """
+
+        #Used in case of a scheduled experiment (not immediate)
+        #To run an XP immediately, don't specify date and time in RSpec
+        #They will be set to None.
+        if xp_utc_timestamp:
+            #transform the xp_utc_timestamp into server readable time
+            xp_server_readable_date = datetime.fromtimestamp(int(\
+                                xp_utc_timestamp)).strftime(self.time_format)
+
+            return xp_server_readable_date
+
+        else:
+            return None
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+class SlabDriver(Driver):
+ """ Senslab Driver class inherited from Driver generic class.
+
+ Contains methods compliant with the SFA standard and the testbed
+ infrastructure (calls to LDAP and OAR).
+ """
+    def __init__(self, config):
+        """Build the driver from the SFA config: keep handles on the
+        slab db, the testbed API (LDAP + OAR) and the interface hrn.
+        """
+        Driver.__init__ (self, config)
+        self.config = config
+        self.hrn = config.SFA_INTERFACE_HRN
+
+        self.db = SlabDB(config, debug = False)
+        self.slab_api = SlabTestbedAPI(config)
+        #No caching of testbed info for now
+        self.cache = None
+
+    def augment_records_with_testbed_info (self, record_list ):
+        """ Adds specific testbed info to the records.
+        Thin wrapper over fill_record_info, kept for the Driver API.
+        """
+        return self.fill_record_info (record_list)
+
def fill_record_info(self, record_list):
    """
    Given a SFA record, fill in the senslab specific and SFA specific
    fields in the record.

    :param record_list: one record dict, or a list of them; 'slice'
        records get user/job info added, 'user' records additionally
        get their slice record appended to record_list.
    :returns: None - records are mutated in place.
    """
    logger.debug("SLABDRIVER \tfill_record_info records %s " %(record_list))
    if not isinstance(record_list, list):
        record_list = [record_list]

    try:
        for record in record_list:
            #If the record is a SFA slice record, then add information
            #about the user of this slice. This kind of
            #information is in the Senslab's DB.
            if str(record['type']) == 'slice':
                if 'reg_researchers' in record and \
                    isinstance(record['reg_researchers'], list) :
                    # keep only the first researcher, as a plain dict
                    record['reg_researchers'] = \
                        record['reg_researchers'][0].__dict__
                    record.update({'PI':[record['reg_researchers']['hrn']],
                        'researcher': [record['reg_researchers']['hrn']],
                        'name':record['hrn'],
                        'oar_job_id':[],
                        'node_ids': [],
                        'person_ids':[record['reg_researchers']['record_id']],
                        'geni_urn':'', #For client_helper.py compatibility
                        'keys':'', #For client_helper.py compatibility
                        'key_ids':''}) #For client_helper.py compatibility

                #Get slab slice record.
                recslice_list = self.slab_api.GetSlices(slice_filter = \
                                            str(record['hrn']),\
                                            slice_filter_type = 'slice_hrn')

                # NOTE(review): record['oar_job_id'] is only set by the
                # update above; if 'reg_researchers' was missing this
                # debug call raises KeyError - confirm.
                logger.debug("SLABDRIVER \tfill_record_info \
                    TYPE SLICE RECUSER record['hrn'] %s ecord['oar_job_id']\
                    %s " %(record['hrn'], record['oar_job_id']))
                try:
                    for rec in recslice_list:
                        logger.debug("SLABDRIVER\r\n \t fill_record_info oar_job_id %s " %(rec['oar_job_id']))
                        # NOTE(review): deleting this key on every
                        # iteration makes the second pass raise KeyError
                        # (swallowed below), which also aborts this loop
                        # early - confirm this is intended.
                        del record['reg_researchers']
                        record['node_ids'] = [ self.slab_api.root_auth + \
                                hostname for hostname in rec['node_ids']]
                except KeyError:
                    pass

                logger.debug( "SLABDRIVER.PY \t fill_record_info SLICE \
                    recslice_list %s \r\n \t RECORD %s \r\n \
                    \r\n" %(recslice_list, record))
            if str(record['type']) == 'user':
                #The record is a SFA user record.
                #Get the information about his slice from Senslab's DB
                #and add it to the user record.
                recslice_list = self.slab_api.GetSlices(\
                            slice_filter = record['record_id'],\
                            slice_filter_type = 'record_id_user')

                logger.debug( "SLABDRIVER.PY \t fill_record_info TYPE USER \
                    recslice_list %s \r\n \t RECORD %s \r\n" %(recslice_list , record))
                #Append slice record in records list,
                #therefore fetches user and slice info again(one more loop)
                #Will update PIs and researcher for the slice
                #recuser = dbsession.query(RegRecord).filter_by(record_id = \
                #recslice_list[0]['record_id_user']).first()
                recuser = recslice_list[0]['reg_researchers']
                logger.debug( "SLABDRIVER.PY \t fill_record_info USER \
                    recuser %s \r\n \r\n" %(recuser))
                recslice = {}
                recslice = recslice_list[0]
                recslice.update({'PI':[recuser['hrn']],
                    'researcher': [recuser['hrn']],
                    'name':record['hrn'],
                    'node_ids': [],
                    'oar_job_id': [],
                    'person_ids':[recuser['record_id']]})
                # collect every job id attached to this user's slices
                try:
                    for rec in recslice_list:
                        recslice['oar_job_id'].append(rec['oar_job_id'])
                except KeyError:
                    pass

                recslice.update({'type':'slice', \
                    'hrn':recslice_list[0]['hrn']})

                #GetPersons takes [] as filters
                user_slab = self.slab_api.GetPersons([record])

                record.update(user_slab[0])
                #For client_helper.py compatibility
                record.update( { 'geni_urn':'',
                    'keys':'',
                    'key_ids':'' })
                # append the slice record to the list being iterated,
                # so it gets processed by the 'slice' branch too
                record_list.append(recslice)

                logger.debug("SLABDRIVER.PY \tfill_record_info ADDING SLICE\
                    INFO TO USER records %s" %(record_list))

        logger.debug("SLABDRIVER.PY \tfill_record_info END \
            record %s \r\n \r\n " %(record))

    except TypeError, error:
        logger.log_exc("SLABDRIVER \t fill_record_info EXCEPTION %s"\
            %(error))

    return
+
+
def sliver_status(self, slice_urn, slice_hrn):
    """Receive a status request for slice named urn/hrn
    urn:publicid:IDN+senslab+nturro_slice hrn senslab.nturro_slice
    shall return a structure as described in
    http://groups.geni.net/geni/wiki/GAPI_AM_API_V2#SliverStatus
    NT : not sure if we should implement this or not, but used by sface.

    :raises SliverDoesNotExist: when no slice matches slice_hrn.
    :returns: dict with geni_urn/pl_login/geni_status/geni_resources.
    """
    #First get the slice with the slice hrn
    slice_list = self.slab_api.GetSlices(slice_filter = slice_hrn, \
                                    slice_filter_type = 'slice_hrn')

    # Fix: original used "len(slice_list) is 0" - an identity test that
    # only works by CPython small-int caching; use truthiness instead.
    if not slice_list:
        raise SliverDoesNotExist("%s slice_hrn" % (slice_hrn))

    #Used for fetching the user info which comes along the slice info
    one_slice = slice_list[0]

    #Make a list of all the nodes hostnames in use for this slice.
    # Fix: original iterated "for node in one_slice" which walks the
    # dict's *keys* (strings) and then crashed on node['hostname'];
    # iterate the node list instead (assumes the 'node_ids' entries are
    # dicts carrying 'hostname', as used further down - TODO confirm).
    slice_nodes_list = []
    for node in one_slice.get('node_ids', []):
        slice_nodes_list.append(node['hostname'])

    #Get all the corresponding nodes details
    nodes_all = self.slab_api.GetNodes({'hostname':slice_nodes_list},
                        ['node_id', 'hostname','site','boot_state'])
    nodeall_byhostname = dict([(one_node['hostname'], one_node) \
                                        for one_node in nodes_all])

    for single_slice in slice_list:
        #For compatibility
        top_level_status = 'empty'
        # Fix: dict.fromkeys is a constructor returning a NEW dict; the
        # original called result.fromkeys(...) and threw the result away,
        # so the keys were never pre-seeded.
        result = dict.fromkeys(
            ['geni_urn', 'pl_login', 'geni_status', 'geni_resources'], None)
        result['pl_login'] = one_slice['reg_researchers']['hrn']
        logger.debug("Slabdriver - sliver_status Sliver status \
                        urn %s hrn %s single_slice %s \r\n " \
                        %(slice_urn, slice_hrn, single_slice))

        if 'node_ids' not in single_slice:
            #No job in the slice
            result['geni_status'] = top_level_status
            result['geni_resources'] = []
            return result

        top_level_status = 'ready'

        #A job is running on Senslab for this slice
        # report about the local nodes that are in the slice only
        result['geni_urn'] = slice_urn

        resources = []
        for node in single_slice['node_ids']:
            res = {}
            res['pl_hostname'] = node['hostname']
            res['pl_boot_state'] = \
                    nodeall_byhostname[node['hostname']]['boot_state']
            sliver_id = Xrn(slice_urn, type='slice', \
                    id=nodeall_byhostname[node['hostname']]['node_id'], \
                    authority=self.hrn).urn

            res['geni_urn'] = sliver_id
            node_name = node['hostname']
            if nodeall_byhostname[node_name]['boot_state'] == 'Alive':
                res['geni_status'] = 'ready'
            else:
                # any dead node degrades the whole sliver status
                res['geni_status'] = 'failed'
                top_level_status = 'failed'

            res['geni_error'] = ''
            resources.append(res)

        result['geni_status'] = top_level_status
        result['geni_resources'] = resources
        # Fix: original logged the loop variable 'res' after the loop,
        # which is unbound when the node list is empty.
        logger.debug("SLABDRIVER \tsliver_status resources %s "\
                    %(resources))
        return result
+
@staticmethod
def get_user_record( hrn):
    """ Returns the user record based on the hrn from the SFA DB.

    :param hrn: hrn of the user record to look up.
    :returns: the first matching RegRecord, or None if not found.
    """
    return dbsession.query(RegRecord).filter_by(hrn = hrn).first()
+
+
def testbed_name (self):
    """Return this driver's hrn (SFA_INTERFACE_HRN from the config)."""
    return self.hrn
+
# 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
def aggregate_version (self):
    """Advertise the RSpec versions this aggregate understands.

    :returns: dict with the testbed name plus the mandatory
        'geni_request_rspec_versions' and 'geni_ad_rspec_versions'
        lists (each entry a version dict).
    """
    version_manager = VersionManager()
    # a '*' content type counts for both the ad and request lists
    ad_versions = [candidate.to_dict()
                   for candidate in version_manager.versions
                   if candidate.content_type in ('*', 'ad')]
    request_versions = [candidate.to_dict()
                        for candidate in version_manager.versions
                        if candidate.content_type in ('*', 'request')]
    return {
        'testbed':self.testbed_name(),
        'geni_request_rspec_versions': request_versions,
        'geni_ad_rspec_versions': ad_versions,
    }
+
+
def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, \
                                                    users, options):
    """Create/renew the slice's job(s) on OAR from the request RSpec.

    Verifies the slice and person records, turns the RSpec leases into
    OAR jobs grouped by start time, and returns the resulting manifest
    RSpec. Interface unchanged from the SFA driver contract.
    """
    aggregate = SlabAggregate(self)

    slices = SlabSlices(self)
    peer = slices.get_peer(slice_hrn)
    sfa_peer = slices.get_sfa_peer(slice_hrn)
    slice_record = None

    if not isinstance(creds, list):
        creds = [creds]

    if users:
        slice_record = users[0].get('slice_record', {})
        logger.debug("SLABDRIVER.PY \t ===============create_sliver \t\
                                        creds %s \r\n \r\n users %s" \
                                        %(creds, users))
        # keep the requesting user's keys/email for later LDAP lookup
        slice_record['user'] = {'keys':users[0]['keys'], \
                                'email':users[0]['email'], \
                                'hrn':slice_record['reg-researchers'][0]}
    # parse rspec
    rspec = RSpec(rspec_string)
    logger.debug("SLABDRIVER.PY \t create_sliver \trspec.version \
                                    %s slice_record %s users %s" \
                                    %(rspec.version,slice_record, users))

    # ensure slice record exists
    #Removed options to verify_slice SA 14/08/12
    sfa_slice = slices.verify_slice(slice_hrn, slice_record, peer, \
                                                sfa_peer)

    # ensure person records exist
    #verify_persons returns added persons but the return value is unused
    slices.verify_persons(slice_hrn, sfa_slice, users, peer, \
                                        sfa_peer, options=options)
    #requested_attributes returned by rspec.version.get_slice_attributes()
    #unused, removed SA 13/08/12
    rspec.version.get_slice_attributes()

    logger.debug("SLABDRIVER.PY create_sliver slice %s " %(sfa_slice))

    # add/remove slice from nodes
    # Fix: the authority filter used "is" (object identity) on what are
    # authority strings, which silently filtered everything out; compare
    # with == instead.
    requested_slivers = [node.get('component_id') \
                for node in rspec.version.get_nodes_with_slivers()\
                if node.get('authority_id') == self.slab_api.root_auth]
    nodes_with_slivers = list(rspec.version.get_nodes_with_slivers())
    logger.debug("SLADRIVER \tcreate_sliver requested_slivers \
                                requested_slivers %s listnodes %s" \
                                %(requested_slivers, nodes_with_slivers))
    #verify_slice_nodes returns nodes, but unused here. Removed SA 13/08/12.
    #slices.verify_slice_nodes(sfa_slice, requested_slivers, peer)

    # add/remove leases
    requested_lease_list = []

    for lease in rspec.version.get_leases():
        single_requested_lease = {}
        logger.debug("SLABDRIVER.PY \tcreate_sliver lease %s " %(lease))

        # only process leases that are new (no lease_id yet) and local
        if not lease.get('lease_id'):
            if get_authority(lease['component_id']) == self.slab_api.root_auth:
                single_requested_lease['hostname'] = \
                            slab_xrn_to_hostname(\
                                    lease.get('component_id').strip())
                single_requested_lease['start_time'] = \
                                                lease.get('start_time')
                single_requested_lease['duration'] = lease.get('duration')
                #Check the experiment's duration is valid before adding
                #the lease to the requested leases list
                duration_in_seconds = \
                        int(single_requested_lease['duration'])*60
                if duration_in_seconds > self.slab_api.GetLeaseGranularity():
                    requested_lease_list.append(single_requested_lease)

    #Create dict of leases by start_time, regrouping nodes reserved
    #at the same time, for the same amount of time = one job on OAR
    requested_job_dict = {}
    for lease in requested_lease_list:

        #In case it is an asap experiment start_time is empty
        if lease['start_time'] == '':
            lease['start_time'] = '0'

        if lease['start_time'] not in requested_job_dict:
            if isinstance(lease['hostname'], str):
                lease['hostname'] = [lease['hostname']]
            requested_job_dict[lease['start_time']] = lease
        else :
            job_lease = requested_job_dict[lease['start_time']]
            if lease['duration'] == job_lease['duration'] :
                # NOTE(review): if lease['hostname'] is already a list
                # this appends a nested list - confirm whether extend()
                # was intended.
                job_lease['hostname'].append(lease['hostname'])

    logger.debug("SLABDRIVER.PY \tcreate_sliver requested_job_dict %s "\
                                                %(requested_job_dict))
    #verify_slice_leases returns the leases, but the return value is
    #unused here. Removed SA 13/08/12
    slices.verify_slice_leases(sfa_slice, \
                                requested_job_dict, peer)

    return aggregate.get_rspec(slice_xrn=slice_urn, \
            login=sfa_slice['login'], version=rspec.version)
+
+
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
    """Delete every job attached to the slice named by slice_hrn.

    :returns: truthy (1) when there was nothing to delete, True when
        all deletions succeeded, False when at least one failed.
    """
    sfa_slice_list = self.slab_api.GetSlices(slice_filter = slice_hrn, \
                                        slice_filter_type = 'slice_hrn')

    if not sfa_slice_list:
        # nothing to delete - keep the original truthy return value
        return 1

    slices = SlabSlices(self)
    # determine if this is a peer slice
    peer = slices.get_peer(slice_hrn)
    #TODO delete_sliver SA : UnBindObjectFromPeer should be
    #used when there is another
    #senslab testbed, which is not the case 14/08/12 .

    # Fix: the original returned True/False from inside the loop, so
    # only the FIRST slice was ever processed; iterate them all and
    # aggregate the outcome instead.
    all_deleted = True
    #Delete all in the slice
    for sfa_slice in sfa_slice_list:
        logger.debug("SLABDRIVER.PY delete_sliver peer %s \r\n \t sfa_slice %s " %(peer, sfa_slice))
        try:
            #if peer:
            #self.slab_api.UnBindObjectFromPeer('slice', \
                        #sfa_slice['record_id_slice'], \
                        #peer, None)
            self.slab_api.DeleteSliceFromNodes(sfa_slice)
        except Exception:
            # Fix: narrowed the bare except and log the failure
            # instead of silently returning.
            logger.log_exc("SLABDRIVER delete_sliver failed for slice %s"
                           %(sfa_slice))
            all_deleted = False
    return all_deleted
+
+
# first 2 args are None in case of resource discovery
def list_resources (self, slice_urn, slice_hrn, creds, options):
    """Return the (ad or manifest) RSpec for the testbed or a slice.

    :param slice_urn: slice urn, or None for plain resource discovery.
    :param slice_hrn: slice hrn, or None for plain resource discovery.
    :param options: may carry 'geni_rspec_version', 'info',
        'list_leases' and 'geni_available'; these are folded into the
        (currently unused) caching key.
    :returns: the RSpec string built by SlabAggregate.
    """
    #cached_requested = options.get('cached', True)

    version_manager = VersionManager()
    # get the rspec's return format from options
    rspec_version = \
            version_manager.get_version(options.get('geni_rspec_version'))
    version_string = "rspec_%s" % (rspec_version)

    #panos adding the info option to the caching key (can be improved)
    if options.get('info'):
        version_string = version_string + "_" + \
                                    options.get('info', 'default')

    # Adding the list_leases option to the caching key
    if options.get('list_leases'):
        version_string = version_string + "_"+options.get('list_leases', 'default')

    # Adding geni_available to caching key
    if options.get('geni_available'):
        version_string = version_string + "_" + str(options.get('geni_available'))

    # look in cache first (caching currently disabled - self.cache is None)
    #if cached_requested and self.cache and not slice_hrn:
        #rspec = self.cache.get(version_string)
        #if rspec:
            #logger.debug("SlabDriver.ListResources: \
                    #returning cached advertisement")
            #return rspec

    #panos: passing user-defined options
    aggregate = SlabAggregate(self)
    #origin_hrn = Credential(string=creds[0]).get_gid_caller().get_hrn()
    #options.update({'origin_hrn':origin_hrn})
    rspec = aggregate.get_rspec(slice_xrn=slice_urn, \
                    version=rspec_version, options=options)

    # cache the result
    #if self.cache and not slice_hrn:
        #logger.debug("Slab.ListResources: stores advertisement in cache")
        #self.cache.add(version_string, rspec)

    return rspec
+
+
def list_slices (self, creds, options):
    """Return the urns of all the slices known to the testbed.

    :returns: list of slice urns (built from the hrns returned by
        GetSlices). Caching is currently disabled.
    """
    # look in cache first
    #if self.cache:
        #slices = self.cache.get('slices')
        #if slices:
            #logger.debug("PlDriver.list_slices returns from cache")
            #return slices

    # get data from db
    slices = self.slab_api.GetSlices()
    logger.debug("SLABDRIVER.PY \tlist_slices hrn %s \r\n \r\n" %(slices))
    slice_hrns = [slab_slice['hrn'] for slab_slice in slices]

    slice_urns = [hrn_to_urn(slice_hrn, 'slice') \
                                for slice_hrn in slice_hrns]

    # cache the result
    #if self.cache:
        #logger.debug ("SlabDriver.list_slices stores value in cache")
        #self.cache.add('slices', slice_urns)

    return slice_urns
+
+
def register (self, sfa_record, hrn, pub_key):
    """Record creation is not handled through SFA on this testbed.

    Nodes and sites live in OAR, users live in the Senslab LDAP, and
    slices are imported from the LDAP users, so there is nothing for
    SFA to register here.

    :returns: -1, always (operation unsupported).
    """
    return -1
+
+
def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
    """No site or node record update allowed in Senslab.

    Slice updates are forwarded to UpdateSlice (which deletes and
    recreates the OAR job, since slice == job here); user updates go
    to UpdatePerson, optionally replacing the user's ssh key.

    :raises UnknownSfaType: when new_key is given for a non-user record.
    :returns: True.
    """
    pointer = old_sfa_record['pointer']
    old_sfa_record_type = old_sfa_record['type']

    # new_key implemented for users only
    if new_key and old_sfa_record_type not in [ 'user' ]:
        raise UnknownSfaType(old_sfa_record_type)

    #if (type == "authority"):
        #self.shell.UpdateSite(pointer, new_sfa_record)

    if old_sfa_record_type == "slice":
        slab_record = self.slab_api.sfa_fields_to_slab_fields(old_sfa_record_type, \
                                            hrn, new_sfa_record)
        if 'name' in slab_record:
            slab_record.pop('name')
        #Prototype should be UpdateSlice(self,
        #auth, slice_id_or_name, slice_fields)
        #Senslab cannot update slice since slice = job
        #so we must delete and create another job
        self.slab_api.UpdateSlice(pointer, slab_record)

    elif old_sfa_record_type == "user":
        # keep only the person fields that may be updated
        update_fields = {}
        all_fields = new_sfa_record
        for key in all_fields.keys():
            if key in ['first_name', 'last_name', 'title', 'email',
                       'password', 'phone', 'url', 'bio', 'accepted_aup',
                       'enabled']:
                update_fields[key] = all_fields[key]
        self.slab_api.UpdatePerson(pointer, update_fields)

        if new_key:
            # must check this key against the previous one if it exists
            persons = self.slab_api.GetPersons(['key_ids'])
            person = persons[0]
            # Fix: removed the dead "keys = person['key_ids']"
            # assignment that was immediately overwritten below.
            keys = self.slab_api.GetKeys(person['key_ids'])

            # Delete all stale keys
            key_exists = False
            for key in keys:
                if new_key != key['key']:
                    self.slab_api.DeleteKey(key['key_id'])
                else:
                    key_exists = True
            if not key_exists:
                self.slab_api.AddPersonKey(pointer, {'key_type': 'ssh', \
                                                        'key': new_key})

    return True
+
+
def remove (self, sfa_record):
    """Remove a user or slice record from the testbed.

    Users are marked disabled in LDAP; slices have their jobs deleted.
    Other record types are ignored.

    :returns: True, always.
    """
    sfa_record_type = sfa_record['type']
    hrn = sfa_record['hrn']
    if sfa_record_type == 'user':

        #get user from senslab ldap
        person = self.slab_api.GetPersons(sfa_record)
        #No registering at a given site in Senslab.
        #Once registered to the LDAP, all senslab sites are
        #accessible.
        if person :
            #Mark account as disabled in ldap
            self.slab_api.DeletePerson(sfa_record)
    elif sfa_record_type == 'slice':
        if self.slab_api.GetSlices(slice_filter = hrn, \
                                slice_filter_type = 'slice_hrn'):
            self.slab_api.DeleteSlice(sfa_record)

    #elif type == 'authority':
        #if self.GetSites(pointer):
            #self.DeleteSite(pointer)

    return True
+
--- /dev/null
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+
+from sfa.util.config import Config
+from sfa.util.sfalogging import logger
+
+from sqlalchemy import Column, Integer, String
+from sqlalchemy import Table, MetaData
+from sqlalchemy.ext.declarative import declarative_base
+
+from sqlalchemy.dialects import postgresql
+
+from sqlalchemy.exc import NoSuchTableError
+
+
#Dict holding the columns names of the table as keys
#and their type, used for creation of the table
slice_table = {'record_id_user': 'integer PRIMARY KEY references X ON DELETE \
CASCADE ON UPDATE CASCADE','oar_job_id':'integer DEFAULT -1', \
'record_id_slice':'integer', 'slice_hrn':'text NOT NULL'}

#Dict with all the specific senslab tables
tablenames_dict = {'slab_xp': slice_table}

# Declarative base shared by every senslab-specific ORM class;
# SlabDB.createtable() creates all tables registered on it.
SlabBase = declarative_base()
+
+
+
class SenslabXP (SlabBase):
    """ SQL alchemy class to manipulate the slab_xp table in the
    slab_sfa database.

    Maps one row per (slice, OAR job) experiment.
    """
    __tablename__ = 'slab_xp'

    # hrn of the slice that owns the experiment
    slice_hrn = Column(String)
    # OAR job id - primary key of the table
    job_id = Column(Integer, primary_key = True)
    # end time of the job (unix timestamp) - required
    end_time = Column(Integer, nullable = False)

    #oar_job_id = Column( Integer,default = -1)
    #node_list = Column(postgresql.ARRAY(String), nullable =True)

    def __init__ (self, slice_hrn =None, job_id=None, end_time=None):
        """
        Defines a row of the slice_senslab table.

        Each field is only assigned when a truthy value is supplied,
        otherwise the column default applies.
        """
        if slice_hrn:
            self.slice_hrn = slice_hrn
        if job_id :
            self.job_id = job_id
        if end_time:
            self.end_time = end_time

    def __repr__(self):
        """Prints the SQLAlchemy record to the format defined
        by the function.
        """
        result = "<slab_xp : slice_hrn = %s , job_id %s end_time = %s" \
            %(self.slice_hrn, self.job_id, self.end_time)
        result += ">"
        return result
+
+
+
+class SlabDB:
+ """ SQL Alchemy connection class.
+ From alchemy.py
+ """
def __init__(self, config, debug = False):
    """Open a SQLAlchemy engine on the slab_sfa database.

    Tries a unix-socket URL first, then falls back to TCP; keeps the
    first URL whose engine answers a trivial query (see check()).

    :param config: SFA config providing SFA_DB_USER/PASSWORD/HOST/PORT.
    :param debug: when True, enable SQLAlchemy echo and pool echo.
    :raises Exception: when neither connection method works.
    """
    self.sl_base = SlabBase
    dbname = "slab_sfa"
    if debug == True :
        l_echo_pool = True
        l_echo = True
    else :
        l_echo_pool = False
        l_echo = False

    self.slab_session = None
    # the former PostgreSQL.py used psycopg2 directly and was doing
    #self.connection.set_client_encoding("UNICODE")
    # it's unclear how to achieve this in sqlalchemy, nor if it's needed
    # at all
    # http://www.sqlalchemy.org/docs/dialects/postgresql.html#unicode
    # we indeed have /var/lib/pgsql/data/postgresql.conf where
    # this setting is unset, it might be an angle to tweak that if need be

    # try a unix socket first - omitting the hostname does the trick
    unix_url = "postgresql+psycopg2://%s:%s@:%s/%s"% \
        (config.SFA_DB_USER, config.SFA_DB_PASSWORD, \
        config.SFA_DB_PORT, dbname)

    # the TCP fallback method
    tcp_url = "postgresql+psycopg2://%s:%s@%s:%s/%s"% \
        (config.SFA_DB_USER, config.SFA_DB_PASSWORD, config.SFA_DB_HOST, \
        config.SFA_DB_PORT, dbname)
    for url in [ unix_url, tcp_url ] :
        try:
            self.slab_engine = create_engine (url, echo_pool = \
                                        l_echo_pool, echo = l_echo)
            self.check()
            self.url = url
            return
        except Exception:
            # Fix: narrowed the bare except - this is deliberate
            # best-effort fallback to the next candidate URL.
            pass
    self.slab_engine = None
    # Fix: replaced the Python-2-only "raise Exception, msg" statement
    # form with the call form, valid on both Python 2 and 3.
    raise Exception("Could not connect to database")
+
+
+
def check (self):
    """ Check that the engine can reach the database by running a
    trivial "select 1" query (raises on connection failure).
    """
    self.slab_engine.execute ("select 1").scalar()
+
+
def session (self):
    """
    Creates a SQLalchemy session. Once the session object is created
    it should be used throughout the code for all the operations on
    tables for this given database.

    :returns: the lazily-created, cached session bound to slab_engine.
    """
    if self.slab_session is None:
        Session = sessionmaker()
        self.slab_session = Session(bind = self.slab_engine)
    return self.slab_session
+
def close_session(self):
    """
    Close the cached SQLAlchemy session (if one is open) and forget
    it, so that session() will create a fresh one next time.
    """
    current = self.slab_session
    if current is not None:
        current.close()
        self.slab_session = None
+
+
def exists(self, tablename):
    """
    Checks if the table specified as tablename exists, by asking
    SQLAlchemy to reflect it from the live engine.

    :returns: True when the table exists, False otherwise (the
        failure is also logged).
    """
    try:
        metadata = MetaData (bind=self.slab_engine)
        # reflection raises NoSuchTableError when the table is missing;
        # the Table object itself is not used further
        table = Table (tablename, metadata, autoload=True)
        return True

    except NoSuchTableError:
        logger.log_exc("SLABPOSTGRES tablename %s does not exists" \
                        %(tablename))
        return False
+
+
def createtable(self):
    """
    Creates all the tables of the engine.
    Uses the global dictionary holding the tablenames and the table
    schema.

    NOTE(review): this relies on the module-level global slab_engine
    (defined at import time below), not self.slab_engine - confirm
    this is intended.
    """
    logger.debug("SLABPOSTGRES createtable SlabBase.metadata.sorted_tables \
        %s \r\n engine %s" %(SlabBase.metadata.sorted_tables , slab_engine))
    SlabBase.metadata.create_all(slab_engine)
    return
+
+
+
# Module-level singletons: importing this module immediately connects
# to the database (SlabDB raises if no connection can be established).
slab_alchemy = SlabDB(Config())
slab_engine = slab_alchemy.slab_engine
slab_dbsession = slab_alchemy.session()
--- /dev/null
+from sfa.util.xrn import get_authority, urn_to_hrn
+from sfa.util.sfalogging import logger
+
+
# Largest 32-bit signed integer (Python 2 long literal).
MAXINT = 2L**31-1

class SlabSlices:
    """Helpers to reconcile SFA slice requests with the Senslab testbed
    (OAR jobs, LDAP users, peer authorities)."""

    # mapping from RSpec attribute names to slice tag names
    rspec_to_slice_tag = {'max_rate':'net_max_rate'}

    def __init__(self, driver):
        # the SlabDriver instance giving access to slab_api and the SFA db
        self.driver = driver
+
+
def get_peer(self, xrn):
    """Find the peer authority record for xrn's site, if any.

    :returns: the matching peer record, or None when the slice belongs
        to the local senslab authority or no registered peer matches.
    """
    hrn, hrn_type = urn_to_hrn(xrn)
    #Does this slice belong to a local site or a peer senslab site?
    peer = None

    # get this slice's authority (site)
    slice_authority = get_authority(hrn)
    #Senslab stuff
    #This slice belongs to the current site
    if slice_authority == self.driver.slab_api.root_auth:
        site_authority = slice_authority
        return None

    # get this site's authority (sfa root authority or sub authority)
    site_authority = get_authority(slice_authority).lower()

    logger.debug("SLABSLICES \ get_peer slice_authority %s \
                site_authority %s hrn %s" %(slice_authority, \
                site_authority, hrn))

    # check if we are already peered with this site_authority, if so
    #peers = self.driver.slab_api.GetPeers({})
    peers = self.driver.slab_api.GetPeers(peer_filter = site_authority)
    for peer_record in peers:
        if site_authority == peer_record.hrn:
            peer = peer_record
    logger.debug(" SLABSLICES \tget_peer peer %s " %(peer))
    return peer
+
def get_sfa_peer(self, xrn):
    """Return the site authority hrn for xrn, or None when this
    driver itself is that authority."""
    hrn, hrn_type = urn_to_hrn(xrn)

    # site authority = authority of the slice's authority
    site_authority = get_authority(get_authority(hrn))

    if site_authority == self.driver.hrn:
        return None
    return site_authority
+
+
def verify_slice_leases(self, sfa_slice, requested_jobs_dict, peer):
    """Reconcile the leases registered in OAR with the requested jobs.

    Computes the difference between current OAR leases and the
    requested jobs (keyed by start time): deleted leases are removed,
    brand-new leases are added, and modified leases are deleted then
    re-added (rescheduled).

    :param requested_jobs_dict: dict keyed by start_time (string) with
        'hostname', 'start_time' and 'duration' entries.
    :returns: the list of leases as fetched from OAR at entry.
    """
    logger.debug("SLABSLICES verify_slice_leases sfa_slice %s \
                    "%( sfa_slice))
    #First get the list of current leases from OAR
    leases = self.driver.slab_api.GetLeases({'name':sfa_slice['hrn']})
    logger.debug("SLABSLICES verify_slice_leases requested_jobs_dict %s \
                    leases %s "%(requested_jobs_dict, leases ))

    current_nodes_reserved_by_start_time = {}
    requested_nodes_by_start_time = {}
    leases_by_start_time = {}
    reschedule_jobs_dict = {}

    #Create reduced dictionary with key start_time and value
    # the list of nodes
    #-for the leases already registered by OAR first
    # then for the new leases requested by the user

    #Leases already scheduled/running in OAR
    for lease in leases :
        current_nodes_reserved_by_start_time[lease['t_from']] = \
                lease['reserved_nodes']
        leases_by_start_time[lease['t_from']] = lease

    #Requested jobs
    for start_time in requested_jobs_dict:
        requested_nodes_by_start_time[int(start_time)] = \
                requested_jobs_dict[start_time]['hostname']
    #Check if there is any difference between the leases already
    #registered in OAR and the requested jobs.
    #Difference could be:
    #-Lease deleted in the requested jobs
    #-Added/removed nodes
    #-Newly added lease

    logger.debug("SLABSLICES verify_slice_leases \
                    requested_nodes_by_start_time %s \
                    "%(requested_nodes_by_start_time ))
    #Find all deleted leases (start times in OAR but not requested)
    start_time_list = \
        list(set(leases_by_start_time.keys()).\
        difference(requested_nodes_by_start_time.keys()))
    deleted_leases = [leases_by_start_time[start_time]['lease_id'] \
                        for start_time in start_time_list]

    #Find added or removed nodes in existing leases
    for start_time in requested_nodes_by_start_time:
        logger.debug("SLABSLICES verify_slice_leases start_time %s \
                        "%( start_time))
        if start_time in current_nodes_reserved_by_start_time:

            # unchanged lease: nothing to do
            if requested_nodes_by_start_time[start_time] == \
                current_nodes_reserved_by_start_time[start_time]:
                continue

            else:
                update_node_set = \
                        set(requested_nodes_by_start_time[start_time])
                added_nodes = \
                    update_node_set.difference(\
                    current_nodes_reserved_by_start_time[start_time])
                shared_nodes = \
                    update_node_set.intersection(\
                    current_nodes_reserved_by_start_time[start_time])
                old_nodes_set = \
                    set(\
                    current_nodes_reserved_by_start_time[start_time])
                removed_nodes = \
                    old_nodes_set.difference(\
                    requested_nodes_by_start_time[start_time])
                logger.debug("SLABSLICES verify_slice_leases \
                    shared_nodes %s added_nodes %s removed_nodes %s"\
                    %(shared_nodes, added_nodes,removed_nodes ))
                #If the lease is modified, delete it before
                #creating it again.
                #Add the deleted lease job id in the list
                #WARNING :rescheduling does not work if there is already
                # 2 running/scheduled jobs because deleting a job
                #takes time SA 18/10/2012
                if added_nodes or removed_nodes:
                    deleted_leases.append(\
                        leases_by_start_time[start_time]['lease_id'])
                #Reschedule the job
                if added_nodes or shared_nodes:
                    reschedule_jobs_dict[str(start_time)] = \
                        requested_jobs_dict[str(start_time)]

        else:
            #New lease: submit it to OAR right away
            job = requested_jobs_dict[str(start_time)]
            logger.debug("SLABSLICES \
                    NEWLEASE slice %s job %s"\
                    %(sfa_slice, job))
            self.driver.slab_api.AddLeases(job['hostname'], \
                    sfa_slice, int(job['start_time']), \
                    int(job['duration']))

    #Deleted leases are the ones with lease id not declared in the Rspec
    if deleted_leases:
        self.driver.slab_api.DeleteLeases(deleted_leases, sfa_slice['hrn'])
        logger.debug("SLABSLICES \
                verify_slice_leases slice %s deleted_leases %s"\
                %(sfa_slice, deleted_leases))

    # re-submit the modified leases (they were deleted above)
    if reschedule_jobs_dict :
        for start_time in reschedule_jobs_dict:
            job = reschedule_jobs_dict[start_time]
            self.driver.slab_api.AddLeases(job['hostname'], \
                sfa_slice, int(job['start_time']), \
                int(job['duration']))
    return leases
+
def verify_slice_nodes(self, sfa_slice, requested_slivers, peer):
    """Compare the slice's current nodes with the requested slivers
    and delete the whole experiment when any node was removed.

    :param requested_slivers: hostnames requested in the RSpec.
    :returns: the list of node dicts currently attached to the slice
        (empty when the slice has no 'node_ids').
    """
    current_slivers = []
    deleted_nodes = []
    # Fix: 'nodes' was only assigned inside the if-branch, so the
    # final "return nodes" raised NameError when 'node_ids' was absent.
    nodes = []

    if 'node_ids' in sfa_slice:
        # NOTE(review): the guard tests 'node_ids' but the lookup uses
        # 'list_node_ids' - confirm which key GetNodes really expects.
        nodes = self.driver.slab_api.GetNodes(sfa_slice['list_node_ids'], \
                        ['hostname'])
        current_slivers = [node['hostname'] for node in nodes]

        # remove nodes not in rspec
        deleted_nodes = list(set(current_slivers).\
                                    difference(requested_slivers))
        # add nodes from rspec
        #added_nodes = list(set(requested_slivers).\
                                    #difference(current_slivers))

    logger.debug("SLABSLICES \tverify_slice_nodes slice %s\
                                \r\n \r\n deleted_nodes %s"\
                                %(sfa_slice, deleted_nodes))

    if deleted_nodes:
        #Delete the entire experiment (slice == OAR job here)
        self.driver.slab_api.DeleteSliceFromNodes(sfa_slice)
        #self.driver.DeleteSliceFromNodes(sfa_slice['slice_hrn'], \
                                                #deleted_nodes)
    return nodes
+
+
+
def free_egre_key(self):
    """Return the smallest unused EGRE key in [1, 255], as a string.

    :raises KeyError: when all 255 keys are already in use.
    """
    taken = set(int(tag['value'])
                for tag in self.driver.slab_api.GetSliceTags(
                    {'tagname': 'egre_key'}))

    for candidate in range(1, 256):
        if candidate not in taken:
            return str(candidate)
    raise KeyError("No more EGRE keys available")
+
+
+
+
+
+
def handle_peer(self, site, sfa_slice, persons, peer):
    """Bind the site, slice, persons and their keys to a peer
    authority, rolling back (deleting) the object whose binding fails.

    :returns: sfa_slice, unchanged.
    """
    if peer:
        # bind site
        try:
            if site:
                self.driver.slab_api.BindObjectToPeer('site', site['site_id'], \
                    peer['shortname'], sfa_slice['site_id'])
        except Exception as error:
            self.driver.slab_api.DeleteSite(site['site_id'])
            raise error

        # bind slice
        try:
            if sfa_slice:
                # Fix: the original subscripted the *builtin* slice type
                # ("slice['slice_id']"), which raises TypeError at
                # runtime; use the sfa_slice dict.
                self.driver.slab_api.BindObjectToPeer('slice', \
                    sfa_slice['slice_id'], \
                    peer['shortname'], sfa_slice['slice_id'])
        except Exception as error:
            self.driver.slab_api.DeleteSlice(sfa_slice['slice_id'])
            raise error

        # bind persons
        for person in persons:
            try:
                self.driver.slab_api.BindObjectToPeer('person', \
                    person['person_id'], peer['shortname'], \
                    person['peer_person_id'])

                for (key, remote_key_id) in zip(person['keys'], \
                    person['key_ids']):
                    try:
                        self.driver.slab_api.BindObjectToPeer( 'key', \
                            key['key_id'], peer['shortname'], \
                            remote_key_id)
                    except Exception:
                        # best-effort: drop the key locally and log,
                        # but keep binding the remaining keys
                        self.driver.slab_api.DeleteKey(key['key_id'])
                        logger.log_exc("failed to bind key: %s \
                            to peer: %s " % (key['key_id'], \
                            peer['shortname']))
            except Exception as error:
                self.driver.slab_api.DeletePerson(person['person_id'])
                raise error

    return sfa_slice
+
+ #def verify_site(self, slice_xrn, slice_record={}, peer=None, \
+ #sfa_peer=None, options={}):
+ #(slice_hrn, type) = urn_to_hrn(slice_xrn)
+ #site_hrn = get_authority(slice_hrn)
+ ## login base can't be longer than 20 characters
+ ##slicename = hrn_to_pl_slicename(slice_hrn)
+ #authority_name = slice_hrn.split('.')[0]
+ #login_base = authority_name[:20]
+ #logger.debug(" SLABSLICES.PY \tverify_site authority_name %s \
+ #login_base %s slice_hrn %s" \
+ #%(authority_name,login_base,slice_hrn)
+
+ #sites = self.driver.slab_api.GetSites(login_base)
+ #if not sites:
+ ## create new site record
+ #site = {'name': 'geni.%s' % authority_name,
+ #'abbreviated_name': authority_name,
+ #'login_base': login_base,
+ #'max_slices': 100,
+ #'max_slivers': 1000,
+ #'enabled': True,
+ #'peer_site_id': None}
+ #if peer:
+ #site['peer_site_id'] = slice_record.get('site_id', None)
+ #site['site_id'] = self.driver.slab_api.AddSite(site)
+ ## exempt federated sites from monitor policies
+ #self.driver.slab_api.AddSiteTag(site['site_id'], 'exempt_site_until', \
+ #"20200101")
+
+ ### is this still necessary?
+ ### add record to the local registry
+ ##if sfa_peer and slice_record:
+ ##peer_dict = {'type': 'authority', 'hrn': site_hrn, \
+ ##'peer_authority': sfa_peer, 'pointer': \
+ #site['site_id']}
+ ##self.registry.register_peer_object(self.credential, peer_dict)
+ #else:
+ #site = sites[0]
+ #if peer:
+ ## unbind from peer so we can modify if necessary.
+ ## Will bind back later
+ #self.driver.slab_api.UnBindObjectFromPeer('site', site['site_id'], \
+ #peer['shortname'])
+
+ #return site
+
+    def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer):
+        """Ensure the slice known as slice_hrn exists on the senslab side.
+
+        If GetSlices already has an entry for slice_hrn, that entry is
+        refreshed in place with the fields of slice_record.  Otherwise a
+        new slice dict is built from slice_record and - when the slice
+        owner can be found in LDAP by email - registered with AddSlice.
+        Returns the slice dict, or None when the lookup returned nothing
+        and no LDAP user was found.
+        NOTE(review): 'user' is only bound inside the 'if ldap_user'
+        branch, so AddSlice must stay nested there - confirm intent.
+        """
+        #login_base = slice_hrn.split(".")[0]
+        slicename = slice_hrn
+        # look the slice up on the testbed side, by hrn
+        slices_list = self.driver.slab_api.GetSlices(slice_filter = slicename, \
+                                            slice_filter_type = 'slice_hrn')
+        sfa_slice = None
+        if slices_list:
+            for sl in slices_list:
+
+                logger.debug("SLABSLICE \tverify_slice slicename %s slices_list %s sl %s \
+                                slice_record %s"%(slicename, slices_list,sl, \
+                                slice_record))
+                sfa_slice = sl
+                # refresh the testbed entry with the incoming record fields
+                sfa_slice.update(slice_record)
+                #del slice['last_updated']
+                #del slice['date_created']
+                #if peer:
+                    #slice['peer_slice_id'] = slice_record.get('slice_id', None)
+                    ## unbind from peer so we can modify if necessary.
+                    ## Will bind back later
+                    #self.driver.slab_api.UnBindObjectFromPeer('slice', \
+                                                    #slice['slice_id'], \
+                                                    #peer['shortname'])
+                #Update existing record (e.g. expires field)
+                #it with the latest info.
+                ##if slice_record and slice['expires'] != slice_record['expires']:
+                    ##self.driver.slab_api.UpdateSlice( slice['slice_id'], {'expires' : \
+                                                    #slice_record['expires']})
+        else:
+            #Search for user in ldap based on email SA 14/11/12
+            ldap_user = self.driver.slab_api.ldap.LdapFindUser(slice_record['user'])
+            logger.debug(" SLABSLICES \tverify_slice Oups \
+                        slice_record %s peer %s sfa_peer %s ldap_user %s" \
+                        %(slice_record, peer,sfa_peer ,ldap_user ))
+            #User already registered in ldap, meaning user should be in SFA db
+            #and hrn = sfa_auth+ uid
+            sfa_slice = {'hrn': slicename,
+                        #'url': slice_record.get('url', slice_hrn),
+                        #'description': slice_record.get('description', slice_hrn)
+                        'node_list' : [],
+                        'authority' : slice_record['authority'],
+                        'gid':slice_record['gid'],
+                        #'record_id_user' : user.record_id,
+                        'slice_id' : slice_record['record_id'],
+                        'reg-researchers':slice_record['reg-researchers'],
+                        #'record_id_slice': slice_record['record_id'],
+                        'peer_authority':str(sfa_peer)
+
+                        }
+            if ldap_user :
+                # derive the owner's hrn from its LDAP uid
+                hrn = self.driver.slab_api.root_auth +'.'+ ldap_user['uid']
+
+                user = self.driver.get_user_record(hrn)
+
+                logger.debug(" SLABSLICES \tverify_slice hrn %s USER %s" %(hrn, user))
+                # (an older variant of the sfa_slice dict, keyed by
+                # 'slice_hrn' and using peer.hrn, used to be rebuilt here)
+                # add the slice
+                if sfa_slice :
+                    self.driver.slab_api.AddSlice(sfa_slice, user)
+
+                if peer:
+                    sfa_slice['slice_id'] = slice_record['record_id']
+
+                #slice['slice_id'] = self.driver.slab_api.AddSlice(slice)
+                logger.debug("SLABSLICES \tverify_slice ADDSLICE OK")
+                #slice['node_ids']=[]
+                #slice['person_ids'] = []
+                #if peer:
+                    #sfa_slice['peer_slice_id'] = slice_record.get('slice_id', None)
+                # mark this slice as an sfa peer record
+                #if sfa_peer:
+                    #peer_dict = {'type': 'slice', 'hrn': slice_hrn,
+                                #'peer_authority': sfa_peer, 'pointer': \
+                                #slice['slice_id']}
+                    #self.registry.register_peer_object(self.credential, peer_dict)
+
+        return sfa_slice
+
+ def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, \
+ options={}):
+ """
+ users is a record list. Records can either be local records
+ or users records from known and trusted federated sites.
+ If the user is from another site that senslab doesn't trust yet,
+ then Resolve will raise an error before getting to create_sliver.
+ """
+ #TODO SA 21/08/12 verify_persons Needs review
+
+ logger.debug("SLABSLICES \tverify_persons \tslice_hrn %s \t slice_record %s\r\n users %s \t peer %s "%( slice_hrn, slice_record, users, peer))
+ users_by_id = {}
+ #users_by_hrn = {}
+ users_by_email = {}
+ #users_dict : dict whose keys can either be the user's hrn or its id.
+ #Values contains only id and hrn
+ users_dict = {}
+
+ #First create dicts by hrn and id for each user in the user record list:
+ for info in users:
+
+ if 'slice_record' in info :
+ slice_rec = info['slice_record']
+ user = slice_rec['user']
+
+ if 'email' in user:
+ users_by_email[user['email']] = user
+ users_dict[user['email']] = user
+
+ #if 'hrn' in user:
+ #users_by_hrn[user['hrn']] = user
+ #users_dict[user['hrn']] = user
+
+ logger.debug( "SLABSLICE.PY \t verify_person \
+ users_dict %s \r\n user_by_email %s \r\n \
+ \tusers_by_id %s " \
+ %(users_dict,users_by_email, users_by_id))
+
+ existing_user_ids = []
+ #existing_user_hrns = []
+ existing_user_emails = []
+ existing_users = []
+ # Check if user is in Senslab LDAP using its hrn.
+ # Assuming Senslab is centralised : one LDAP for all sites,
+ # user_id unknown from LDAP
+ # LDAP does not provide users id, therefore we rely on hrns containing
+ # the login of the user.
+ # If the hrn is not a senslab hrn, the user may not be in LDAP.
+ #if users_by_hrn:
+ if users_by_email :
+ #Construct the list of filters (list of dicts) for GetPersons
+ filter_user = []
+ #for hrn in users_by_hrn:
+ for email in users_by_email :
+ #filter_user.append (users_by_hrn[hrn])
+ filter_user.append (users_by_email[email])
+ #Check user's in LDAP with GetPersons
+ #Needed because what if the user has been deleted in LDAP but
+ #is still in SFA?
+ existing_users = self.driver.slab_api.GetPersons(filter_user)
+ logger.debug(" \r\n SLABSLICE.PY \tverify_person filter_user %s existing_users %s " \
+ %(filter_user, existing_users))
+ #User's in senslab LDAP
+ if existing_users:
+ for user in existing_users :
+ users_dict[user['email']].update(user)
+ existing_user_emails.append(users_dict[user['email']]['email'])
+
+ #existing_user_hrns.append(users_dict[user['hrn']]['hrn'])
+ #existing_user_ids.\
+ #append(users_dict[user['hrn']]['person_id'])
+
+ # User from another known trusted federated site. Check
+ # if a senslab account matching the email has already been created.
+ else:
+ req = 'mail='
+ if isinstance(users, list):
+
+ req += users[0]['email']
+ else:
+ req += users['email']
+
+ ldap_reslt = self.driver.slab_api.ldap.LdapSearch(req)
+ if ldap_reslt:
+ logger.debug(" SLABSLICE.PY \tverify_person users \
+ USER already in Senslab \t ldap_reslt %s \
+ "%( ldap_reslt))
+ existing_users.append(ldap_reslt[1])
+
+ else:
+ #User not existing in LDAP
+ #TODO SA 21/08/12 raise smthg to add user or add it auto ?
+ #new_record = {}
+ #new_record['pkey'] = users[0]['keys'][0]
+ #new_record['mail'] = users[0]['email']
+
+ logger.debug(" SLABSLICE.PY \tverify_person users \
+ not in ldap ...NEW ACCOUNT NEEDED %s \r\n \t \
+ ldap_reslt %s " %(users, ldap_reslt))
+
+ #requested_user_ids = users_by_id.keys()
+ #requested_user_hrns = users_by_hrn.keys()
+ requested_user_emails = users_by_email.keys()
+ logger.debug("SLABSLICE.PY \tverify_person \
+ users_by_email %s " %( users_by_email))
+ #logger.debug("SLABSLICE.PY \tverify_person \
+ #user_by_hrn %s " %( users_by_hrn))
+
+
+ #Check that the user of the slice in the slice record
+ #matches the existing users
+ try:
+ if slice_record['PI'][0] in requested_user_hrns:
+ #if slice_record['record_id_user'] in requested_user_ids and \
+ #slice_record['PI'][0] in requested_user_hrns:
+ logger.debug(" SLABSLICE \tverify_person ['PI'] slice_record %s" \
+ %(slice_record))
+
+ except KeyError:
+ pass
+
+
+ # users to be added, removed or updated
+ #One user in one senslab slice : there should be no need
+ #to remove/ add any user from/to a slice.
+ #However a user from SFA which is not registered in Senslab yet
+ #should be added to the LDAP.
+ added_user_emails = set(requested_user_emails).\
+ difference(set(existing_user_emails))
+ #added_user_hrns = set(requested_user_hrns).\
+ #difference(set(existing_user_hrns))
+
+ #self.verify_keys(existing_slice_users, updated_users_list, \
+ #peer, append)
+
+ added_persons = []
+ # add new users
+
+ #requested_user_email is in existing_user_emails
+ if len(added_user_emails) == 0:
+
+ slice_record['login'] = users_dict[requested_user_emails[0]]['uid']
+ logger.debug(" SLABSLICE \tverify_person QUICK DIRTY %s" \
+ %(slice_record))
+
+ #for added_user_hrn in added_user_hrns:
+ #added_user = users_dict[added_user_hrn]
+
+
+ for added_user_email in added_user_emails:
+ #hrn, type = urn_to_hrn(added_user['urn'])
+ added_user = users_dict[added_user_email]
+ logger.debug(" SLABSLICE \r\n \r\n \t THE SECOND verify_person added_user %s" %(added_user))
+ person = {}
+ person['peer_person_id'] = None
+ k_list = ['first_name','last_name','person_id']
+ for k in k_list:
+ if k in added_user:
+ person[k] = added_user[k]
+
+ person['pkey'] = added_user['keys'][0]
+ person['mail'] = added_user['email']
+ person['email'] = added_user['email']
+ person['key_ids'] = added_user.get('key_ids', [])
+ #person['urn'] = added_user['urn']
+
+ #person['person_id'] = self.driver.slab_api.AddPerson(person)
+ person['uid'] = self.driver.slab_api.AddPerson(person)
+
+ logger.debug(" SLABSLICE \r\n \r\n \t THE SECOND verify_person ppeersonne %s" %(person))
+ #Update slice_Record with the id now known to LDAP
+ slice_record['login'] = person['uid']
+ #slice_record['reg_researchers'] = [self.driver.slab_api.root_auth + '.' + person['uid']]
+ #slice_record['reg-researchers'] = slice_record['reg_researchers']
+
+ #if peer:
+ #person['peer_person_id'] = added_user['person_id']
+ added_persons.append(person)
+
+ # enable the account
+ #self.driver.slab_api.UpdatePerson(slice_record['reg_researchers'][0], added_user_email)
+
+ # add person to site
+ #self.driver.slab_api.AddPersonToSite(added_user_id, login_base)
+
+ #for key_string in added_user.get('keys', []):
+ #key = {'key':key_string, 'key_type':'ssh'}
+ #key['key_id'] = self.driver.slab_api.AddPersonKey(person['person_id'], \
+ # key)
+ #person['keys'].append(key)
+
+ # add the registry record
+ #if sfa_peer:
+ #peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': \
+ #sfa_peer, \
+ #'pointer': person['person_id']}
+ #self.registry.register_peer_object(self.credential, peer_dict)
+ #for added_slice_user_hrn in \
+ #added_slice_user_hrns.union(added_user_hrns):
+ #self.driver.slab_api.AddPersonToSlice(added_slice_user_hrn, \
+ #slice_record['name'])
+ #for added_slice_user_id in \
+ #added_slice_user_ids.union(added_user_ids):
+ # add person to the slice
+ #self.driver.slab_api.AddPersonToSlice(added_slice_user_id, \
+ #slice_record['name'])
+ # if this is a peer record then it
+ # should already be bound to a peer.
+ # no need to return worry about it getting bound later
+
+ return added_persons
+
+ #Unused
+ def verify_keys(self, persons, users, peer, options={}):
+ # existing keys
+ key_ids = []
+ for person in persons:
+ key_ids.extend(person['key_ids'])
+ keylist = self.driver.slab_api.GetKeys(key_ids, ['key_id', 'key'])
+ keydict = {}
+ for key in keylist:
+ keydict[key['key']] = key['key_id']
+ existing_keys = keydict.keys()
+ persondict = {}
+ for person in persons:
+ persondict[person['email']] = person
+
+ # add new keys
+ requested_keys = []
+ updated_persons = []
+ for user in users:
+ user_keys = user.get('keys', [])
+ updated_persons.append(user)
+ for key_string in user_keys:
+ requested_keys.append(key_string)
+ if key_string not in existing_keys:
+ key = {'key': key_string, 'key_type': 'ssh'}
+ try:
+ if peer:
+ person = persondict[user['email']]
+ self.driver.slab_api.UnBindObjectFromPeer('person', \
+ person['person_id'], peer['shortname'])
+ key['key_id'] = \
+ self.driver.slab_api.AddPersonKey(user['email'], key)
+ if peer:
+ key_index = user_keys.index(key['key'])
+ remote_key_id = user['key_ids'][key_index]
+ self.driver.slab_api.BindObjectToPeer('key', \
+ key['key_id'], peer['shortname'], \
+ remote_key_id)
+
+ finally:
+ if peer:
+ self.driver.slab_api.BindObjectToPeer('person', \
+ person['person_id'], peer['shortname'], \
+ user['person_id'])
+
+ # remove old keys (only if we are not appending)
+ append = options.get('append', True)
+ if append == False:
+ removed_keys = set(existing_keys).difference(requested_keys)
+ for existing_key_id in keydict:
+ if keydict[existing_key_id] in removed_keys:
+
+ if peer:
+ self.driver.slab_api.UnBindObjectFromPeer('key', \
+ existing_key_id, peer['shortname'])
+ self.driver.slab_api.DeleteKey(existing_key_id)
+
+
+ #def verify_slice_attributes(self, slice, requested_slice_attributes, \
+ #append=False, admin=False):
+ ## get list of attributes users ar able to manage
+ #filter = {'category': '*slice*'}
+ #if not admin:
+ #filter['|roles'] = ['user']
+ #slice_attributes = self.driver.slab_api.GetTagTypes(filter)
+ #valid_slice_attribute_names = [attribute['tagname'] \
+ #for attribute in slice_attributes]
+
+ ## get sliver attributes
+ #added_slice_attributes = []
+ #removed_slice_attributes = []
+ #ignored_slice_attribute_names = []
+ #existing_slice_attributes = self.driver.slab_api.GetSliceTags({'slice_id': \
+ #slice['slice_id']})
+
+ ## get attributes that should be removed
+ #for slice_tag in existing_slice_attributes:
+ #if slice_tag['tagname'] in ignored_slice_attribute_names:
+ ## If a slice already has a admin only role
+ ## it was probably given to them by an
+ ## admin, so we should ignore it.
+ #ignored_slice_attribute_names.append(slice_tag['tagname'])
+ #else:
+ ## If an existing slice attribute was not
+ ## found in the request it should
+ ## be removed
+ #attribute_found=False
+ #for requested_attribute in requested_slice_attributes:
+ #if requested_attribute['name'] == slice_tag['tagname'] \
+ #and requested_attribute['value'] == slice_tag['value']:
+ #attribute_found=True
+ #break
+
+ #if not attribute_found and not append:
+ #removed_slice_attributes.append(slice_tag)
+
+ ## get attributes that should be added:
+ #for requested_attribute in requested_slice_attributes:
+ ## if the requested attribute wasn't found we should add it
+ #if requested_attribute['name'] in valid_slice_attribute_names:
+ #attribute_found = False
+ #for existing_attribute in existing_slice_attributes:
+ #if requested_attribute['name'] == \
+ #existing_attribute['tagname'] and \
+ #requested_attribute['value'] == \
+ #existing_attribute['value']:
+ #attribute_found=True
+ #break
+ #if not attribute_found:
+ #added_slice_attributes.append(requested_attribute)
+
+
+ ## remove stale attributes
+ #for attribute in removed_slice_attributes:
+ #try:
+ #self.driver.slab_api.DeleteSliceTag(attribute['slice_tag_id'])
+ #except Exception, error:
+ #self.logger.warn('Failed to remove sliver attribute. name: \
+ #%s, value: %s, node_id: %s\nCause:%s'\
+ #% (name, value, node_id, str(error)))
+
+ ## add requested_attributes
+ #for attribute in added_slice_attributes:
+ #try:
+ #self.driver.slab_api.AddSliceTag(slice['name'], attribute['name'], \
+ #attribute['value'], attribute.get('node_id', None))
+ #except Exception, error:
+ #self.logger.warn('Failed to add sliver attribute. name: %s, \
+ #value: %s, node_id: %s\nCause:%s'\
+ #% (name, value, node_id, str(error)))
+
+
code = {
'geni_code': GENICODE.SUCCESS,
'am_type': 'sfa',
- 'am_code': None,
}
if isinstance(result, SfaFault):
code['geni_code'] = result.faultCode
(config.SFA_DB_USER,config.SFA_DB_PASSWORD,config.SFA_DB_HOST,config.SFA_DB_PORT,dbname)
for url in [ unix_url, tcp_url ] :
try:
+ logger.debug("Trying db URL %s"%url)
self.engine = create_engine (url)
self.check()
self.url=url
except:
pass
self.engine=None
- raise Exception,"Could not connect to database"
-
+ raise Exception,"Could not connect to database %s as %s with psycopg2"%(dbname,config.SFA_DB_USER)
+
# expects boolean True: debug is ON or False: debug is OFF
def debug (self, echo):
logger.info("load from xml, keys=%s"%xml_dict.keys())
return make_record_dict (xml_dict)
+####################
+# augment local records with data from builtin relationships
+# expose related objects as a list of hrns
+# we pick names that clearly won't conflict with the ones used in the old approach,
+# where the relationships data came from the testbed side
+# for each type, a dict of the form {<field-name-exposed-in-record>:<alchemy_accessor_name>}
+# so after that, an 'authority' record will e.g. have a 'reg-pis' field with the hrns of its pi-users
+augment_map={'authority': {'reg-pis':'reg_pis',},
+             'slice': {'reg-researchers':'reg_researchers',},
+             'user': {'reg-pi-authorities':'reg_authorities_as_pi',
+                      'reg-slices':'reg_slices_as_researcher',},
+             }
+
+# Decorate a registry record (in place) with the synthetic 'reg-*' fields
+# described in augment_map above, plus 'reg-urn' and (for users) 'reg-keys'.
+def augment_with_sfa_builtins (local_record):
+    # don't ruin the import of that file in a client world
+    from sfa.util.xrn import Xrn
+    # add a 'urn' field
+    setattr(local_record,'reg-urn',Xrn(xrn=local_record.hrn,type=local_record.type).urn)
+    # users have keys and this is needed to synthesize 'users' sent over to CreateSliver
+    if local_record.type=='user':
+        user_keys = [ key.key for key in local_record.reg_keys ]
+        setattr(local_record, 'reg-keys', user_keys)
+    # search in map according to record type
+    type_map=augment_map.get(local_record.type,{})
+    # use type-dep. map to do the job
+    for (field_name,attribute) in type_map.items():
+        # get related objects (accessor may be absent -> default to empty)
+        related_records = getattr(local_record,attribute,[])
+        hrns = [ r.hrn for r in related_records ]
+        setattr (local_record, field_name, hrns)
+
+
# fallback
return "** undef_datetime **"
- def todict (self):
+ # it may be important to exclude relationships, which fortunately
+ #
+ def todict (self, exclude_types=[]):
d=self.__dict__
- keys=[k for k in d.keys() if not k.startswith('_')]
+ def exclude (k,v):
+ if k.startswith('_'): return True
+ if exclude_types:
+ for exclude_type in exclude_types:
+ if isinstance (v,exclude_type): return True
+ return False
+ keys=[k for (k,v) in d.items() if not exclude(k,v)]
return dict ( [ (k,d[k]) for k in keys ] )
def toxml(self):
print " gidIssuer:"
self.get_signature().get_issuer_gid().dump(8, dump_parents)
+ if self.expiration:
+ print " expiration:", self.expiration.isoformat()
+
gidObject = self.get_gid_object()
if gidObject:
result += " gidObject:\n"
# of type None
urn = hrn_to_urn(hrn, type)
gid = GID(subject=hrn, uuid=uuid, hrn=hrn, urn=urn, email=email)
-
# is this a CA cert
if hrn == self.config.SFA_INTERFACE_HRN or not parent_hrn:
# root or sub authority
def __call__(self, *args, **kwds):
"""
- Main entry point for all SfaAPI functions. Type checks
+ Main entry point for all SFA API functions. Type checks
arguments, authenticates, and executes call().
"""
--- /dev/null
+This directory is a placeholder for deployment-specific material or
+test scripts that do not belong under sfa/ because we do not want
+them to be packaged.
--- /dev/null
+###########################################################################
+# Copyright (C) 2012 by
+# <savakian@sfa2.grenoble.senslab.info>
+#
+# Copyright: See COPYING file that comes with this distribution
+#
+###########################################################################
+#LDAP import
+from sfa.senslab.LDAPapi import LDAPapi
+import ldap.modlist as modlist
+
+
+#logger sfa
+from sfa.util.sfalogging import logger
+
+#OAR imports
+from datetime import datetime
+from sfa.senslab.OARrestapi import OARrestapi
+
+#Test slabdriver
+from sfa.senslab.slabdriver import SlabDriver
+from sfa.util.config import Config
+
+
+import os
+import sys
+
+
+
+def parse_options():
+
+ #arguments supplied
+ if len(sys.argv) > 1 :
+ options_list = sys.argv[1:]
+ #For each valid option, execute the associated function
+ #(defined in the dictionnary supported_options)
+ job_id = 1
+ valid_options_dict = {}
+ value_list = []
+ #Passing options to the script should be done like this :
+ #-10 OAR -2 SlabDriver
+ for option in options_list:
+ if option in supported_options:
+ #update the values used for the fonctions associated
+ #with the options
+
+ valid_options_dict[option] = value_list
+ #empty the values list for next option
+ value_list = []
+ print valid_options_dict
+ else:
+ if option[0] == '-':
+ value_list.append(option[1:])
+ print "value_list", value_list
+
+
+ return valid_options_dict
+
+def TestLdap(job_id = None):
+    """Exercise the LDAPapi wrapper against the senslab LDAP directory.
+
+    Runs through connection/search/password helpers, then creates,
+    searches and modifies a throw-away 'Tim Drake' account, and creates
+    then deletes a 'Dick Grayson' account.  job_id is accepted for
+    symmetry with the other Test* functions but is not used.
+    """
+    logger.setLevelDebug()
+
+    # bound and anonymous connections
+    ldap_server = LDAPapi()
+    ret = ldap_server.conn.connect(bind=True)
+    ldap_server.conn.close()
+    print "TEST ldap_server.conn.connect(bind=True)" , ret
+
+    ret = ldap_server.conn.connect(bind=False)
+    ldap_server.conn.close()
+    print "TEST ldap_server.conn.connect(bind=False)", ret
+
+    # searches : whole directory, then a single uid
+    ret = ldap_server.LdapSearch()
+    print "TEST ldap_server.LdapSearch ALL", ret
+
+    ret = ldap_server.LdapSearch('(uid=avakian)', [])
+    print "\r\n TEST ldap_server.LdapSearch ids = avakian", ret
+
+    # account helpers
+    password = ldap_server.generate_password()
+    print "\r\n TEST generate_password ", password
+
+    maxi = ldap_server.find_max_uidNumber()
+    print "\r\n TEST find_max_uidNumber " , maxi
+
+    # first throw-away account ('Tim Drake')
+    data = {}
+    data['last_name'] = "Drake"
+    data['first_name'] = "Tim"
+    data['givenName'] = data['first_name']
+    data['mail'] = "robin@arkham.fr"
+
+    record = {}
+    record['hrn'] = 'senslab.drake'
+    record['last_name'] = "Drake"
+    record['first_name'] = "Tim"
+    record['mail'] = "robin@arkham.fr"
+
+    login = ldap_server.generate_login(data)
+    print "\r\n Robin \tgenerate_login ", ret, login
+
+    ret = ldap_server.LdapAddUser(data)
+    print "\r\n Robin \tLdapAddUser ", ret
+
+    req_ldap = '(uid=' + login + ')'
+    ret = ldap_server.LdapSearch(req_ldap, [])
+    print "\r\n Robin \tldap_server.LdapSearch ids = %s %s" % (login, ret)
+
+    password = "Thridrobin"
+    enc = ldap_server.encrypt_password(password)
+    print "\r\n Robin \tencrypt_password ", enc
+
+    ret = ldap_server.LdapModifyUser(record, {'userPassword':enc})
+    print "\r\n Robin \tChange password LdapModifyUser ", ret
+
+    # NOTE(review): the 'drake' account is never deleted (LdapDelete is
+    # commented out) - repeated runs will accumulate entries.
+    #dn = 'uid=' + login + ',' + ldap_server.baseDN
+    #ret = ldap_server.LdapDelete(dn)
+    #print "\r\n Robin  \tLdapDelete ", ret
+
+    # second throw-away account ('Dick Grayson'), looked up then deleted
+    datanight = {}
+    datanight['last_name'] = "Grayson"
+    datanight['first_name'] = "Dick"
+    datanight['givenName'] = datanight['first_name']
+    datanight['mail'] = "nightwing@arkham.fr"
+
+    record_night = {}
+    record_night['hrn'] = 'senslab.grayson'
+    record_night['last_name'] = datanight['last_name']
+    record_night['first_name'] = datanight['first_name']
+    record_night['mail'] = datanight['mail']
+
+    ret = ldap_server.LdapFindUser(record_night)
+    print "\r\n Nightwing \tldap_server.LdapFindUser %s : %s" % (record_night, ret)
+
+    #ret = ldap_server.LdapSearch('(uid=grayson)', [])
+    #print "\r\n Nightwing \tldap_server.LdapSearch ids = %s %s" %('grayson',ret )
+
+    #ret = ldap_server.LdapAddUser(datanight)
+    #print "\r\n Nightwing \tLdapAddUser ", ret
+
+    #ret = ldap_server.LdapResetPassword(record_night)
+    #print "\r\n Nightwing \tLdapResetPassword de %s : %s" % (record_night, ret)
+
+    ret = ldap_server.LdapDeleteUser(record_night)
+    print "\r\n Nightwing \tLdapDeleteUser ", ret
+
+    # historical probes kept for reference (modify pubkey / password)
+    #record_avakian = {}
+    #record_avakian['hrn']= 'senslab.avakian'
+    #record_avakian['last_name'] = 'avakian'
+    #record_avakian['first_name'] = 'sandrine'
+    #record_avakian['mail'] = 'sandrine.avakian@inria.fr'
+    #pubkey = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAwSUkJ+cr3xM47h8lFkIXJoJhg4wHakTaLJmgTXkzvUmQsQeFB2MjUZ6WAelMXj/EFz2+XkK+bcWNXwfbrLptJQ+XwGpPZlu9YV/kzO63ghVrAyEg0+p7Pn1TO9f1ZYg4R6JfP/3qwH1AsE+X3PNpIewsuEIKwd2wUCJDf5RXJTpl39GizcBFemrRqgs0bdqAN/vUT9YvtWn8fCYR5EfJHVXOK8P1KmnbuGZpk7ryz21pDMlgw13+8aYB+LPkxdv5zG54A5c6o9N3zOCblvRFWaNBqathS8y04cOYWPmyu+Q0Xccwi7vM3Ktm8RoJw+raQNwsmneJOm6KXKnjoOQeiQ== savakian@sfa2.grenoble.senslab.info"
+    #ret = ldap_server.LdapModifyUser(record_night, {'sshPublicKey':pubkey})
+    #print "\r\n Sandrine \tChange pubkey LdapModifyUser ", ret
+
+    #record_myslice = {}
+    #record_myslice['hrn']= 'senslab.myslice'
+    #record_myslice['last_name'] = 'myslice'
+    #record_myslice['first_name'] = 'myslice'
+    #record_myslice['mail'] = 'nturro@inria.fr'
+    #pubkeymyslice = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAuyRPwn8PZxjdhu+ciRuPyM0eVBn7XS7i3tym9F30UVhaCd09a/UEmGn7WJZdfsxV3hXqG1Wc766FEst97NuzHzELSuvy/rT96J0UHG4wae4pnzOLd6NwFdZh7pkPsgHMHxK9ALVE68Puu+EDSOB5bBZ9Q624wCIGxEpmuS/+X+dDBTKgG5Hi0WA1uKJwhLSbbXb38auh4FlYgXPsdpljTIJatt+zGL0Zsy6fdrsVRc5W8kr3/SmE4OMNyabKBNyxioSEuYhRSjoQAHnYoevEjZniP8IzscKK7qwelzGUfnJEzexikhsQamhAFti2ReiFfoHBRZxnSc49ioH7Kaci5w== root@rhoecos3.ipv6.lip6.fr"
+
+    #pubkeytestuser = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDYS8tzufciTm6GdNUGHQc64OfTxFebMYUwh/Jl04IPTvjjr26uakbM0M2v33HxZ5Q7PnmPN9pB/w+a+f7a7J4cNs/tApOMg2hb6UrLaOrdnDMOs4KZlfElyDsF3Zx5QwxPYvzsKADAbDVoX4NF9PttuDLdm2l3nLSvm89jfla00GBg+K8grdOCHyYZVX/Wt7kxhXDK3AidQhKJgn+iD5GxvtWMBE+7S5kJGdRW1W10lSLBW3+VNsCrKJB2s8L55Xz/l2HNBScU7T0VcMQJrFxEXKzLPagZsMz0lfLzHESoGHIZ3Tz85DfECbTtMxLts/4KoAEc3EE+PYr2VDeAggDx testuser@myslice"
+
+    #password = "ReptileFight"
+    #enc = ldap_server.encrypt_password(password)
+    #print "\r\n sandrine \tencrypt_password ", enc
+
+    #ret = ldap_server.LdapModifyUser(record_avakian, {'userPassword':enc})
+    #print "\r\n sandrine \tChange password LdapModifyUser ", ret
+    return
+
+
+def get_stuff(oar, uri):
+ import httplib
+ import json
+ headers = {}
+ data = json.dumps({})
+
+ headers['X-REMOTE_IDENT'] = 'avakian'
+
+ headers['content-length'] = '0' #seems that it does not work if we don't add this
+
+
+ conn = httplib.HTTPConnection(oar.oarserver['ip'], oar.oarserver['port'])
+ conn.request("GET", uri, data , headers )
+ resp = ( conn.getresponse()).read()
+
+ conn.close()
+
+
+ js = json.loads(resp)
+ return js
+
+
+
+
+def TestOAR(job_id = None):
+    """Exercise the OARrestapi wrapper: job, resource and timezone queries.
+
+    job_id: id of an existing OAR job, passed either as a scalar or as
+    the one-element value list built by parse_options; defaults to '1'.
+    """
+    print "JOB_ID", job_id
+    # normalize job_id : parse_options hands over a list of strings
+    if isinstance(job_id, list) :
+        if len(job_id) >= 1:
+            job_id = job_id[0]
+        else:
+            job_id = '1'
+    else:
+        job_id = '1'
+    print "JOB_ID", job_id
+    oar = OARrestapi()
+    # high-level wrapper requests
+    jobs = oar.parser.SendRequest("GET_reserved_nodes", username = 'avakian')
+    print "\r\n OAR GET_reserved_nodes ", jobs
+
+    jobs = oar.parser.SendRequest("GET_jobs")
+    print "\r\n OAR GET_jobs ", jobs
+
+    jobs = oar.parser.SendRequest("GET_jobs_id", job_id, 'avakian')
+    print "\r\n OAR GET_jobs_id ", jobs
+
+    # raw REST queries through get_stuff
+    uri = '/oarapi/jobs/details.json?state=Running,Waiting,Launching&user=avakian'
+    raw_json = get_stuff(oar, uri)
+    print "\r\nOAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
+
+    uri = '/oarapi/jobs/' + job_id +'.json'
+    raw_json = get_stuff(oar, uri)
+    print "\r\n OAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
+
+    uri = '/oarapi/jobs/' + job_id + '/resources.json'
+    raw_json = get_stuff(oar, uri)
+    print "\r\n OAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
+
+    # NOTE(review): time_format is unused below; the same format string is
+    # repeated literally in the strftime call.
+    time_format = "%Y-%m-%d %H:%M:%S"
+
+    server_timestamp, server_tz = oar.parser.SendRequest("GET_timezone")
+
+    print "\r\n OAR GetTimezone ", server_timestamp, server_tz
+    print(datetime.fromtimestamp(int(server_timestamp)).strftime('%Y-%m-%d %H:%M:%S'))
+
+    uri = '/oarapi/resources/full.json'
+    raw_json = get_stuff(oar, uri)
+    print "\r\n OAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
+
+    uri = '/oarapi/jobs.json?user=avakian'
+    raw_json = get_stuff(oar, uri)
+    print "\r\nOAR ", uri, raw_json, "\r\n KKK \t", raw_json.keys()
+    return
+
+
+
+def TestSlabDriver(job_id = None):
+    """Exercise the SlabDriver testbed API (GetPersons / GetLeases).
+
+    job_id is normalized (one-element list -> scalar) but is unused by
+    the active calls; it is kept for the commented-out GetSlices probes.
+    """
+    if job_id is None:
+        job_id = 1
+
+    if isinstance(job_id, list) and len(job_id) == 1:
+        job_id = job_id[0]
+    slabdriver = SlabDriver(Config())
+
+    # historical probes kept for reference
+    #nodes = slabdriver.slab_api.GetReservedNodes()
+    #print " \r\n \r\n  GetReservedNodes", nodes
+
+    #sl = slabdriver.slab_api.GetSlices(slice_filter='senslab.avakian_slice', slice_filter_type='slice_hrn')
+    #print "\r\n \r\nGetSlices", sl[0]
+
+    #sl = slabdriver.slab_api.GetSlices(slice_filter='20', slice_filter_type='record_id_user')
+    #print "\r\n \r\nGetSlices", sl
+
+    #sl = slabdriver.slab_api.GetSlices()
+    #print "\r\n \r\nGetSlices", sl
+
+    persons = slabdriver.slab_api.GetPersons()
+    print "\r\n \r\n  GetPersons", persons
+
+    leases = slabdriver.slab_api.GetLeases(login='avakian')
+    print "\r\n \r\n  GetLeases", leases
+
+
+
+def TestSfi(filename = None):
+    """Drive the sfi.py command-line client through its main subcommands.
+
+    filename: an rspec file; its name (with an _out suffix) is reused to
+    save the output of 'sfi.py resources -l all'.
+    NOTE(review): several banners below do not match the command actually
+    run (e.g. 'SHOW SLICE' runs 'resources', 'SLICES' runs 'show <node>')
+    - confirm whether that is intentional.
+    """
+    if filename is None:
+        filename = "/home/savakian/flab-sfa/test_rspec/my_lyon_nodes.rspec"
+    print " =================    SFI.PY RESOURCES            =============", \
+    os.system("sfi.py list senslab")
+
+    print os.system("sfi.py resources")
+
+    print os.system("sfi.py resources -r slab")
+
+    print os.system("sfi.py resources -l all")
+
+    print "================ SFI.PY RESOURCES -R SLAB -L ALL ============\r\n", \
+    os.system("sfi.py resources -r slab -l all")
+
+    print "================ WRITING  sfi.py resources -l all ===========\r\n", \
+    filename
+
+    # save the advertisement next to the input rspec, with an _out suffix
+    filename = filename.split(".")[0]+"_out.rspec"
+    rspecfile = open(filename,"w")
+    r = os.popen("sfi.py resources -l all")
+    for i in r.readlines():
+        rspecfile.write(i)
+    rspecfile.close()
+
+    print " =================    SFI.PY SHOW SLICE   ============= \r\n", \
+    os.system("sfi.py resources senslab.avakian_slice")
+
+    print " =================    SFI.PY SHOW USER   =============\r\n", \
+    os.system("sfi.py show senslab.avakian_slice")
+
+    print " =================    SFI.PY SHOW NODE   =============\r\n", \
+    os.system("sfi.py show senslab.avakian")
+
+    print " =================    SFI.PY SLICES   =============\r\n", \
+    os.system("sfi.py show senslab.node6.devlille.senslab.info")
+
+    print " =================    SFI.PY LIST SLICE   =============\r\n", \
+    os.system("sfi.py slices")
+
+    print " =================    SFI.PY STATUS SLICE   =============\r\n", \
+    os.system("sfi.py status senslab.avakian_slice")
+
+    print " =================    SFI.PY DELETE SLICE   =============\r\n", \
+    os.system("sfi.py delete senslab.avakian_slice")
+
+    print " =================    SFI.PY CREATE SLICE   =============\r\n", \
+    os.system("sfi.py create senslab.avakian_slice \
+    /home/savakian/flab-sfa/test_rspec/my_lyon_nodes.rspec")
+
+def TestSQL(arg = None):
+    """Probe the SFA registry database through sqlalchemy.
+
+    Fetches every RegRecord, indexes them by (type, hrn), dumps the user
+    entries to stderr, and builds an email index of the user records.
+    'arg' is unused.
+    """
+    from sfa.storage.model import make_record, RegSlice, RegRecord
+    from sfa.storage.alchemy import dbsession
+    from sqlalchemy.orm.collections import InstrumentedList
+
+    from sqlalchemy.orm import joinedload
+
+    # historical joinedload experiments kept for reference
+    #solo_query_slice_list = dbsession.query(RegSlice).options(joinedload('reg_researchers')).filter_by(hrn='senslab.avakian_slice').first()
+    #print "\r\n \r\n      =========== query_slice_list  RegSlice \
+    #joinedload('reg_researchers')   senslab.avakian first \r\n \t ", \
+    #solo_query_slice_list.__dict__
+
+    #query_slice_list = dbsession.query(RegSlice).options(joinedload('reg_researchers')).all()
+    #print "\r\n \r\n      =========== query_slice_list RegSlice \
+    #joinedload('reg_researchers')   ALL  \r\n \t", \
+    #query_slice_list[0].__dict__
+
+    #return_slicerec_dictlist = []
+    #record = query_slice_list[0]
+    #print "\r\n \r\n  =========== \r\n \t", record
+
+    #tmp = record.__dict__
+    #print "\r\n \r\n  =========== \r\n \t", tmp
+    #tmp['reg_researchers'] = tmp['reg_researchers'][0].__dict__
+    #print "\r\n \r\n  =========== \r\n \t", tmp
+    ##del tmp['reg_researchers']['_sa_instance_state']
+    #return_slicerec_dictlist.append(tmp)
+
+    #print "\r\n \r\n  =========== \r\n \t", return_slicerec_dictlist
+
+    all_records = dbsession.query(RegRecord).all()
+
+    #create hash by (type,hrn)
+    #used to know if a given record is already known to SFA
+    records_by_type_hrn = \
+        dict ( [ ( (record.type,record.hrn) , record ) for record in all_records ] )
+    # NOTE(review): iterating the dict yields its KEYS, i.e. (type, hrn)
+    # tuples - so 'rec' below is the hrn string, not the record object.
+    for (rec_type, rec) in records_by_type_hrn :
+        if rec_type == 'user':
+            print>>sys.stderr,"\r\n SLABIMPORT \t keys %s rec %s \r\n" %(rec_type, rec )
+
+    users_rec_by_email = \
+        dict ( [ (record.email, record) for record in all_records if record.type == 'user' ] )
+
+
+def RunAll( arg ):
+    """Run the LDAP, OAR, driver and sfi test suites in sequence.
+
+    'arg' (the value list collected by parse_options) is ignored; every
+    test runs with its defaults.
+    NOTE(review): TestSQL is not included here - confirm intentional.
+    """
+    TestLdap()
+    TestOAR()
+    TestSlabDriver()
+    TestSfi()
+
+
+# dispatch table : command-line token -> test function
+supported_options = {
+    'OAR' : TestOAR,
+    'LDAP': TestLdap,
+    'driver': TestSlabDriver,
+    'sfi':TestSfi,
+    'sql':TestSQL,
+    'all' : RunAll }
+
+def main():
+    """Entry point : run each test selected on the command line,
+    passing it the value list collected by parse_options."""
+    opts = parse_options()
+    print opts
+    for opt in opts:
+        supported_options[opt](opts[opt])
+
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
--- /dev/null
+#!/bin/bash
+# Smoke-test the senslab SFA deployment by driving sfi.py through its
+# main subcommands (list / resources / show / slices / status / create)
+# against the senslab2 registry and aggregate.
+#
+# NOTE(review): $1 (jobid) and $2 (test name) are only echoed at the
+# very end; the usage message suggests they should select a python test
+# - confirm intent.
+if (( ! $# == 2 ))
+then
+    echo " Usage : bash_test takes 2 arguments : one jobid and one of the following:"
+    echo " LDAP/ OAR / driver "
+    echo $#
+    exit
+fi
+
+sfi.py list senslab2
+echo " ================= SFI.PY RESOURCES ============="
+sfi.py resources
+
+echo " ================= SFI.PY RESOURCES -R SLAB ============="
+sfi.py resources -r slab
+
+echo " ================= SFI.PY RESOURCES -L ALL ============="
+sfi.py resources -l all
+
+echo " ================= SFI.PY RESOURCES -R SLAB -L ALL ============="
+sfi.py resources -r slab -l all
+
+# save the full advertisement, then replay it through 'create' below
+echo " ================= SFI.PY RESOURCES -L ALL > avakian_adv.rspec ============="
+sfi.py resources -l all > /home/savakian/flab-sfa/avakian_adv.rspec
+
+echo " ================= SFI.PY RESOURCES avakian_adv.rspec ============="
+sfi.py resources senslab2.avakian_slice
+
+
+echo " ================= SFI.PY SHOW SLICE ============="
+sfi.py show senslab2.avakian_slice
+
+echo " ================= SFI.PY SHOW USER ============="
+sfi.py show senslab2.avakian
+
+echo " ================= SFI.PY SHOW NODE ============="
+sfi.py show senslab2.node67.grenoble.senslab.info
+
+echo " ================= SFI.PY SLICES ============="
+sfi.py slices
+
+echo " ================= SFI.PY STATUS SLICE ============="
+sfi.py status senslab2.avakian_slice
+
+echo " ================= SFI.PY CREATE SLICE ============="
+sfi.py create senslab2.avakian_slice /home/savakian/flab-sfa/avakian_adv.rspec
+
+# echo " ================= SFI.PY DELETE SLICE ============="
+# sfi.py delete senslab2.avakian_slice
+
+# NOTE(review): without 'echo -e' the \r\n below is printed literally
+echo "\r\n"
+echo " PYTHON TEST ", $1, $2
+++ /dev/null
-#!/usr/bin/python
-
-plc_ns="http://www.planet-lab.org/sfa"
-version="2009/07"
import time
import pdb
import xml.dom.minidom
-import xml.dom.ext
import apistub
import inspect
from types import *
from optparse import OptionParser
-from sfa.storage.parameter import Parameter,Mixed
+from sfa.storage.parameter import Parameter, Mixed
-import globals
+plc_ns="http://www.planet-lab.org/sfa"
class SoapError(Exception):
def __init__(self, value):
servport_el.setAttribute("binding", "tns:" + name + "_binding")
soapaddress = servport_el.appendChild(self.wsdl.createElement("soap:address"))
- soapaddress.setAttribute("location", "%s/%s" % (globals.plc_ns,service))
+ soapaddress.setAttribute("location", "%s/%s" % (plc_ns,service))
def compute_wsdl_definitions(self):
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"/>
- """ % (self.interface_name(),globals.plc_ns,globals.plc_ns,globals.plc_ns)
+ """ % (self.interface_name(),plc_ns,plc_ns,plc_ns)
self.wsdl = xml.dom.minidom.parseString(wsdl_text_header)
<types>
<xsd:schema xmlns="http://www.w3.org/2001/XMLSchema" targetNamespace="%s/schema"/>
</types>
- </wsdl:definitions> """ % (self.interface_name(),globals.plc_ns, globals.plc_ns, globals.plc_ns, globals.plc_ns)
+ </wsdl:definitions> """ % (self.interface_name(),plc_ns, plc_ns, plc_ns, plc_ns)
self.types = xml.dom.minidom.parseString(wsdl_text_header)
def pretty_print(self):
if (self.wsdl):
- xml.dom.ext.PrettyPrint(self.wsdl)
+ print xml.dom.minidom.Document.toprettyxml(self.wsdl)
else:
- raise Exception("Empty WSDL")
+ raise Exception("Empty WSDL")
def main():
parser = OptionParser()