merges master (so, no sfa-cm anymore)
author Thierry Parmentelat <thierry.parmentelat@inria.fr>
Thu, 17 Jan 2013 18:36:21 +0000 (19:36 +0100)
committer Thierry Parmentelat <thierry.parmentelat@inria.fr>
Thu, 17 Jan 2013 18:36:21 +0000 (19:36 +0100)
30 files changed:
Makefile
Makefile.debian [new file with mode: 0644]
README.txt [moved from INSTALL.txt with 100% similarity]
debian/changelog.in [new file with mode: 0644]
debian/compat [new file with mode: 0644]
debian/control [new file with mode: 0644]
debian/copyright [new file with mode: 0644]
debian/python-sfa.postinst [new file with mode: 0644]
debian/rules [new file with mode: 0755]
debian/sfa-client.install [new file with mode: 0644]
debian/sfa-common.install [new file with mode: 0644]
debian/sfa-dummy.install [new file with mode: 0644]
debian/sfa-federica.install [new file with mode: 0644]
debian/sfa-flashpolicy.install [new file with mode: 0644]
debian/sfa-nitos.install [new file with mode: 0644]
debian/sfa-plc.install [new file with mode: 0644]
debian/sfa-sfatables.install [new file with mode: 0644]
debian/sfa-tests.install [new file with mode: 0644]
debian/sfa-xmlbuilder.install [new file with mode: 0644]
debian/source/format [new file with mode: 0644]
init.d/functions.sfa [new file with mode: 0644]
init.d/sfa
setup.py
sfa.spec
sfa/client/sfi.py
sfa/generic/importer.py [new file with mode: 0644]
sfa/generic/wrapper.py [new file with mode: 0644]
stdeb.cfg [new file with mode: 0644]
wsdl/globals.py [deleted file]
wsdl/sfa2wsdl.py

index 6989044..97d5808 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@
 # 
 # overwritten by the specfile
 DESTDIR="/"
-
+PREFIX=/usr
 ##########
 all: python wsdl 
 
@@ -29,12 +29,12 @@ sfa/util/version.py: sfa/util/version.py.in
        sed -e "s,@VERSIONTAG@,$(VERSIONTAG),g" -e "s,@SCMURL@,$(SCMURL),g" sfa/util/version.py.in > $@
 
 xmlbuilder-install:
-       cd xmlbuilder-0.9 && python setup.py install --root=$(DESTDIR) && cd -
+       cd xmlbuilder-0.9 && python setup.py install --prefix=$(PREFIX) --root=$(DESTDIR) && cd -
        rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/*egg-info
 
 # postinstall steps - various cleanups and tweaks for a nicer rpm
 python-install:
-       python setup.py install --root=$(DESTDIR)       
+       python setup.py install --prefix=$(PREFIX) --root=$(DESTDIR)
        chmod 444 $(DESTDIR)/etc/sfa/default_config.xml
        rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/*egg-info
        rm -rf $(DESTDIR)/usr/lib*/python*/site-packages/sfa/storage/migrations
@@ -61,6 +61,12 @@ wsdl-clean:
 
 .PHONY: wsdl wsdl-install wsdl-clean
 
+##########
+debian: version
+       $(MAKE) -f Makefile.debian debian
+debian.clean: 
+	$(MAKE) -f Makefile.debian debian.clean
+
 ##########
 tests-install:
        mkdir -p $(DESTDIR)/usr/share/sfa/tests
diff --git a/Makefile.debian b/Makefile.debian
new file mode 100644 (file)
index 0000000..4ac9c58
--- /dev/null
@@ -0,0 +1,24 @@
+PROJECT=sfa
+VERSION=$(shell python -c "from sfa.util.version import version_tag; print version_tag" | sed -e s,-,.,)
+DATE=$(shell date -u +"%a, %d %b %Y %T")
+DEBIAN_TARBALL=../$(PROJECT)_$(VERSION).orig.tar.bz2
+
+debian: debian/changelog debian.source debian.package
+
+force:
+
+debian/changelog: debian/changelog.in
+       sed -e "s|@VERSION@|$(VERSION)|" -e "s|@DATE@|$(DATE)|" debian/changelog.in > debian/changelog
+
+# TARBALL is passed from the main build (/build/Makefile) to the 'make debian' call
+debian.source: force 
+       rsync -a $(TARBALL) $(DEBIAN_TARBALL)
+
+debian.package:
+       debuild -uc -us -b 
+
+debian.clean:
+       $(MAKE) -f debian/rules clean
+       rm -rf build/ MANIFEST ../*.tar.gz ../*.dsc ../*.build
+       find . -name '*.pyc' -delete
+
similarity index 100%
rename from INSTALL.txt
rename to README.txt
diff --git a/debian/changelog.in b/debian/changelog.in
new file mode 100644 (file)
index 0000000..381dc15
--- /dev/null
@@ -0,0 +1,5 @@
+sfa (@VERSION@) UNRELEASED; urgency=low
+
+  * Initial release. 
+
+ -- Thierry Parmentelat <thierry.parmentelat@inria.fr>  @DATE@ +0000
diff --git a/debian/compat b/debian/compat
new file mode 100644 (file)
index 0000000..ec63514
--- /dev/null
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644 (file)
index 0000000..0b7e461
--- /dev/null
@@ -0,0 +1,64 @@
+Source: sfa
+Maintainer: Thierry Parmentelat <Thierry.Parmentelat@inria.fr>
+Section: misc
+Priority: optional
+Standards-Version: 3.9.2
+Build-Depends: devscripts, debhelper (>=7.0.50~), debconf, dpatch
+
+Package: sfa-common
+Architecture: any
+Depends: python (>= 2.5), python-openssl (>= 0.7), python-m2crypto, python-dateutil, python-lxml, python-libxslt1, python-setuptools, python-zsi, postgresql (>= 8.2), python-sqlalchemy, python-migrate
+#further depends from fedora - no obvious match on debian for now
+#Requires: xmlsec1-openssl-devel  (libxmlsec1-dev or libxmlsec1-openssl or ??)
+#Requires: util-linux-ng
+#Requires: postgresql-python
+#Requires: python-psycopg2
+# the eucalyptus aggregate uses this module
+#Requires: python-xmlbuilder
+Description: Slice Facility Architecture, generic implementation derived from PlanetLab
+
+Package: sfa-flashpolicy
+Architecture: any
+Depends: sfa-common
+Description: SFA support for flash clients
+
+Package: sfa-client
+Architecture: any
+Depends: sfa-common
+Description: sfi, the SFA experimenter-side CLI
+
+Package: sfa-plc
+Architecture: any
+Depends: sfa-common
+Description: the SFA layer around MyPLC
+
+Package: sfa-federica
+Architecture: any
+Depends: sfa-common
+Description: the SFA layer around Federica
+
+Package: sfa-nitos
+Architecture: any
+Depends: sfa-common
+Description: the SFA layer around NITOS
+
+Package: sfa-dummy
+Architecture: any
+Depends: sfa-common
+Description: the SFA layer around a Dummy Testbed 
+
+Package: sfa-sfatables
+Architecture: any
+Depends: sfa-common
+Description: sfatables policy tool for SFA
+
+Package: sfa-xmlbuilder
+Architecture: any
+Provides: python-xmlbuilder
+Description: third-party xmlbuilder tool
+
+Package: sfa-tests
+Architecture: any
+Depends: sfa-common
+Description: unit tests suite for SFA
+
diff --git a/debian/copyright b/debian/copyright
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/debian/python-sfa.postinst b/debian/python-sfa.postinst
new file mode 100644 (file)
index 0000000..16bcf7a
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/bash
+# TODO(review): incomplete 'cp' command here — a bare 'cp' exits non-zero ("missing file operand") and would abort package configuration; note debian/control declares no python-sfa package, so complete or drop this script
diff --git a/debian/rules b/debian/rules
new file mode 100755 (executable)
index 0000000..b8796e6
--- /dev/null
@@ -0,0 +1,5 @@
+#!/usr/bin/make -f
+# -*- makefile -*-
+
+%:
+       dh $@
diff --git a/debian/sfa-client.install b/debian/sfa-client.install
new file mode 100644 (file)
index 0000000..fead11d
--- /dev/null
@@ -0,0 +1,8 @@
+etc/sfa/sfi_config
+usr/bin/sfi*.py*
+usr/bin/sfi
+usr/bin/get*.py*
+usr/bin/setRecord.py*
+usr/bin/sfascan.py*
+usr/bin/sfascan
+usr/bin/sfadump.py*
diff --git a/debian/sfa-common.install b/debian/sfa-common.install
new file mode 100644 (file)
index 0000000..93531fb
--- /dev/null
@@ -0,0 +1,24 @@
+usr/lib*/python*/site-packages/sfa/__init__.py*
+usr/lib*/python*/site-packages/sfa/trust
+usr/lib*/python*/site-packages/sfa/storage
+usr/lib*/python*/site-packages/sfa/util
+usr/lib*/python*/site-packages/sfa/server
+usr/lib*/python*/site-packages/sfa/methods
+usr/lib*/python*/site-packages/sfa/generic
+usr/lib*/python*/site-packages/sfa/managers
+usr/lib*/python*/site-packages/sfa/importer
+usr/lib*/python*/site-packages/sfa/rspecs
+usr/lib*/python*/site-packages/sfa/client
+usr/bin/sfa-start.py*
+usr/bin/sfaadmin.py*
+usr/bin/sfaadmin
+usr/bin/keyconvert.py*
+usr/bin/sfa-config-tty
+usr/bin/sfa-config
+etc/sfa/default_config.xml
+etc/sfa/aggregates.xml
+etc/sfa/registries.xml
+etc/init.d/sfa
+usr/share/sfa/migrations
+usr/share/sfa/examples
+var/www/html/wsdl/*.wsdl
diff --git a/debian/sfa-dummy.install b/debian/sfa-dummy.install
new file mode 100644 (file)
index 0000000..3af0686
--- /dev/null
@@ -0,0 +1 @@
+usr/lib*/python*/site-packages/sfa/dummy
diff --git a/debian/sfa-federica.install b/debian/sfa-federica.install
new file mode 100644 (file)
index 0000000..c5abdd1
--- /dev/null
@@ -0,0 +1 @@
+usr/lib*/python*/site-packages/sfa/federica
diff --git a/debian/sfa-flashpolicy.install b/debian/sfa-flashpolicy.install
new file mode 100644 (file)
index 0000000..c0601a8
--- /dev/null
@@ -0,0 +1,2 @@
+usr/bin/sfa_flashpolicy.py*
+etc/sfa/sfa_flashpolicy_config.xml
diff --git a/debian/sfa-nitos.install b/debian/sfa-nitos.install
new file mode 100644 (file)
index 0000000..e76e3cc
--- /dev/null
@@ -0,0 +1 @@
+usr/lib*/python*/site-packages/sfa/nitos
diff --git a/debian/sfa-plc.install b/debian/sfa-plc.install
new file mode 100644 (file)
index 0000000..78938fe
--- /dev/null
@@ -0,0 +1,10 @@
+usr/lib*/python*/site-packages/sfa/planetlab
+usr/lib*/python*/site-packages/sfa/openstack
+etc/sfa/pl.rng
+etc/sfa/credential.xsd
+etc/sfa/top.xsd
+etc/sfa/sig.xsd
+etc/sfa/xml.xsd
+etc/sfa/protogeni-rspec-common.xsd
+etc/sfa/topology
+usr/bin/gen-sfa-cm-config.py*
diff --git a/debian/sfa-sfatables.install b/debian/sfa-sfatables.install
new file mode 100644 (file)
index 0000000..74186b0
--- /dev/null
@@ -0,0 +1,3 @@
+etc/sfatables/*
+usr/bin/sfatables
+usr/lib*/python*/site-packages/sfatables
diff --git a/debian/sfa-tests.install b/debian/sfa-tests.install
new file mode 100644 (file)
index 0000000..18cdf5a
--- /dev/null
@@ -0,0 +1 @@
+usr/share/sfa/tests
diff --git a/debian/sfa-xmlbuilder.install b/debian/sfa-xmlbuilder.install
new file mode 100644 (file)
index 0000000..6e89f50
--- /dev/null
@@ -0,0 +1 @@
+usr/lib*/python*/site-packages/xmlbuilder
diff --git a/debian/source/format b/debian/source/format
new file mode 100644 (file)
index 0000000..163aaf8
--- /dev/null
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/init.d/functions.sfa b/init.d/functions.sfa
new file mode 100644 (file)
index 0000000..010ab41
--- /dev/null
@@ -0,0 +1,840 @@
+# -*-Shell-script-*-
+#
+# Thierry, jan 17 2013
+# this file was put together by Jordan to provide the same interface as 
+# /etc/init.d/functions on fedora systems 
+# (probably is extracted from one of the fedora releases as is, not sure about that)
+# 
+# we unconditionnally ship this as /etc/init.d/functions.sfa, 
+# and then our own initscript (init.d/sfa) does source that
+# conditionnally, i.e. when run on debian systems
+####################
+#
+# functions    This file contains functions to be used by most or all
+#              shell scripts in the /etc/init.d directory.
+#
+
+TEXTDOMAIN=initscripts
+
+# Make sure umask is sane
+umask 022
+
+# Set up a default search path.
+PATH="/sbin:/usr/sbin:/bin:/usr/bin"
+export PATH
+
+if [ $PPID -ne 1 -a -z "$SYSTEMCTL_SKIP_REDIRECT" ] && \
+               ( /bin/mountpoint -q /cgroup/systemd || /bin/mountpoint -q /sys/fs/cgroup/systemd ) ; then
+        case "$0" in
+        /etc/init.d/*|/etc/rc.d/init.d/*)
+               _use_systemctl=1
+               ;;
+       esac
+fi
+
+systemctl_redirect () {
+       local s
+       local prog=${1##*/}
+        local command=$2
+
+       case "$command" in
+       start)
+               s=$"Starting $prog (via systemctl): "
+               ;;
+       stop)
+               s=$"Stopping $prog (via systemctl): "
+               ;;
+       reload|try-reload)
+               s=$"Reloading $prog configuration (via systemctl): "
+               ;;
+       restart|try-restart|condrestart)
+               s=$"Restarting $prog (via systemctl): "
+               ;;
+       esac
+
+       action "$s" /bin/systemctl $command "$prog.service"
+}
+
+# Get a sane screen width
+[ -z "${COLUMNS:-}" ] && COLUMNS=80
+
+#if [ -z "${CONSOLETYPE:-}" ]; then
+#  if [ -r "/dev/stderr" ]; then
+#    CONSOLETYPE="$(/sbin/consoletype < /dev/stderr)"
+#  else
+#    CONSOLETYPE="$(/sbin/consoletype)"
+#  fi
+#fi
+
+if [ -z "${NOLOCALE:-}" ] && [ -z "${LANGSH_SOURCED:-}" ] && [ -f /etc/sysconfig/i18n ] ; then
+  . /etc/profile.d/lang.sh 2>/dev/null
+  # avoid propagating LANGSH_SOURCED any further
+  unset LANGSH_SOURCED
+fi
+
+# Read in our configuration
+if [ -z "${BOOTUP:-}" ]; then
+  if [ -f /etc/sysconfig/init ]; then
+      . /etc/sysconfig/init
+  else
+    # This all seem confusing? Look in /etc/sysconfig/init,
+    # or in /usr/doc/initscripts-*/sysconfig.txt
+    BOOTUP=color
+    RES_COL=60
+    MOVE_TO_COL="echo -en \\033[${RES_COL}G"
+    SETCOLOR_SUCCESS="echo -en \\033[1;32m"
+    SETCOLOR_FAILURE="echo -en \\033[1;31m"
+    SETCOLOR_WARNING="echo -en \\033[1;33m"
+    SETCOLOR_NORMAL="echo -en \\033[0;39m"
+    LOGLEVEL=1
+  fi
+  if [ "$CONSOLETYPE" = "serial" ]; then
+      BOOTUP=serial
+      MOVE_TO_COL=
+      SETCOLOR_SUCCESS=
+      SETCOLOR_FAILURE=
+      SETCOLOR_WARNING=
+      SETCOLOR_NORMAL=
+  fi
+fi
+
+# Interpret escape sequences in an fstab entry
+fstab_decode_str() {
+       fstab-decode echo "$1"
+}
+
+# Check if any of $pid (could be plural) are running
+checkpid() {
+       local i
+
+       for i in $* ; do
+               [ -d "/proc/$i" ] && return 0
+       done
+       return 1
+}
+
+__readlink() {
+    ls -bl "$@" 2>/dev/null| awk '{ print $NF }'
+}
+
+__fgrep() {
+    s=$1
+    f=$2
+    while read line; do
+       if strstr "$line" "$s"; then
+           echo $line
+           return 0
+       fi
+    done < $f
+    return 1
+}
+
+# __umount_loop awk_program fstab_file first_msg retry_msg umount_args
+# awk_program should process fstab_file and return a list of fstab-encoded
+# paths; it doesn't have to handle comments in fstab_file.
+__umount_loop() {
+       local remaining sig=
+       local retry=3 count
+
+       remaining=$(LC_ALL=C awk "/^#/ {next} $1" "$2" | sort -r)
+       while [ -n "$remaining" -a "$retry" -gt 0 ]; do
+               if [ "$retry" -eq 3 ]; then
+                       action "$3" fstab-decode umount $5 $remaining
+               else
+                       action "$4" fstab-decode umount $5 $remaining
+               fi
+               count=4
+               remaining=$(LC_ALL=C awk "/^#/ {next} $1" "$2" | sort -r)
+               while [ "$count" -gt 0 ]; do
+                       [ -z "$remaining" ] && break
+                       count=$(($count-1))
+                       # jordan # usleep 500000
+                       sleep 0.5
+                       remaining=$(LC_ALL=C awk "/^#/ {next} $1" "$2" | sort -r)
+               done
+               [ -z "$remaining" ] && break
+               fstab-decode /sbin/fuser -k -m $sig $remaining >/dev/null
+               sleep 3
+               retry=$(($retry -1))
+               sig=-9
+       done
+}
+
+# Similar to __umount loop above, specialized for loopback devices
+__umount_loopback_loop() {
+       local remaining devremaining sig=
+       local retry=3
+
+       remaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $2}' /proc/mounts)
+       devremaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $1}' /proc/mounts)
+       while [ -n "$remaining" -a "$retry" -gt 0 ]; do
+               if [ "$retry" -eq 3 ]; then
+                       action $"Unmounting loopback filesystems: " \
+                               fstab-decode umount $remaining
+               else
+                       action $"Unmounting loopback filesystems (retry):" \
+                               fstab-decode umount $remaining
+               fi
+               for dev in $devremaining ; do
+                       losetup $dev > /dev/null 2>&1 && \
+                               action $"Detaching loopback device $dev: " \
+                               losetup -d $dev
+               done
+               remaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $2}' /proc/mounts)
+               devremaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $1}' /proc/mounts)
+               [ -z "$remaining" ] && break
+               fstab-decode /sbin/fuser -k -m $sig $remaining >/dev/null
+               sleep 3
+               retry=$(($retry -1))
+               sig=-9
+       done
+}
+
+# __proc_pids {program} [pidfile]
+# Set $pid to pids from /var/run* for {program}.  $pid should be declared
+# local in the caller.
+# Returns LSB exit code for the 'status' action.
+__pids_var_run() {
+       local base=${1##*/}
+       local pid_file=${2:-/var/run/$base.pid}
+
+       pid=
+       if [ -f "$pid_file" ] ; then
+               local line p
+
+               [ ! -r "$pid_file" ] && return 4 # "user had insufficient privilege"
+               while : ; do
+                       read line
+                       [ -z "$line" ] && break
+                       for p in $line ; do
+                               [ -z "${p//[0-9]/}" ] && [ -d "/proc/$p" ] && pid="$pid $p"
+                       done
+               done < "$pid_file"
+
+               if [ -n "$pid" ]; then
+                       return 0
+               fi
+               return 1 # "Program is dead and /var/run pid file exists"
+       fi
+       return 3 # "Program is not running"
+}
+
+# Output PIDs of matching processes, found using pidof
+__pids_pidof() {
+       pidof -c -o $$ -o $PPID -o %PPID -x "$1" || \
+               pidof -c -o $$ -o $PPID -o %PPID -x "${1##*/}"
+# jordan #     pidof -c -m -o $$ -o $PPID -o %PPID -x "$1" || \
+# jordan #             pidof -c -m -o $$ -o $PPID -o %PPID -x "${1##*/}"
+}
+
+
+# A function to start a program.
+daemon() {
+       # Test syntax.
+       local gotbase= force= nicelevel corelimit
+       local pid base= user= nice= bg= pid_file=
+       local cgroup=
+       nicelevel=0
+       while [ "$1" != "${1##[-+]}" ]; do
+         case $1 in
+           '')    echo $"$0: Usage: daemon [+/-nicelevel] {program}"
+                  return 1;;
+           --check)
+                  base=$2
+                  gotbase="yes"
+                  shift 2
+                  ;;
+           --check=?*)
+                  base=${1#--check=}
+                  gotbase="yes"
+                  shift
+                  ;;
+           --user)
+                  user=$2
+                  shift 2
+                  ;;
+           --user=?*)
+                  user=${1#--user=}
+                  shift
+                  ;;
+           --pidfile)
+                  pid_file=$2
+                  shift 2
+                  ;;
+           --pidfile=?*)
+                  pid_file=${1#--pidfile=}
+                  shift
+                  ;;
+           --force)
+                  force="force"
+                  shift
+                  ;;
+           [-+][0-9]*)
+                  nice="nice -n $1"
+                  shift
+                  ;;
+           *)     echo $"$0: Usage: daemon [+/-nicelevel] {program}"
+                  return 1;;
+         esac
+       done
+
+        # Save basename.
+        [ -z "$gotbase" ] && base=${1##*/}
+
+        # See if it's already running. Look *only* at the pid file.
+       __pids_var_run "$base" "$pid_file"
+
+       [ -n "$pid" -a -z "$force" ] && return
+
+       # make sure it doesn't core dump anywhere unless requested
+       corelimit="ulimit -S -c ${DAEMON_COREFILE_LIMIT:-0}"
+       
+       # if they set NICELEVEL in /etc/sysconfig/foo, honor it
+       [ -n "${NICELEVEL:-}" ] && nice="nice -n $NICELEVEL"
+       
+       # if they set CGROUP_DAEMON in /etc/sysconfig/foo, honor it
+       if [ -n "${CGROUP_DAEMON}" ]; then
+               if [ ! -x /bin/cgexec ]; then
+                       echo -n "Cgroups not installed"; warning
+                       echo
+               else
+                       cgroup="/bin/cgexec";
+                       for i in $CGROUP_DAEMON; do
+                               cgroup="$cgroup -g $i";
+                       done
+               fi
+       fi
+
+       # Echo daemon
+        [ "${BOOTUP:-}" = "verbose" -a -z "${LSB:-}" ] && echo -n " $base"
+
+       # And start it up.
+       if [ -z "$user" ]; then
+          $cgroup $nice /bin/bash -c "$corelimit >/dev/null 2>&1 ; $*"
+       else
+          $cgroup $nice runuser -s /bin/bash $user -c "$corelimit >/dev/null 2>&1 ; $*"
+       fi
+
+       [ "$?" -eq 0 ] && success $"$base startup" || failure $"$base startup"
+}
+
+# A function to stop a program.
+killproc() {
+       local RC killlevel= base pid pid_file= delay
+
+       RC=0; delay=3
+       # Test syntax.
+       if [ "$#" -eq 0 ]; then
+               echo $"Usage: killproc [-p pidfile] [ -d delay] {program} [-signal]"
+               return 1
+       fi
+       if [ "$1" = "-p" ]; then
+               pid_file=$2
+               shift 2
+       fi
+       if [ "$1" = "-d" ]; then
+               delay=$2
+               shift 2
+       fi
+        
+
+       # check for second arg to be kill level
+       [ -n "${2:-}" ] && killlevel=$2
+
+        # Save basename.
+        base=${1##*/}
+
+        # Find pid.
+       __pids_var_run "$1" "$pid_file"
+       RC=$?
+       if [ -z "$pid" ]; then
+               if [ -z "$pid_file" ]; then
+                       pid="$(__pids_pidof "$1")"
+               else
+                       [ "$RC" = "4" ] && { failure $"$base shutdown" ; return $RC ;}
+               fi
+       fi
+
+        # Kill it.
+        if [ -n "$pid" ] ; then
+                [ "$BOOTUP" = "verbose" -a -z "${LSB:-}" ] && echo -n "$base "
+               if [ -z "$killlevel" ] ; then
+                      if checkpid $pid 2>&1; then
+                          # TERM first, then KILL if not dead
+                          kill -TERM $pid >/dev/null 2>&1
+                          sleep 0.1
+                          # jordan # usleep 100000
+                          if checkpid $pid && sleep 1 &&
+                             checkpid $pid && sleep $delay &&
+                             checkpid $pid ; then
+                                kill -KILL $pid >/dev/null 2>&1
+                               sleep 0.1
+                               # jordan # usleep 100000
+                          fi
+                       fi
+                       checkpid $pid
+                       RC=$?
+                       [ "$RC" -eq 0 ] && failure $"$base shutdown" || success $"$base shutdown"
+                       RC=$((! $RC))
+               # use specified level only
+               else
+                       if checkpid $pid; then
+                               kill $killlevel $pid >/dev/null 2>&1
+                               RC=$?
+                               [ "$RC" -eq 0 ] && success $"$base $killlevel" || failure $"$base $killlevel"
+                       elif [ -n "${LSB:-}" ]; then
+                               RC=7 # Program is not running
+                       fi
+               fi
+       else
+               if [ -n "${LSB:-}" -a -n "$killlevel" ]; then
+                       RC=7 # Program is not running
+               else
+                       failure $"$base shutdown"
+                       RC=0
+               fi
+       fi
+
+        # Remove pid file if any.
+       if [ -z "$killlevel" ]; then
+            rm -f "${pid_file:-/var/run/$base.pid}"
+       fi
+       return $RC
+}
+
+# A function to find the pid of a program. Looks *only* at the pidfile
+pidfileofproc() {
+       local pid
+
+       # Test syntax.
+       if [ "$#" = 0 ] ; then
+               echo $"Usage: pidfileofproc {program}"
+               return 1
+       fi
+
+       __pids_var_run "$1"
+       [ -n "$pid" ] && echo $pid
+       return 0
+}
+
+# A function to find the pid of a program.
+pidofproc() {
+       local RC pid pid_file=
+
+       # Test syntax.
+       if [ "$#" = 0 ]; then
+               echo $"Usage: pidofproc [-p pidfile] {program}"
+               return 1
+       fi
+       if [ "$1" = "-p" ]; then
+               pid_file=$2
+               shift 2
+       fi
+       fail_code=3 # "Program is not running"
+
+       # First try "/var/run/*.pid" files
+       __pids_var_run "$1" "$pid_file"
+       RC=$?
+       if [ -n "$pid" ]; then
+               echo $pid
+               return 0
+       fi
+
+       [ -n "$pid_file" ] && return $RC
+       __pids_pidof "$1" || return $RC
+}
+
+status() {
+       local base pid lock_file= pid_file=
+
+       # Test syntax.
+       if [ "$#" = 0 ] ; then
+               echo $"Usage: status [-p pidfile] {program}"
+               return 1
+       fi
+       if [ "$1" = "-p" ]; then
+               pid_file=$2
+               shift 2
+       fi
+       if [ "$1" = "-l" ]; then
+               lock_file=$2
+               shift 2
+       fi
+       base=${1##*/}
+
+       if [ "$_use_systemctl" = "1" ]; then
+               systemctl status ${0##*/}.service
+               return $?
+       fi
+
+       # First try "pidof"
+       __pids_var_run "$1" "$pid_file"
+       RC=$?
+       if [ -z "$pid_file" -a -z "$pid" ]; then
+               pid="$(__pids_pidof "$1")"
+       fi
+       if [ -n "$pid" ]; then
+               echo $"${base} (pid $pid) is running..."
+               return 0
+       fi
+
+       case "$RC" in
+               0)
+                       echo $"${base} (pid $pid) is running..."
+                       return 0
+                       ;;
+               1)
+                       echo $"${base} dead but pid file exists"
+                       return 1
+                       ;;
+               4)
+                       echo $"${base} status unknown due to insufficient privileges."
+                       return 4
+                       ;;
+       esac
+       if [ -z "${lock_file}" ]; then
+               lock_file=${base}
+       fi
+       # See if /var/lock/subsys/${lock_file} exists
+       if [ -f /var/lock/subsys/${lock_file} ]; then
+               echo $"${base} dead but subsys locked"
+               return 2
+       fi
+       echo $"${base} is stopped"
+       return 3
+}
+
+echo_success() {
+  [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+  echo -n "["
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_SUCCESS
+  echo -n $"  OK  "
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+  echo -n "]"
+  echo -ne "\r"
+  return 0
+}
+
+echo_failure() {
+  [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+  echo -n "["
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_FAILURE
+  echo -n $"FAILED"
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+  echo -n "]"
+  echo -ne "\r"
+  return 1
+}
+
+echo_passed() {
+  [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+  echo -n "["
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_WARNING
+  echo -n $"PASSED"
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+  echo -n "]"
+  echo -ne "\r"
+  return 1
+}
+
+echo_warning() {
+  [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
+  echo -n "["
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_WARNING
+  echo -n $"WARNING"
+  [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
+  echo -n "]"
+  echo -ne "\r"
+  return 1
+}
+
+# Inform the graphical boot of our current state
+update_boot_stage() {
+  if [ -x /usr/bin/plymouth ]; then
+      /usr/bin/plymouth --update="$1"
+  fi
+  return 0
+}
+
+# Log that something succeeded
+success() {
+  [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_success
+  return 0
+}
+
+# Log that something failed
+failure() {
+  local rc=$?
+  [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_failure
+  [ -x /usr/bin/plymouth ] && /usr/bin/plymouth --details
+  return $rc
+}
+
+# Log that something passed, but may have had errors. Useful for fsck
+passed() {
+  local rc=$?
+  [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_passed
+  return $rc
+}  
+
+# Log a warning
+warning() {
+  local rc=$?
+  [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_warning
+  return $rc
+}  
+
+# Run some action. Log its output.
+action() {
+  local STRING rc
+
+  STRING=$1
+  echo -n "$STRING "
+  shift
+  "$@" && success $"$STRING" || failure $"$STRING"
+  rc=$?
+  echo
+  return $rc
+}
+
+# returns OK if $1 contains $2
+strstr() {
+  [ "${1#*$2*}" = "$1" ] && return 1
+  return 0
+}
+
+# Confirm whether we really want to run this service
+confirm() {
+  [ -x /usr/bin/plymouth ] && /usr/bin/plymouth --hide-splash
+  while : ; do 
+      echo -n $"Start service $1 (Y)es/(N)o/(C)ontinue? [Y] "
+      read answer
+      if strstr $"yY" "$answer" || [ "$answer" = "" ] ; then
+         return 0
+      elif strstr $"cC" "$answer" ; then
+        rm -f /var/run/confirm
+        [ -x /usr/bin/plymouth ] && /usr/bin/plymouth --show-splash
+         return 2
+      elif strstr $"nN" "$answer" ; then
+         return 1
+      fi
+  done
+}
+
+# resolve a device node to its major:minor numbers in decimal or hex
+get_numeric_dev() {
+(
+    fmt="%d:%d"
+    if [ "$1" = "hex" ]; then
+        fmt="%x:%x"
+    fi
+    ls -lH "$2" | awk '{ sub(/,/, "", $5); printf("'"$fmt"'", $5, $6); }'
+) 2>/dev/null
+}
+
+# Check whether file $1 is a backup or rpm-generated file and should be ignored
+is_ignored_file() {
+    case "$1" in
+       *~ | *.bak | *.orig | *.rpmnew | *.rpmorig | *.rpmsave)
+           return 0
+           ;;
+    esac
+    return 1
+}
+
+# Evaluate shvar-style booleans
+is_true() {
+    case "$1" in
+       [tT] | [yY] | [yY][eE][sS] | [tT][rR][uU][eE])
+       return 0
+       ;;
+    esac
+    return 1
+}
+
+# Evaluate shvar-style booleans
+is_false() {
+    case "$1" in
+       [fF] | [nN] | [nN][oO] | [fF][aA][lL][sS][eE])
+       return 0
+       ;;
+    esac
+    return 1
+}
+
+key_is_random() {
+    [ "$1" = "/dev/urandom" -o "$1" = "/dev/hw_random" \
+       -o "$1" = "/dev/random" ]
+}
+
+find_crypto_mount_point() {
+    local fs_spec fs_file fs_vfstype remaining_fields
+    local fs
+    while read fs_spec fs_file remaining_fields; do
+       if [ "$fs_spec" = "/dev/mapper/$1" ]; then
+           echo $fs_file
+           break;
+       fi
+    done < /etc/fstab
+}
+
+# Because of a chicken/egg problem, init_crypto must be run twice.  /var may be
+# encrypted but /var/lib/random-seed is needed to initialize swap.
+init_crypto() {
+    local have_random dst src key opt mode owner params makeswap skip arg opt
+    local param value rc ret mke2fs mdir prompt mount_point
+
+    ret=0
+    have_random=$1
+    while read dst src key opt; do
+       [ -z "$dst" -o "${dst#\#}" != "$dst" ] && continue
+        [ -b "/dev/mapper/$dst" ] && continue;
+       if [ "$have_random" = 0 ] && key_is_random "$key"; then
+           continue
+       fi
+       if [ -n "$key" -a "x$key" != "xnone" ]; then
+           if test -e "$key" ; then
+               owner=$(ls -l $key | (read a b owner rest; echo $owner))
+               if ! key_is_random "$key"; then
+                   mode=$(ls -l "$key" | cut -c 5-10)
+                   if [ "$mode" != "------" ]; then
+                      echo $"INSECURE MODE FOR $key"
+                   fi
+               fi
+               if [ "$owner" != root ]; then
+                   echo $"INSECURE OWNER FOR $key"
+               fi
+           else
+               echo $"Key file for $dst not found, skipping"
+               ret=1
+               continue
+           fi
+       else
+           key=""
+       fi
+       params=""
+       makeswap=""
+       mke2fs=""
+       skip=""
+       # Parse the src field for UUID= and convert to real device names
+       if [ "${src%%=*}" == "UUID" ]; then
+               src=$(/sbin/blkid -t "$src" -l -o device)
+       elif [ "${src/^\/dev\/disk\/by-uuid\/}" != "$src" ]; then
+               src=$(__readlink $src)
+       fi
+       # Is it a block device?
+       [ -b "$src" ] || continue
+       # Is it already a device mapper slave? (this is gross)
+       devesc=${src##/dev/}
+       devesc=${devesc//\//!}
+       for d in /sys/block/dm-*/slaves ; do
+           [ -e $d/$devesc ] && continue 2
+       done
+       # Parse the options field, convert to cryptsetup parameters and
+       # contruct the command line
+       while [ -n "$opt" ]; do
+           arg=${opt%%,*}
+           opt=${opt##$arg}
+           opt=${opt##,}
+           param=${arg%%=*}
+           value=${arg##$param=}
+
+           case "$param" in
+           cipher)
+               params="$params -c $value"
+               if [ -z "$value" ]; then
+                   echo $"$dst: no value for cipher option, skipping"
+                   skip="yes"
+               fi
+           ;;
+           size)
+               params="$params -s $value"
+               if [ -z "$value" ]; then
+                   echo $"$dst: no value for size option, skipping"
+                   skip="yes"
+               fi
+           ;;
+           hash)
+               params="$params -h $value"
+               if [ -z "$value" ]; then
+                   echo $"$dst: no value for hash option, skipping"
+                   skip="yes"
+               fi
+           ;;
+           verify)
+               params="$params -y"
+           ;;
+           swap)
+               makeswap=yes
+               ;;
+           tmp)
+               mke2fs=yes
+           esac
+       done
+       if [ "$skip" = "yes" ]; then
+           ret=1
+           continue
+       fi
+       if [ -z "$makeswap" ] && cryptsetup isLuks "$src" 2>/dev/null ; then
+           if key_is_random "$key"; then
+               echo $"$dst: LUKS requires non-random key, skipping"
+               ret=1
+               continue
+           fi
+           if [ -n "$params" ]; then
+               echo "$dst: options are invalid for LUKS partitions," \
+                   "ignoring them"
+           fi
+           if [ -n "$key" ]; then
+               /sbin/cryptsetup -d $key luksOpen "$src" "$dst" <&1 2>/dev/null && success || failure
+               rc=$?
+           else
+               mount_point="$(find_crypto_mount_point $dst)"
+               [ -n "$mount_point" ] || mount_point=${src##*/}
+               prompt=$(printf $"%s is password protected" "$mount_point")
+               plymouth ask-for-password --prompt "$prompt" --command="/sbin/cryptsetup luksOpen -T1 $src $dst" <&1
+               rc=$?
+           fi
+       else
+           [ -z "$key" ] && plymouth --hide-splash
+           /sbin/cryptsetup $params ${key:+-d $key} create "$dst" "$src" <&1 2>/dev/null && success || failure
+           rc=$?
+           [ -z "$key" ] && plymouth --show-splash
+       fi
+       if [ $rc -ne 0 ]; then
+           ret=1
+           continue
+       fi
+       if [ -b "/dev/mapper/$dst" ]; then
+           if [ "$makeswap" = "yes" ]; then
+               mkswap "/dev/mapper/$dst" 2>/dev/null >/dev/null
+           fi
+           if [ "$mke2fs" = "yes" ]; then
+               if mke2fs "/dev/mapper/$dst" 2>/dev/null >/dev/null \
+                   && mdir=$(mktemp -d /tmp/mountXXXXXX); then
+                   mount "/dev/mapper/$dst" "$mdir" && chmod 1777 "$mdir"
+                   umount "$mdir"
+                   rmdir "$mdir"
+               fi
+           fi
+       fi
+    done < /etc/crypttab
+    return $ret
+}
+
+# A sed expression to filter out the files that is_ignored_file recognizes
+__sed_discard_ignored_files='/\(~\|\.bak\|\.orig\|\.rpmnew\|\.rpmorig\|\.rpmsave\)$/d'
+
+if [ "$_use_systemctl" = "1" ]; then
+        if  [ "x$1" = xstart -o \
+                "x$1" = xstop -o \
+                "x$1" = xrestart -o \
+                "x$1" = xreload -o \
+                "x$1" = xtry-restart -o \
+                "x$1" = xforce-reload -o \
+                "x$1" = xcondrestart ] ; then
+
+               systemctl_redirect $0 $1
+               exit $?
+       fi
+fi
index 69cf6f6..feb6163 100755 (executable)
@@ -8,10 +8,47 @@
 # description:   Wraps PLCAPI into the SFA compliant API
 #
 
-# source function library
-. /etc/init.d/functions
-# Default locations
-PGDATA=/var/lib/pgsql/data
+####################
+# borrowed from postgresql
+function debian_get_postgresql_versions () {
+    versions=()
+    for v in `ls /usr/lib/postgresql/ 2>/dev/null`; do
+       if [ -x /usr/lib/postgresql/$v/bin/pg_ctl ] && [ ! -x /etc/init.d/postgresql-$v ]; then
+            versions+=($v)
+       fi
+    done
+    if [[ ${#versions[*]} == "0" ]]; then 
+       echo "E: Missing postgresql installation. Aborting."
+       exit
+    fi
+    if [[ ${#versions[*]} != "1" ]]; then 
+       echo "E: Too many postgresql versions installed. Aborting."
+       exit
+    fi
+    pgver=${versions[0]}
+}
+
+####################
+if [ -f /etc/redhat-release ] ; then
+    # source function library
+    . /etc/init.d/functions
+    PGDATA=/var/lib/pgsql/data/
+    PGWATCH=postmaster
+    PGLOCK=/var/lock/subsys/postgresql
+    SFALOCK=/var/lock/subsys/sfa-start.pid
+elif [ -f /etc/debian_version ] ; then
+    . /etc/init.d/functions.sfa
+    debian_get_postgresql_versions
+    PGDATA=/etc/postgresql/$pgver/main/
+    PGWATCH=postgres
+    PGLOCK=/var/run/postgresql/$pgver-main.pid
+    SFALOCK=/var/run/sfa-start.pid
+else
+    echo "initscript can only handle redhat/fedora or debian/ubuntu systems"
+    exit 1
+fi
+
+
 postgresql_conf=$PGDATA/postgresql.conf
 pghba_conf=$PGDATA/pg_hba.conf
 postgresql_sysconfig=/etc/sysconfig/pgsql
@@ -44,7 +81,7 @@ check ()
 function postgresql_check () {
 
     # wait until postmaster is up and running - or 10s max
-    if status postmaster >& /dev/null && [ -f /var/lock/subsys/postgresql ] ; then
+    if status $PGWATCH >& /dev/null && [ -f $PGLOCK ] ; then
        # The only way we can be sure is if we can access it
        for i in $(seq 1 10) ; do
            # Must do this as the postgres user initially (before we
@@ -276,16 +313,18 @@ function start() {
     [ "$SFA_FLASHPOLICY_ENABLED" == 1 ] && \
         action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
 
-    touch /var/lock/subsys/sfa-start.py
+    touch $SFALOCK
 
 }
 
 function stop() {
     action $"Shutting down SFA" killproc sfa-start.py
+# a possible alternative reads; esp. as we remove lock manually below
+#    echo $"Shutting down SFA" ; pkill '^sfa-start'
 
     db_stop
 
-    rm -f /var/lock/subsys/sfa-start.py
+    rm -f $SFALOCK
 }
 
 
@@ -295,13 +334,16 @@ case "$1" in
     reload) reload force ;;
     restart) stop; start ;;
     condrestart)
-       if [ -f /var/lock/subsys/sfa-start.py ]; then
+       if [ -f $SFALOCK ]; then
             stop
             start
        fi
        ;;
     status)
        status sfa-start.py
+# possible alternative for debian
+#      pids=$(pgrep '^sfa-start'); [ -n "$pids" ] && ps $pids
+
        RETVAL=$?
        ;;
     dbdump)
index 43d4786..61757df 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -9,6 +9,8 @@ from glob import glob
 import shutil
 from distutils.core import setup
 
+from sfa.util.version import version_tag
+
 scripts = glob("clientbin/*.py") + \
     [ 
     'config/sfa-config-tty',
@@ -47,7 +49,7 @@ packages = [
     ]
 
 initscripts = [ 'sfa', 
-#                'sfa-cm',
+                'functions.sfa',
                 ]
 
 data_files = [ ('/etc/sfa/', [ 'config/aggregates.xml',
@@ -107,5 +109,9 @@ else:
     setup(name='sfa',
           packages = packages, 
           data_files = data_files,
-          scripts = scripts)
+          scripts = scripts,
+          url="http://svn.planet-lab.org/wiki/SFATutorial",
+          author="Thierry Parmentelat, Tony Mack, Scott Baker",
+          author_email="thierry.parmentelat@inria.fr, tmack@princeton.cs.edu, smbaker@gmail.com",
+          version=version_tag)
 
index f5f8e50..fc1aff5 100644 (file)
--- a/sfa.spec
+++ b/sfa.spec
@@ -67,7 +67,7 @@ Group: Applications/System
 Requires: sfa
 
 %package client
-Summary: the SFA experimenter-side CLI
+Summary: sfi, the SFA experimenter-side CLI
 Group: Applications/System
 Requires: sfa
 Requires: pyOpenSSL >= 0.7
index 95795ee..8f9682f 100644 (file)
@@ -1405,7 +1405,8 @@ or with an slice hrn, shows currently provisioned resources
             self.print_help()
             sys.exit(1)
         target_hrn = args[0]
-        gid = self.registry().CreateGid(self.my_credential_string, target_hrn, self.client_bootstrap.my_gid_string())
+        my_gid_string = open(self.client_bootstrap.my_gid()).read() 
+        gid = self.registry().CreateGid(self.my_credential_string, target_hrn, my_gid_string)
         if options.file:
             filename = options.file
         else:
diff --git a/sfa/generic/importer.py b/sfa/generic/importer.py
new file mode 100644 (file)
index 0000000..83e72d2
--- /dev/null
@@ -0,0 +1,408 @@
+import os
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+
+from sfa.trust.gid import create_uuid    
+from sfa.trust.certificate import convert_public_key, Keypair
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
+
+class Importer:
+
+    def __init__ (self, auth_hierarchy, logger):
+        self.auth_hierarchy = auth_hierarchy
+        self.logger=logger
+
+    def add_options (self, parser):
+        # We don't have any options for now
+        pass
+
+    # hrn hash is initialized from current db
+    # remember just-created records as we go
+    # xxx might make sense to add a UNIQUE constraint in the db itself
+    def remember_record_by_hrn (self, record):
+        tuple = (record.type, record.hrn)
+        if tuple in self.records_by_type_hrn:
+            self.logger.warning ("Importer.remember_record_by_hrn: duplicate (%s,%s)"%tuple)
+            return
+        self.records_by_type_hrn [ tuple ] = record
+
+    # ditto for pointer hash
+    def remember_record_by_pointer (self, record):
+        if record.pointer == -1:
+            self.logger.warning ("Importer.remember_record_by_pointer: pointer is void")
+            return
+        tuple = (record.type, record.pointer)
+        if tuple in self.records_by_type_pointer:
+            self.logger.warning ("Importer.remember_record_by_pointer: duplicate (%s,%s)"%tuple)
+            return
+        self.records_by_type_pointer [ ( record.type, record.pointer,) ] = record
+
+    def remember_record (self, record):
+        self.remember_record_by_hrn (record)
+        self.remember_record_by_pointer (record)
+
+    def locate_by_type_hrn (self, type, hrn):
+        return self.records_by_type_hrn.get ( (type, hrn), None)
+
+    def locate_by_type_pointer (self, type, pointer):
+        return self.records_by_type_pointer.get ( (type, pointer), None)
+
+    ############################################################################
+    # Object import functions (authorities, resources, users, slices)
+    #
+
+    def import_auth(self, auth, parent_auth_hrn):
+        """
+        @return HRN of the newly created authority
+        """
+        auth_hrn = self.get_auth_naming(auth, parent_auth_hrn)
+        auth_urn = hrn_to_urn(auth_hrn, 'authority')
+
+        # import if hrn is not in list of existing hrns or if the hrn exists
+        # but its not a auth record
+        auth_record=self.locate_by_type_hrn ('authority', auth_hrn)
+        if not auth_record:
+            try:
+                # We ensure the parent is created through the root
+                #if not self.auth_hierarchy.auth_exists(auth_urn):
+                #    self.auth_hierarchy.create_auth(auth_urn)
+                auth_info = self.auth_hierarchy.get_auth_info(auth_urn)
+                auth_record = RegAuthority(hrn = auth_hrn, gid = auth_info.get_gid_object(),
+                                           pointer = 0,
+                                           authority = get_authority(auth_hrn))
+                auth_record.just_created()
+                dbsession.add(auth_record)
+                dbsession.commit()
+                self.logger.info("Importer: imported authority (auth) : %s" % auth_record) 
+                self.remember_record (auth_record)
+            except Exception, e:
+                # if the auth import fails then there is no point in trying to import the
+                # auth's child records (node, slices, persons), so skip them.
+                raise Exception, "Importer: failed to import auth. Skipping child records : %s" % e
+        else:
+            # xxx update the record ...
+            pass
+        auth_record.stale=False
+
+        return auth_hrn
+
+    def import_resource(self, resource, parent_auth_hrn):
+        """
+        @return HRN of the newly created resource
+        """
+        resource_hrn = self.get_resource_naming(resource, parent_auth_hrn)
+        resource_urn = hrn_to_urn(resource_hrn, 'node')
+
+        resource_record = self.locate_by_type_hrn ('node', resource_hrn )
+        if not resource_record:
+            try:
+                pkey = Keypair(create=True)
+                resource_gid = self.auth_hierarchy.create_gid(resource_urn, create_uuid(), pkey)
+                resource_record = RegNode (hrn = resource_hrn, gid = resource_gid, 
+                                       pointer = resource['id'],
+                                       authority = get_authority(resource_hrn))
+                resource_record.just_created()
+                dbsession.add(resource_record)
+                dbsession.commit()
+                self.logger.info("Importer: imported resource: %s" % resource_record)  
+                self.remember_record (resource_record)
+            except:
+                   self.logger.log_exc("Importer: failed to import resource")
+        else:
+            # xxx update the record ...
+            pass
+        
+        resource_record.stale=False
+
+        return resource_hrn
+
+    def init_user_key(self, user):
+        pubkey = None
+        pkey = None
+        if user['keys']:
+            # pick first working key in set
+            for pubkey in user['keys']:
+                 try:
+                    pkey = convert_public_key(pubkey)
+                    break
+                 except:
+                    continue
+            if not pkey:
+                self.logger.warn('Importer: unable to convert public key for %s' % user['email'])
+                pkey = Keypair(create=True)
+        else:
+            # the user has no keys. Creating a random keypair for the user's gid
+            self.logger.warn("Importer: user %s does not have a public key on the testbed"%user['email'])
+            pkey = Keypair(create=True)
+        return (pubkey, pkey)
+
+    def import_user(self, user, parent_auth_hrn):
+        """
+        @return HRN of the newly created user
+        """
+        user_hrn = self.get_user_naming(user, parent_auth_hrn)
+        user_urn = hrn_to_urn(user_hrn, 'user')
+
+        # return a tuple pubkey (a public key) and pkey (a Keypair object)
+
+        user_record = self.locate_by_type_hrn ( 'user', user_hrn)
+        try:
+            if not user_record:
+                (pubkey,pkey) = self.init_user_key (user)
+                user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
+                user_gid.set_email(user['email'])
+                user_record = RegUser(hrn = user_hrn, gid = user_gid, 
+                                         pointer = user['id'], 
+                                         authority = get_authority(user_hrn),
+                                         email = user['email'])
+                if pubkey: 
+                    user_record.reg_keys=[RegKey(pubkey)]
+                else:
+                    self.logger.warning("No key found for user %s" % user_record)
+                user_record.just_created()
+                dbsession.add (user_record)
+                dbsession.commit()
+                self.logger.info("Importer: imported user: %s" % user_record)
+                self.remember_record ( user_record )
+            else:
+                # update the record ?
+                # if user's primary key has changed then we need to update the 
+                # users gid by forcing an update here
+                sfa_keys = user_record.reg_keys
+                def key_in_list (key,sfa_keys):
+                    for reg_key in sfa_keys:
+                        if reg_key.key==key: return True
+                    return False
+                # is there a new key ? XXX understand ?
+                new_keys=False
+                for key in user['keys']:
+                    if not key_in_list (key,sfa_keys):
+                        new_keys = True
+                if new_keys:
+                    (pubkey,pkey) = self.init_user_key (user)
+                    user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
+                    if not pubkey:
+                        user_record.reg_keys=[]
+                    else:
+                        user_record.reg_keys=[ RegKey (pubkey)]
+                    self.logger.info("Importer: updated user: %s" % user_record)
+            user_record.email = user['email']
+            dbsession.commit()
+            user_record.stale=False
+        except:
+            self.logger.log_exc("Importer: failed to import user %s %s"%(user['id'],user['email']))
+
+        return user_hrn
+
+    def import_slice(self, slice, parent_auth_hrn):
+        """
+        @return HRN of the newly created slice
+        """
+        slice_hrn = self.get_slice_naming(slice, parent_auth_hrn)
+        slice_urn = hrn_to_urn(slice_hrn, 'slice')
+
+        slice_record = self.locate_by_type_hrn ('slice', slice_hrn)
+        if not slice_record:
+            try:
+                pkey = Keypair(create=True)
+                slice_gid = self.auth_hierarchy.create_gid(slice_urn, create_uuid(), pkey)
+                slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid, 
+                                         pointer=slice['id'],
+                                         authority=get_authority(slice_hrn))
+                slice_record.just_created()
+                dbsession.add(slice_record)
+                dbsession.commit()
+                self.logger.info("Importer: imported slice: %s" % slice_record)  
+                self.remember_record ( slice_record )
+            except:
+                self.logger.log_exc("Importer: failed to import slice")
+        else:
+            # xxx update the record ...
+            self.logger.warning ("Slice update not yet implemented")
+            pass
+        # record current users affiliated with the slice
+        slice_record.reg_researchers = \
+              [ self.locate_by_type_pointer ('user',int(id)) for id in slice['user_ids'] ]
+        dbsession.commit()
+        slice_record.stale=False
+
+        return slice_hrn
+
+    ############################################################################
+    # Recursive import
+    #
+    def import_auth_rec(self, auth, parent=None):
+        """
+        Import authority and related objects (resources, users, slices), then
+        recurse through all subauthorities.
+
+        @param auth authority to be processed.
+        @return 1 if successful, exception otherwise
+        """
+
+        # Create entry for current authority
+        try:
+            auth_hrn = self.import_auth(auth, parent)
+
+            # Import objects related to current authority
+            if auth['resource_ids']:
+                for resource_id in auth['resource_ids']:
+                    self.import_resource(self.resources_by_id[resource_id], auth_hrn)
+            if auth['user_ids']:
+                for user_id in auth['user_ids']:
+                    self.import_user(self.users_by_id[user_id], auth_hrn)
+            if auth['slice_ids']:
+                for slice_id in auth['slice_ids']:
+                    self.import_slice(self.slices_by_id[slice_id], auth_hrn)
+
+            # Recursive import of subauthorities
+            if auth['auth_ids']:
+                for auth_id in auth['auth_ids']:
+                    self.import_auth_rec(self.authorities_by_id[auth_id], auth_hrn)
+        except Exception, e:
+            self.logger.log_exc(e)
+            pass
+
+    def locate_by_type_hrn (self, type, hrn):
+        return self.records_by_type_hrn.get ( (type, hrn), None)
+
+    ############################################################################
+    # Main processing function
+    #
+    def run (self, options):
+        config = Config ()
+        interface_hrn = config.SFA_INTERFACE_HRN
+        root_auth = config.SFA_REGISTRY_ROOT_AUTH
+        # <mytestbed> shell = NitosShell (config)
+
+        ######## retrieve all existing SFA objects
+        all_records = dbsession.query(RegRecord).all()
+
+        # create hash by (type,hrn) 
+        # we essentially use this to know if a given record is already known to SFA 
+        self.records_by_type_hrn = \
+            dict ( [ ( (record.type, record.hrn) , record ) for record in all_records ] )
+        # create hash by (type,pointer) 
+        self.records_by_type_pointer = \
+            dict ( [ ( (record.type, record.pointer) , record ) for record in all_records 
+                     if record.pointer != -1] )
+
+        # initialize record.stale to True by default, then mark stale=False on the ones that are in use
+        for record in all_records: record.stale=True
+
+        ######## Data collection
+
+       # Here we make the adaptation between the testbed API, and dictionaries with required fields
+
+        # AUTHORITIES
+        authorities = self.get_authorities()
+        self.authorities_by_id = {}
+        if authorities:
+            self.authorities_by_id = dict([(auth['id'], auth) for auth in authorities])
+
+        # USERS & KEYS
+        users = self.get_users()
+        self.users_by_id = {}
+        self.keys_by_id = {}
+        if users:
+            self.users_by_id = dict ( [ ( user['id'], user) for user in users ] )
+            self.keys_by_id = dict ( [ ( user['id'], user['keys']) for user in users ] ) 
+
+        # RESOURCES
+        resources = self.get_resources()
+        self.resources_by_id = {}
+        if resources:
+            self.resources_by_id = dict ( [ (resource['id'], resource) for resource in resources ] )
+
+        # SLICES
+        slices = self.get_slices()
+        self.slices_by_id = {}
+        if slices:
+            self.slices_by_id = dict ( [ (slice['id'], slice) for slice in slices ] )
+
+        ######## Import process
+
+        if authorities:
+            # Everybody belongs to sub-authorities, and we rely on the different
+            # subauthorities to give appropriate pointers to objects.
+            root = {
+                'id': 0,
+                'name': interface_hrn,
+                'auth_ids': self.authorities_by_id.keys(),
+                'user_ids': None,
+                'resource_ids': None,
+                'slice_ids': None
+            }
+        else:
+            # We create a root authority with all objects linked to it.
+            root = {
+                'id': 0,
+                'name': interface_hrn,
+                'auth_ids': self.authorities_by_id.keys(),
+                'user_ids': self.users_by_id.keys(),
+                'resource_ids': self.resources_by_id.keys(),
+                'slice_ids': self.slices_by_id.keys()
+            }
+
+        # Recurse through authorities and import the different objects
+        self.import_auth_rec(root)
+
+        ######## Remove stale records
+
+        # special records must be preserved
+        system_hrns = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
+        for record in all_records: 
+            if record.hrn in system_hrns: 
+                record.stale=False
+            if record.peer_authority:
+                record.stale=False
+
+        for record in all_records:
+            try:
+                stale = record.stale
+            except:     
+                stale = True
+                self.logger.warning("stale not found with %s"%record)
+            if stale:
+                self.logger.info("Importer: deleting stale record: %s" % record)
+                dbsession.delete(record)
+                dbsession.commit()
+
+    ############################################################################ 
+    # Testbed specific functions
+
+    # OBJECTS
+
+    def get_authorities(self):
+        raise Exception, "Not implemented"
+
+    def get_resources(self):
+        raise Exception, "Not implemented"
+
+    def get_users(self):
+        raise Exception, "Not implemented"
+
+    def get_slices(self):
+        raise Exception, "Not implemented"
+
+    # NAMING
+    
+    def get_auth_naming(self, site, interface_hrn):
+        raise Exception, "Not implemented"
+
+    def get_resource_naming(self, site, node):
+        raise Exception, "Not implemented"
+
+    def get_user_naming(self, site, user):
+        raise Exception, "Not implemented"
+
+    def get_slice_naming(self, site, slice):
+        raise Exception, "Not implemented"
+
+if __name__ == "__main__":
+       from sfa.util.sfalogging import logger
+       importer = Importer("mytestbed", logger)
+       importer.run(None)
diff --git a/sfa/generic/wrapper.py b/sfa/generic/wrapper.py
new file mode 100644 (file)
index 0000000..2e89403
--- /dev/null
@@ -0,0 +1,82 @@
+class Wrapper:
+
+    def match_dict(self, dic, filter):
+       # We suppose if a field is in filter, it is therefore in the dic
+       if not filter:
+           return True
+       match = True
+       for k, v in filter.items():
+           if k[0] in Filter.modifiers:
+               op = k[0]
+               k = k[1:]
+           elif k in ['-SORT', '-LIMIT', '-OFFSET']:
+               continue;
+           else:
+               op = '='
+
+           if op == '=':
+               if isinstance(v, list):
+                   match &= (dic[k] in v) # array ?
+               else:
+                   match &= (dic[k] == v)
+           elif op == '~':
+               if isinstance(v, list):
+                   match &= (dic[k] not in v) # array ?
+               else:
+                   match &= (dic[k] != v) # array ?
+           elif op == '<':
+               if isinstance(v, StringTypes):
+                   # prefix match
+                   match &= dic[k].startswith('%s.' % v)
+               else:
+                   match &= (dic[k] < v)
+           elif op == '[':
+               if isinstance(v, StringTypes):
+                   match &= dic[k] == v or dic[k].startswith('%s.' % v)
+               else:
+                   match &= (dic[k] <= v)
+           elif op == '>':
+               if isinstance(v, StringTypes):
+                   # prefix match
+                   match &= v.startswith('%s.' % dic[k])
+               else:
+                   match &= (dic[k] > v)
+           elif op == ']':
+               if isinstance(v, StringTypes):
+                   # prefix match
+                   match &= dic[k] == v or v.startswith('%s.' % dic[k])
+               else:
+                   match &= (dic[k] >= v)
+           elif op == '&':
+               match &= (dic[k] & v) # array ?
+           elif op == '|':
+               match &= (dic[k] | v) # array ?
+           elif op == '{':
+               match &= (v in dic[k])
+       return match
+
+    def project_select_and_rename_fields(self, table, pkey, filters, fields):
+        filtered = []
+        for row in table:
+            # apply input filters 
+            if self.selection or self.match_dict(row, filters):
+                # apply output_fields
+                if self.projection:
+                    filtered.append(row)
+                else:
+                    c = {}
+                    for k,v in row.items():
+                        # if no fields = keep everything
+                        if not fields or k in fields or k == pkey:
+                            c[k] = v
+                    filtered.append(c)
+        return filtered
+
+    def get_objects(self, method, filters=None, fields=None):
+        if not method in ['authorities', 'resources', 'users', 'slices']:
+            raise Exception, "Unknown object type"
+        results = self.get(method, filters, fields)
+        # Perform missing operations
+        if results and ((filters and not self.selection) or (fields and not self.projection)):
+            results = self.project_select_and_rename_fields(results, 'id', filters, fields)
+        return results
diff --git a/stdeb.cfg b/stdeb.cfg
new file mode 100644 (file)
index 0000000..4848b75
--- /dev/null
+++ b/stdeb.cfg
@@ -0,0 +1,4 @@
+[DEFAULT]
+Depends: python (>=2.5), python-lxml, python-openssl (>=0.7), python-m2crypto, python-dateutil, libxmlsec1-openssl, python-libxslt1, python-zsi, uuid-runtime, python-setuptools, postgresql (>= 8.2), python-psycopg2, python-sqlalchemy, python-migrate, xmlsec1, sysvinit-utils
+# python-xmlbuilder (unavailable, and required only for the eucalyptus driver)
+# sysvinit-utils for pidof
diff --git a/wsdl/globals.py b/wsdl/globals.py
deleted file mode 100644 (file)
index 36c1042..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/python
-
-plc_ns="http://www.planet-lab.org/sfa"
-version="2009/07"
index 0cfc8f1..a9e3c32 100755 (executable)
@@ -9,16 +9,15 @@ import os, sys
 import time
 import pdb
 import xml.dom.minidom
-#import xml.dom.ext
 import apistub
 import inspect
 
 from types import *
 from optparse import OptionParser
 
-from sfa.storage.parameter import Parameter,Mixed
+from sfa.storage.parameter import Parameter, Mixed
 
-import globals
+plc_ns="http://www.planet-lab.org/sfa"
 
 class SoapError(Exception):
      def __init__(self, value):
@@ -273,7 +272,7 @@ class WSDLGen:
                         servport_el.setAttribute("binding", "tns:" + name + "_binding")
 
                         soapaddress = servport_el.appendChild(self.wsdl.createElement("soap:address"))
-                        soapaddress.setAttribute("location", "%s/%s" % (globals.plc_ns,service))
+                        soapaddress.setAttribute("location", "%s/%s" % (plc_ns,service))
 
 
     def compute_wsdl_definitions(self):
@@ -289,7 +288,7 @@ class WSDLGen:
             xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
             xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
             xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"/>
-            """ % (self.interface_name(),globals.plc_ns,globals.plc_ns,globals.plc_ns)
+            """ % (self.interface_name(),plc_ns,plc_ns,plc_ns)
             
         self.wsdl = xml.dom.minidom.parseString(wsdl_text_header)
         
@@ -310,7 +309,7 @@ class WSDLGen:
             <types>
                 <xsd:schema xmlns="http://www.w3.org/2001/XMLSchema" targetNamespace="%s/schema"/>
             </types>
-        </wsdl:definitions> """ % (self.interface_name(),globals.plc_ns, globals.plc_ns, globals.plc_ns, globals.plc_ns)
+        </wsdl:definitions> """ % (self.interface_name(),plc_ns, plc_ns, plc_ns, plc_ns)
         self.types = xml.dom.minidom.parseString(wsdl_text_header)
         
 
@@ -327,10 +326,9 @@ class WSDLGen:
 
     def pretty_print(self):
         if (self.wsdl):
-            #xml.dom.ext.PrettyPrint(self.wsdl)
-            xml.dom.minidom.Document.toprettyxml(self.wsdl)
+             print xml.dom.minidom.Document.toprettyxml(self.wsdl)
         else:
-            raise Exception("Empty WSDL")
+             raise Exception("Empty WSDL")
 
 def main():
     parser = OptionParser()