*.dbg
*.hide
*-sample
+.auto*
#
-## (Re)builds Python metafile (__init__.py)
-#
+## (Re)builds Python metafile (__init__.py)
+#
# overwritten by the specfile
DESTDIR="/"
PREFIX=/usr
##########
-all: python wsdl
+all: python wsdl
install: python-install wsdl-install tests-install
-clean: python-clean wsdl-clean
+clean: python-clean wsdl-clean
uninstall: python-uninstall tests-uninstall
python: version
-version: sfa/util/version.py
+version: sfa/util/version.py
sfa/util/version.py: sfa/util/version.py.in force
sed -e "s,@VERSIONTAG@,$(VERSIONTAG),g" -e "s,@SCMURL@,$(SCMURL),g" sfa/util/version.py.in > $@
version-clean:
rm -f sfa/util/version.py
-.PHONY: python version python-install python-clean version-clean
+.PHONY: python version python-install python-clean version-clean
##########
-wsdl:
- $(MAKE) -C wsdl
+wsdl:
+ $(MAKE) -C wsdl
# propagate DESTDIR from the specfile
wsdl-install:
- $(MAKE) -C wsdl install
+ $(MAKE) -C wsdl install
wsdl-clean:
$(MAKE) -C wsdl clean
.PHONY: wsdl wsdl-install wsdl-clean
-######################################## debian packaging
-# The 'debian' target is called from the build with the following variables set
-# (see build/Makefile and target_debian)
-# (.) RPMTARBALL
-# (.) RPMVERSION
-# (.) RPMRELEASE
-# (.) RPMNAME
-#
-PROJECT=$(RPMNAME)
-DEBVERSION=$(RPMVERSION).$(RPMRELEASE)
-DEBTARBALL=../$(PROJECT)_$(DEBVERSION).orig.tar.bz2
-
-DATE=$(shell date -u +"%a, %d %b %Y %T")
-
-debian: debian/changelog debian.source debian.package
-
-debian/changelog: debian/changelog.in
- sed -e "s|@VERSION@|$(DEBVERSION)|" -e "s|@DATE@|$(DATE)|" debian/changelog.in > debian/changelog
-
-debian.source: force
- rsync -a $(RPMTARBALL) $(DEBTARBALL)
-
-debian.package:
- debuild -uc -us -b
-
-debian.clean:
- $(MAKE) -f debian/rules clean
- rm -rf build/ MANIFEST ../*.tar.gz ../*.dsc ../*.build
- find . -name '*.pyc' -delete
-
##########
tests-install:
mkdir -p $(DESTDIR)/usr/share/sfa/tests
########## refreshing methods package metafile
# Metafiles - manage Legacy/ and Accessors by hand
-init := sfa/methods/__init__.py
+init := sfa/methods/__init__.py
index: $(init)
ifneq ($(methods_now),$(methods_files))
sfa/methods/__init__.py: force
endif
-sfa/methods/__init__.py:
+sfa/methods/__init__.py:
(echo '## Please use make index to update this file' ; echo 'all = """' ; cd sfa/methods; ls -1 *.py | grep -v __init__ | sed -e 's,.py$$,,' ; echo '""".split()') > $@
force:
##########
# a lot of stuff in the working dir is just noise
files:
- @find . -type f | egrep -v '^\./\.|/\.git/|/\.svn/|TAGS|AA-|~$$|egg-info|\.(py[co]|doc|html|pdf|png|svg|out|bak|dg|pickle)$$'
+ @find . -type f | egrep -v '^\./\.|/\.git/|/\.svn/|TAGS|AA-|~$$|egg-info|\.(py[co]|doc|html|pdf|png|svg|out|bak|dg|pickle)$$'
git-files:
@git ls-files | egrep -v '\.(doc|html|pdf)$$'
-tags:
+tags:
$(MAKE) git-files | xargs etags
.PHONY: files tags
# and I could not find out why
# so I went for the manual method instead
# there also was a web dialog prompting for a zip file that would
-# be used to initialize the project's home dir but this too
+# be used to initialize the project's home dir but this too
# did not seem to work the way I was trying to use it, so ...
# this target is still helpful to produce the readme in html from README.md
# we need to re-run make so the version is right
git_pypi: git pypi
-git:
+git:
git pull
$(MAKE) version
endif
endif
-synccheck:
+synccheck:
ifeq (,$(SSHURL))
@echo "sync: I need more info from the command line, e.g."
@echo " make sync PLC=boot.planetlab.eu"
@exit 1
endif
-LOCAL_RSYNC_EXCLUDES += --exclude '*.pyc'
+LOCAL_RSYNC_EXCLUDES += --exclude '*.pyc'
LOCAL_RSYNC_EXCLUDES += --exclude '*.png' --exclude '*.svg' --exclude '*.out'
RSYNC_EXCLUDES := --exclude .svn --exclude .git --exclude '*~' --exclude TAGS $(LOCAL_RSYNC_EXCLUDES)
RSYNC_COND_DRY_RUN := $(if $(findstring n,$(MAKEFLAGS)),--dry-run,)
CLIENTS = $(shell ls clientbin/*.py)
-BINS = ./config/sfa-config-tty ./config/gen-sfa-cm-config.py \
+BINS = ./config/sfa-config-tty ./config/gen-sfa-cm-config.py ./systemd/sfa-setup.sh \
./sfa/server/sfa-start.py \
./clientbin/sfaadmin.py \
$(CLIENTS)
+$(RSYNC) ./sfa/storage/migrations/versions/*.py $(SSHURL)/usr/share/sfa/migrations/versions/
syncbin: synccheck
+$(RSYNC) $(BINS) $(SSHURL)/usr/bin/
-syncinit: synccheck
- +$(RSYNC) ./init.d/sfa $(SSHURL)/etc/init.d/
+syncservices: synccheck
+ +$(RSYNC) ./systemd/*.service $(SSHURL)/lib/systemd/system
+ +$(RSYNC) ./systemd/sfa-setup.sh $(SSHURL)/usr/bin
syncconfig:
+$(RSYNC) ./config/default_config.xml $(SSHURL)/etc/sfa/
synctest: synccheck
# full-fledged
-sync: synclib syncbin syncinit syncconfig syncrestart
-syncdeb: synclibdeb syncbin syncinit syncconfig syncrestart
+sync: synclib syncbin syncservices syncconfig syncrestart
+syncdeb: synclibdeb syncbin syncservices syncconfig syncrestart
# 99% of the time this is enough
syncfast: synclib syncrestart
sfa/__init__.py \
sfa/client/{sfaserverproxy,sfaclientlib,__init__}.py \
sfa/trust/{certificate,__init__}.py \
-sfa/util/{sfalogging,faults,genicode,enumeration,__init__}.py
+sfa/util/{sfalogging,faults,genicode,enumeration,__init__}.py
-clientlibsync:
+clientlibsync:
@[ -d "$(CLIENTLIBTARGET)" ] || { echo "You need to set the make variable CLIENTLIBTARGET"; exit 1; }
rsync -av --relative $(CLIENTLIBFILES) $(CLIENTLIBTARGET)
<!--
Default SFA configuration file
-Thierry Parmentelat
+Thierry Parmentelat
-->
<variable id="generic_flavour" type="string">
<name>Generic Flavour</name>
<value>pl</value>
- <description>This string refers to a class located in sfa.generic that describes
+ <description>This string refers to a class located in sfa.generic that describes
which specific implementation needs to be used for api, manager and driver objects.
PlanetLab users do not need to change this setting.
</description>
<value>0</value>
<description>Logging level; 0=minimum, 1=info, 2=debug</description>
</variable>
-
+
<variable id="max_slice_renew" type="int">
<name>Max Slice Renew</name>
<value>60</value>
<name>User Session Keys Path </name>
<value>/var/lib/sfa/session_keys</value>
<description>Some services will peform operations on behalf of a user, but make
- it look like the user is the one performing the operation. Doing this requires a
- valid key pair and credential for the user. This option defines the path where
+ it look like the user is the one performing the operation. Doing this requires a
+ valid key pair and credential for the user. This option defines the path where
key pairs and credentials are generated and stored.
- This functionality is used by the SFA web GUI.
- </description>
+ This functionality is used by the SFA web GUI.
+ </description>
</variable>
<variable id="data_dir" type="string">
</variablelist>
</category>
- <!-- ======================================== -->
- <category id="sfa_sm">
- <name>Slice Manager</name>
- <description>The settings that affect the slice manager that will run
- as part of this SFA instance.</description>
-
- <variablelist>
- <variable id="enabled" type="boolean">
- <name>Enable Slice Manager</name>
- <value>false</value>
- <description>Allows this local SFA instance to run as a
- slice manager. Warning, this feature is not really supported
- any longer.</description>
- </variable>
-
- <variable id="host" type="hostname">
- <name>Hostname</name>
- <value>localhost</value>
- <description>The hostname where the slice manager is expected to
- be found.</description>
- </variable>
-
- <variable id="port" type="int">
- <name>Port number</name>
- <value>12347</value>
- <description>The port where the slice manager is to be found.</description>
- </variable>
-
- <variable id="caching" type="boolean">
- <name>Cache advertisement rspec</name>
- <value>false</value>
- <description>Enable caching of the global advertisement, as
- returned by ListResources without a slice argument. </description>
- </variable>
-
- </variablelist>
- </category>
-
<!-- ======================================== -->
<category id="sfa_aggregate">
<name>Aggregate</name>
</variablelist>
</category>
- <!-- ======================================== -->
- <category id="sfa_flashpolicy">
- <name>SFA Flash Policy</name>
- <description>The settings that affect the flash policy server that will run
- as part of this SFA instance.</description>
-
- <variablelist>
- <variable id="enabled" type="boolean">
- <name>Enable Flash Policy Server</name>
- <value>false</value>
- <description>Allows this local SFA instance to run a
- flash policy server.</description>
- </variable>
- <variable id="config_file" type="string">
- <name>Flash policy config file</name>
- <value>/etc/sfa/sfa_flashpolicy_config.xml</value>
- <description>The path to where the flash policy config file can be reached.</description>
- </variable>
- <variable id="port" type="int">
- <name>Flash policy port</name>
- <value>843</value>
- <description>The flash policy server port.</description>
- </variable>
- </variablelist>
- </category>
-
<!-- ======================================== -->
<category id="sfa_plc">
<name></name>
</variablelist>
</category>
- <!-- ======================================== -->
- <category id="sfa_federica">
- <name></name>
- <description>The settings that tell this SFA instance how to interact with the FEDERICA testbed.</description>
-
- <variablelist>
- <variable id="url" type="string">
- <name>XMLRPC URL</name>
- <value>https://root:password@federica.sfa.wrapper.com:8443/fedewrapper/xmlrpc/</value>
- <description>URL for the federica xmlrpc API; login and password need to be set like in http://login:password@hostname:port/the/path </description>
- </variable>
- </variablelist>
- </category>
-
- <!-- ======================================== -->
- <category id="sfa_nova">
- <name>SFA Flash Policy</name>
- <description>The settings that affect how SFA connects to
- the Nova/EC2 API</description>
- <variablelist>
- <variable id="user" type="string">
- <name>Sfa nova user</name>
- <value>novaadmin</value>
- <description>Account/context to use when performing
- administrative nova operations</description>
- </variable>
- <variable id="api_url" type="string">
- <name>Nova API url</name>
- <value>127.0.0.1</value>
- <description>The Nova/EC2 API url </description>
- </variable>
- <variable id="api_port" type="int">
- <name>Nova API Port</name>
- <value>8773</value>
- <description>The Nova/EC2 API port.</description>
- </variable>
- <variable id="novarc" type="string">
- <name>novarc</name>
- <value>/root/novarc</value>
- <description>Path to novarc client config file</description>
- </variable>
- </variablelist>
- </category>
-
- <!-- ======================================== -->
- <category id="sfa_nitos">
- <name></name>
- <description>The settings that tell this SFA instance how to interact with the NITOS testbed.</description>
-
- <variablelist>
- <variable id="url" type="string">
- <name>XMLRPC URL</name>
- <value>http://195.251.17.239:8080/RPC2</value>
- <description>URL for the NITOS Scheduler xmlrpc API</description>
- </variable>
- </variablelist>
- </category>
-
<!-- ======================================== -->
<category id="sfa_dummy">
<name></name>
+++ /dev/null
-#!/usr/bin/python
-import os
-import sys
-import socket
-sys.path.append('/usr/share/plc_api')
-from sfa.util.config import Config as SfaConfig
-from PLC.Config import Config as PlcConfig
-
-sfa_config = SfaConfig()
-plc_config = PlcConfig()
-default_host = socket.gethostbyname(socket.gethostname())
-all_vars = ['SFA_CONFIG_DIR', 'SFA_DATA_DIR', 'SFA_INTERFACE_HRN',
- 'SFA_CM_SLICE_PREFIX', 'SFA_REGISTRY_HOST', 'SFA_REGISTRY_PORT',
- 'SFA_AGGREGATE_HOST', 'SFA_AGGREGATE_PORT',
- 'SFA_SM_HOST', 'SFA_SM_PORT',
- 'SFA_CM_ENABLED', 'SFA_CM_HOST', 'SFA_CM_PORT', 'SFA_CM_TYPE', 'SFA_CM_SLICE_PREFIX',
- 'SFA_API_LOGLEVEL']
-
-defaults = {
- 'SFA_CM_ENABLED': '1',
- 'SFA_CM_HOST': 'localhost',
- 'SFA_CM_PORT': '12346',
- 'SFA_CM_SLICE_PREFIX': plc_config.PLC_SLICE_PREFIX,
- 'SFA_CM_TYPE': 'pl',
- 'SFA_API_LOGLEVEL': '0'
-}
-
-host_defaults = {
- 'SFA_REGISTRY_HOST': default_host,
- 'SFA_AGGREGATE_HOST': default_host,
- 'SFA_SM_HOST': default_host,
-}
-
-const_dict = {}
-for key in all_vars:
- value = ""
-
- if key in defaults:
- value = defaults[key]
- elif hasattr(sfa_config, key):
- value = getattr(sfa_config, key)
- # sfa_config may specify localhost instead of a resolvalbe host or ip
- # if so replace this with the host's address
- if key in host_defaults and value in ['localhost', '127.0.0.1']:
- value = host_defaults[key]
- const_dict[key] = value
-
-filename = sfa_config.config_path + os.sep + 'sfa_component_config'
-conffile = open(filename, 'w')
-format = '%s="%s"\n'
-
-for var in all_vars:
- conffile.write(format % (var, const_dict[var]))
-
-conffile.close()
"SFA_REGISTRY_ROOT_AUTH",
"SFA_REGISTRY_HOST",
"SFA_AGGREGATE_HOST",
- "SFA_SM_HOST",
"SFA_DB_HOST",
]
flavour_xml_section_hash = {
'pl': 'sfa_plc',
- 'openstack': 'sfa_nova',
- 'fd': 'sfa_federica',
- 'nitos': 'sfa_nitos',
'dummy': 'sfa_dummy',
}
configuration = {
def reload_service():
- global service
- os.system("set -x ; service %s reload" % service)
+ reload = "sfa-setup.sh reload"
+ print("Running: {}".format(reload))
+ os.system(reload)
####################
def restart_service():
- global service
- print("==================== Stopping %s" % service)
- os.system("service %s stop" % service)
- print("==================== Starting %s" % service)
- os.system("service %s start" % service)
+ services = ('sfa-db', 'sfa-aggregate', 'sfa-registry')
+ for service in services:
+ restart = ("systemctl -q is-active {s} && "
+ "{{ echo restarting {s} ; systemctl restart {s}; }}"
+ .format(s=service))
+ os.system(restart)
####################
+++ /dev/null
-# SFA Config file for the Component Manager
-#
-
-# HRN
-# Human readable name for this interface
-SFA_INTERFACE_HRN="plc"
-
-# loglevel
-# How deep are we logging (0|1|2)
-SFA_API_LOGLEVEL=1
-
-## ============================================================
-# Registry Configuration
-#
-# Enabled
-# Enable the registry interface
-SFA_REGISTRY_ENABLED=0
-
-# Root Auth
-# The hrn of the registry's root auth
-SFA_REGISTRY_ROOT_AUTH="plc"
-
-# Level1 Auth
-# The hrn of the registry's level1 auth (sub authority)
-# The full name of this interface (only secify if this
-# interface is a sub authority)
-# xxx could be determined from hrn above
-SFA_REGISTRY_LEVEL1_AUTH=""
-
-SFA_GENERIC_FLAVOUR='pl'
-
-# Hostname
-# The fully qualified hostname of the registry server
-SFA_REGISTRY_HOST="localhost"
-
-# Port
-# SFA registry port
-SFA_REGISTRY_PORT=12345
-
-## ==============================================================
-## Aggregate Configuration
-##
-## Enable aggregate inteface
-## Enable the aggregate inteface.
-SFA_AGGREGATE_ENABLED=0
-
-#
-#
-## Hostname
-## The fully qualified hostname of the aggregate server
-SFA_AGGREGATE_HOST="localhost"
-#
-## Port
-## SFA aggregate server port
-SFA_AGGREGATE_PORT=12346
-
-## ==============================================================
-# Slice Manager Configuration
-#
-# Enabled
-# Enable the slice manager
-SFA_SM_ENABLED=0
-
-# Host
-## The fully qualified hostname or IP address of the slice manager server
-SFA_SM_HOST="localhost"
-
-# Port
-# SFA slice manager server port
-SFA_SM_PORT=12347
-
-## ===============================================================
-# Component Manager Configuration
-#
-# Enabled
-## Enable the slice manager
-SFA_CM_ENABLED=1
-
-# Component Manager type
-#
-# The type of backend server for this component manager
-SFA_CM_TYPE='pl'
-#
-# Host
-## The fully qualified hostname or IP address of the slice manager server
-SFA_CM_HOST="localhost"
-#
-# Port
-# SFA slice manager server port
-SFA_CM_PORT=12346
-
-
-# Directory internal data gets stored
-SFA_CONFIG_DIR="/etc/sfa"
-
-# Directory internal data gets stored
-SFA_DATA_DIR="/var/lib/sfa"
-
+++ /dev/null
-sfa (@VERSION@) UNRELEASED; urgency=low
-
- * Initial release.
-
- -- Thierry Parmentelat <thierry.parmentelat@inria.fr> @DATE@ +0000
+++ /dev/null
-Source: sfa
-Maintainer: Thierry Parmentelat <Thierry.Parmentelat@inria.fr>
-Section: misc
-Priority: optional
-Standards-Version: 3.9.2
-Build-Depends: devscripts, debhelper (>=7.0.50~), debconf, dpatch, python-setuptools, make
-X-Python-Version: >= 2.7
-
-Package: sfa
-Architecture: any
-Depends: postgresql (>= 8.2), python-psycopg2, python-sqlalchemy, python-migrate, uuid-runtime, sfa-common
-Description: Server-side for SFA, generic implementation derived from PlanetLab
-
-Package: sfa-common
-Architecture: any
-Depends: python (>= 2.7), python-openssl (>= 0.7), python-m2crypto, python-dateutil, python-lxml, python-libxslt1, python-zsi, xmlsec1
-Description: Python libraries for SFA, generic implementation derived from PlanetLab
-
-Package: sfa-flashpolicy
-Architecture: any
-Depends: sfa-common
-Description: SFA support for flash clients
-
-Package: sfa-client
-Architecture: any
-Depends: sfa-common
-Description: sfi, the SFA experimenter-side CLI
-
-Package: sfa-plc
-Architecture: any
-Depends: sfa-common
-Description: the SFA layer around MyPLC
-
-Package: sfa-federica
-Architecture: any
-Depends: sfa-common
-Description: the SFA layer around Federica
-
-Package: sfa-nitos
-Architecture: any
-Depends: sfa-common
-Description: the SFA layer around NITOS
-
-Package: sfa-iotlab
-Architecture: any
-Depends: sfa-common, python-passlib, python-ldap
-Description: the SFA layer around IotLab
-
-Package: sfa-dummy
-Architecture: any
-Depends: sfa-common
-Description: the SFA layer around a Dummy Testbed
-
-Package: sfa-sfatables
-Architecture: any
-Depends: sfa-common
-Description: sfatables policy tool for SFA
-
-Package: sfa-tests
-Architecture: any
-Depends: sfa-common
-Description: unit tests suite for SFA
-
+++ /dev/null
-#!/usr/bin/make -f
-# -*- makefile -*-
-
-%:
- dh $@ --with python2 --buildsystem=python_distutils
-
-override_dh_prep:
- $(MAKE) version
- $(MAKE) wsdl
- dh_prep -O--buildsystem=python_distutils
+++ /dev/null
-etc/sfa/sfi_config
-usr/bin/sfi*.py*
-#usr/bin/sfi
-usr/bin/get*.py*
-usr/bin/setRecord.py*
-usr/bin/sfascan.py*
-#usr/bin/sfascan
-usr/bin/sfadump.py*
-usr/bin/sfax509.py*
+++ /dev/null
-usr/lib*/python*/dist-packages/sfa/__init__.py*
-usr/lib*/python*/dist-packages/sfa/trust
-usr/lib*/python*/dist-packages/sfa/storage
-usr/lib*/python*/dist-packages/sfa/util
-usr/lib*/python*/dist-packages/sfa/server
-usr/lib*/python*/dist-packages/sfa/methods
-usr/lib*/python*/dist-packages/sfa/generic
-usr/lib*/python*/dist-packages/sfa/managers
-usr/lib*/python*/dist-packages/sfa/importer
-usr/lib*/python*/dist-packages/sfa/rspecs
-usr/lib*/python*/dist-packages/sfa/client
+++ /dev/null
-usr/lib*/python*/dist-packages/sfa/dummy
+++ /dev/null
-usr/lib*/python*/dist-packages/sfa/federica
+++ /dev/null
-usr/bin/sfa_flashpolicy.py*
-etc/sfa/sfa_flashpolicy_config.xml
+++ /dev/null
-usr/lib*/python*/dist-packages/sfa/iotlab
+++ /dev/null
-usr/lib*/python*/dist-packages/sfa/nitos
+++ /dev/null
-usr/lib*/python*/dist-packages/sfa/planetlab
-usr/lib*/python*/dist-packages/sfa/openstack
-etc/sfa/pl.rng
-etc/sfa/credential.xsd
-etc/sfa/top.xsd
-etc/sfa/sig.xsd
-etc/sfa/xml.xsd
-etc/sfa/protogeni-rspec-common.xsd
-etc/sfa/topology
+++ /dev/null
-etc/sfatables/*
-usr/bin/sfatables
-usr/lib*/python*/dist-packages/sfatables
+++ /dev/null
-tests/*.py /usr/share/sfa/tests/
+++ /dev/null
-usr/bin/sfa-start.py*
-usr/bin/sfaadmin.py*
-#usr/bin/sfaadmin
-usr/bin/keyconvert.py*
-usr/bin/sfa-config-tty
-usr/bin/sfa-config
-etc/sfa/default_config.xml
-etc/sfa/aggregates.xml
-etc/sfa/registries.xml
-etc/init.d/sfa
-etc/init.d/functions.sfa
-usr/share/sfa/migrations
-usr/share/sfa/examples
-wsdl/*.wsdl var/www/html/wsdl/
+++ /dev/null
-#!/bin/bash
-# this file is not used yet
-# I take it the idea was to implement
-# something like chkconfig sfa on
-
-update-rc.d sfa defaults
+++ /dev/null
-3.0 (quilt)
pythondoc.py ../sfa/util/certificate.py ../sfa/util/credential.py ../sfa/util/gid.py \
../sfa/util/rights.py ../sfa/util/config.py ../sfa/trust/hierarchy.py \
../sfa/util/record.py ../sfa/util/client.py \
- ../sfa/server/sfaserver.py
+ ../sfa/server/sfaserver.py
pythondoc.py ../sfa/registry/registry.py ../sfa/registry/import.py \
../sfa/registry/nuke.py
-
- pythondoc.py ../component/component.py
+++ /dev/null
-<!DOCTYPE html PUBLIC '-//W3C//DTD XHTML 1.0 Strict//EN' 'http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd'>
-<html>
-<head>
-<meta http-equiv='Content-Type' content='text/html; charset=us-ascii' />
-<title>The component Module</title>
-</head>
-<body>
-<h1>The component Module</h1>
-<p>Geni Component Wrapper
-
-This wrapper implements the Geni Component Interface</p>
-<dl>
-<dt><b>ComponentManager(ip, port, key_file, cert_file)</b> (class) [<a href='#component.ComponentManager-class'>#</a>]</dt>
-<dd>
-<p>ComponentManager is a GeniServer that serves component interface requests.</p>
-<p>For more information about this class, see <a href='#component.ComponentManager-class'><i>The ComponentManager Class</i></a>.</p>
-</dd>
-</dl>
-<h2><a id='component.ComponentManager-class' name='component.ComponentManager-class'>The ComponentManager Class</a></h2>
-<dl>
-<dt><b>ComponentManager(ip, port, key_file, cert_file)</b> (class) [<a href='#component.ComponentManager-class'>#</a>]</dt>
-<dd>
-<p>ComponentManager is a GeniServer that serves component interface requests.</p>
-</dd>
-<dt><a id='component.ComponentManager.decode_ticket-method' name='component.ComponentManager.decode_ticket-method'><b>decode_ticket(ticket_string)</b></a> [<a href='#component.ComponentManager.decode_ticket-method'>#</a>]</dt>
-<dd>
-<p>Examine the ticket that was provided by the caller, check that it is
-signed and verified correctly. Throw an exception if something is
-wrong with the ticket.
-
-This is similar to geniserver.decode_authentication</p>
-<dl>
-<dt><i>ticket_string</i></dt>
-<dd>
-the string representation of the ticket</dd>
-</dl><br />
-</dd>
-<dt><a id='component.ComponentManager.delete_slice-method' name='component.ComponentManager.delete_slice-method'><b>delete_slice(cred_str)</b></a> [<a href='#component.ComponentManager.delete_slice-method'>#</a>]</dt>
-<dd>
-<p>Delete a slice.</p>
-<dl>
-<dt><i>cred</i></dt>
-<dd>
-a credential identifying the caller (callerGID) and the slice
- (objectGID)</dd>
-</dl><br />
-</dd>
-<dt><a id='component.ComponentManager.geni_ticket_to_plc_rec-method' name='component.ComponentManager.geni_ticket_to_plc_rec-method'><b>geni_ticket_to_plc_rec(ticket)</b></a> [<a href='#component.ComponentManager.geni_ticket_to_plc_rec-method'>#</a>]</dt>
-<dd>
-<p>Convert a geni ticket into a PLC record that can be submitted to the
-node manager. This involves some minor reformatting of the fields
-contained in the ticket.</p>
-<dl>
-<dt><i>ticket</i></dt>
-<dd>
-a ticket object</dd>
-<dt>Returns:</dt>
-<dd>
-a dictionary containing the PLC record info</dd>
-</dl><br />
-</dd>
-<dt><a id='component.ComponentManager.list_slices-method' name='component.ComponentManager.list_slices-method'><b>list_slices(cred_str)</b></a> [<a href='#component.ComponentManager.list_slices-method'>#</a>]</dt>
-<dd>
-<p>List the slices on a component.</p>
-<dl>
-<dt><i>cred_str</i></dt>
-<dd>
-string representation of a credential object that
- authorizes the caller</dd>
-<dt>Returns:</dt>
-<dd>
-a list of slice names</dd>
-</dl><br />
-</dd>
-<dt><a id='component.ComponentManager.reboot-method' name='component.ComponentManager.reboot-method'><b>reboot(cred_str)</b></a> [<a href='#component.ComponentManager.reboot-method'>#</a>]</dt>
-<dd>
-<p>Reboot the component.</p>
-<dl>
-<dt><i>cred_str</i></dt>
-<dd>
-string representation of a credential object that
- authorizes the caller</dd>
-</dl><br />
-</dd>
-<dt><a id='component.ComponentManager.redeem_ticket-method' name='component.ComponentManager.redeem_ticket-method'><b>redeem_ticket(ticket_str)</b></a> [<a href='#component.ComponentManager.redeem_ticket-method'>#</a>]</dt>
-<dd>
-<p>Redeem a ticket.
-
-The ticket is submitted to the node manager, and the slice is instantiated
-or updated as appropriate.
-
-TODO: This operation should return a sliver credential and indicate
-whether or not the component will accept only sliver credentials, or
-will accept both sliver and slice credentials.</p>
-<dl>
-<dt><i>ticket_str</i></dt>
-<dd>
-the string representation of a ticket object</dd>
-</dl><br />
-</dd>
-<dt><a id='component.ComponentManager.register_functions-method' name='component.ComponentManager.register_functions-method'><b>register_functions()</b></a> [<a href='#component.ComponentManager.register_functions-method'>#</a>]</dt>
-<dd>
-<p>Register the server RPCs for the component</p>
-</dd>
-<dt><a id='component.ComponentManager.reset_slice-method' name='component.ComponentManager.reset_slice-method'><b>reset_slice(cred_str)</b></a> [<a href='#component.ComponentManager.reset_slice-method'>#</a>]</dt>
-<dd>
-<p>Reset a slice.</p>
-<dl>
-<dt><i>cred</i></dt>
-<dd>
-a credential identifying the caller (callerGID) and the slice
- (objectGID)</dd>
-</dl><br />
-</dd>
-<dt><a id='component.ComponentManager.start_slice-method' name='component.ComponentManager.start_slice-method'><b>start_slice(cred_str)</b></a> [<a href='#component.ComponentManager.start_slice-method'>#</a>]</dt>
-<dd>
-<p>Start a slice.</p>
-<dl>
-<dt><i>cred</i></dt>
-<dd>
-a credential identifying the caller (callerGID) and the slice
- (objectGID)</dd>
-</dl><br />
-</dd>
-<dt><a id='component.ComponentManager.stop_slice-method' name='component.ComponentManager.stop_slice-method'><b>stop_slice(cred_str)</b></a> [<a href='#component.ComponentManager.stop_slice-method'>#</a>]</dt>
-<dd>
-<p>Stop a slice.</p>
-<dl>
-<dt><i>cred</i></dt>
-<dd>
-a credential identifying the caller (callerGID) and the slice
- (objectGID)</dd>
-</dl><br />
-</dd>
-</dl>
-</body></html>
+++ /dev/null
-#!/usr/bin/env python
-#
-# flashpolicyd.py
-# Simple socket policy file server for Flash
-#
-# Usage: flashpolicyd.py [--port=N] --file=FILE
-#
-# Logs to stderr
-# Requires Python 2.5 or later
-
-from __future__ import with_statement
-import os
-import sys
-import optparse
-import socket
-import thread
-import exceptions
-import contextlib
-
-VERSION = 0.1
-
-
-def daemon():
- """Daemonize the current process."""
- if os.fork() != 0:
- os._exit(0)
- os.setsid()
- if os.fork() != 0:
- os._exit(0)
- os.umask(0)
- devnull = os.open(os.devnull, os.O_RDWR)
- os.dup2(devnull, 0)
- # xxx fixme - this is just to make sure that nothing gets stupidly lost -
- # should use devnull
- crashlog = os.open('/var/log/sfa_flashpolicy.log', os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
- os.dup2(crashlog, 1)
- os.dup2(crashlog, 2)
-
-
-class policy_server(object):
-
- def __init__(self, port, path):
- self.port = port
- self.path = path
- self.policy = self.read_policy(path)
- self.log('Listening on port %d\n' % port)
- try:
- self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
- except AttributeError:
- # AttributeError catches Python built without IPv6
- self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- except socket.error:
- # socket.error catches OS with IPv6 disabled
- self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- self.sock.bind(('', port))
- self.sock.listen(5)
-
- def read_policy(self, path):
- with open(path, 'rb') as f:
- policy = f.read(10001)
- if len(policy) > 10000:
- raise exceptions.RuntimeError('File probably too large to be a policy file',
- path)
- if 'cross-domain-policy' not in policy:
- raise exceptions.RuntimeError('Not a valid policy file',
- path)
- return policy
-
- def run(self):
- try:
- while True:
- thread.start_new_thread(self.handle, self.sock.accept())
- except socket.error as e:
- self.log('Error accepting connection: %s' % e[1])
-
- def handle(self, conn, addr):
- addrstr = '%s:%s' % (addr[0], addr[1])
- try:
- self.log('Connection from %s' % addrstr)
- with contextlib.closing(conn):
- # It's possible that we won't get the entire request in
- # a single recv, but very unlikely.
- request = conn.recv(1024).strip()
- # if request != '<policy-file-request/>\0':
- # self.log('Unrecognized request from %s: %s' % (addrstr, request))
- # return
- self.log('Valid request received from %s' % addrstr)
- conn.sendall(self.policy)
- self.log('Sent policy file to %s' % addrstr)
- except socket.error as e:
- self.log('Error handling connection from %s: %s' % (addrstr, e[1]))
- except Exception as e:
- self.log('Error handling connection from %s: %s' % (addrstr, e[1]))
-
- def log(self, str):
- print >>sys.stderr, str
-
-
-def main():
- parser = optparse.OptionParser(usage='%prog [--port=PORT] --file=FILE',
- version='%prog ' + str(VERSION))
- parser.add_option('-p', '--port', dest='port', type=int, default=843,
- help='listen on port PORT', metavar='PORT')
- parser.add_option('-f', '--file', dest='path',
- help='server policy file FILE', metavar='FILE')
- parser.add_option("-d", "--daemon", dest="daemon", action="store_true",
- help="Run as daemon.", default=False)
- opts, args = parser.parse_args()
- if args:
- parser.error('No arguments are needed. See help.')
- if not opts.path:
- parser.error('File must be specified. See help.')
-
- try:
- if opts.daemon:
- daemon()
- policy_server(opts.port, opts.path).run()
- except Exception as e:
- print >> sys.stderr, e
- sys.exit(1)
- except KeyboardInterrupt:
- pass
-
-if __name__ == '__main__':
- main()
+++ /dev/null
-<?xml version="1.0"?>
-<!DOCTYPE cross-domain-policy SYSTEM "/xml/dtds/cross-domain-policy.dtd">
-
-<cross-domain-policy>
- <site-control permitted-cross-domain-policies="master-only"/>
- <allow-access-from domain="*" to-ports="80,443,12345,12346,12347" />
-</cross-domain-policy>
-
+++ /dev/null
-# -*-Shell-script-*-
-#
-# Thierry, jan 17 2013
-# this file was put together by Jordan to provide the same interface as
-# /etc/init.d/functions on fedora systems
-# (probably is extracted from one of the fedora releases as is, not sure about that)
-#
-# we unconditionnally ship this as /etc/init.d/functions.sfa,
-# and then our own initscript (init.d/sfa) does source that
-# conditionnally, i.e. when run on debian systems
-####################
-#
-# functions This file contains functions to be used by most or all
-# shell scripts in the /etc/init.d directory.
-#
-
-TEXTDOMAIN=initscripts
-
-# Make sure umask is sane
-umask 022
-
-# Set up a default search path.
-PATH="/sbin:/usr/sbin:/bin:/usr/bin"
-export PATH
-
-if [ $PPID -ne 1 -a -z "$SYSTEMCTL_SKIP_REDIRECT" ] && \
- ( /bin/mountpoint -q /cgroup/systemd || /bin/mountpoint -q /sys/fs/cgroup/systemd ) ; then
- case "$0" in
- /etc/init.d/*|/etc/rc.d/init.d/*)
- _use_systemctl=1
- ;;
- esac
-fi
-
-# ubuntu does not have /bin/systemctl
-[ -f /bin/systemctl ] || _use_systemctl=0
-
-
-systemctl_redirect () {
- local s
- local prog=${1##*/}
- local command=$2
-
- case "$command" in
- start)
- s=$"Starting $prog (via systemctl): "
- ;;
- stop)
- s=$"Stopping $prog (via systemctl): "
- ;;
- reload|try-reload)
- s=$"Reloading $prog configuration (via systemctl): "
- ;;
- restart|try-restart|condrestart)
- s=$"Restarting $prog (via systemctl): "
- ;;
- esac
-
- action "$s" /bin/systemctl $command "$prog.service"
-}
-
-# Get a sane screen width
-[ -z "${COLUMNS:-}" ] && COLUMNS=80
-
-#if [ -z "${CONSOLETYPE:-}" ]; then
-# if [ -r "/dev/stderr" ]; then
-# CONSOLETYPE="$(/sbin/consoletype < /dev/stderr)"
-# else
-# CONSOLETYPE="$(/sbin/consoletype)"
-# fi
-#fi
-
-if [ -z "${NOLOCALE:-}" ] && [ -z "${LANGSH_SOURCED:-}" ] && [ -f /etc/sysconfig/i18n ] ; then
- . /etc/profile.d/lang.sh 2>/dev/null
- # avoid propagating LANGSH_SOURCED any further
- unset LANGSH_SOURCED
-fi
-
-# Read in our configuration
-if [ -z "${BOOTUP:-}" ]; then
- if [ -f /etc/sysconfig/init ]; then
- . /etc/sysconfig/init
- else
- # This all seem confusing? Look in /etc/sysconfig/init,
- # or in /usr/doc/initscripts-*/sysconfig.txt
- BOOTUP=color
- RES_COL=60
- MOVE_TO_COL="echo -en \\033[${RES_COL}G"
- SETCOLOR_SUCCESS="echo -en \\033[1;32m"
- SETCOLOR_FAILURE="echo -en \\033[1;31m"
- SETCOLOR_WARNING="echo -en \\033[1;33m"
- SETCOLOR_NORMAL="echo -en \\033[0;39m"
- LOGLEVEL=1
- fi
- if [ "$CONSOLETYPE" = "serial" ]; then
- BOOTUP=serial
- MOVE_TO_COL=
- SETCOLOR_SUCCESS=
- SETCOLOR_FAILURE=
- SETCOLOR_WARNING=
- SETCOLOR_NORMAL=
- fi
-fi
-
-# Interpret escape sequences in an fstab entry
-fstab_decode_str() {
- fstab-decode echo "$1"
-}
-
-# Check if any of $pid (could be plural) are running
-checkpid() {
- local i
-
- for i in $* ; do
- [ -d "/proc/$i" ] && return 0
- done
- return 1
-}
-
-__readlink() {
- ls -bl "$@" 2>/dev/null| awk '{ print $NF }'
-}
-
-__fgrep() {
- s=$1
- f=$2
- while read line; do
- if strstr "$line" "$s"; then
- echo $line
- return 0
- fi
- done < $f
- return 1
-}
-
-# __umount_loop awk_program fstab_file first_msg retry_msg umount_args
-# awk_program should process fstab_file and return a list of fstab-encoded
-# paths; it doesn't have to handle comments in fstab_file.
-__umount_loop() {
- local remaining sig=
- local retry=3 count
-
- remaining=$(LC_ALL=C awk "/^#/ {next} $1" "$2" | sort -r)
- while [ -n "$remaining" -a "$retry" -gt 0 ]; do
- if [ "$retry" -eq 3 ]; then
- action "$3" fstab-decode umount $5 $remaining
- else
- action "$4" fstab-decode umount $5 $remaining
- fi
- count=4
- remaining=$(LC_ALL=C awk "/^#/ {next} $1" "$2" | sort -r)
- while [ "$count" -gt 0 ]; do
- [ -z "$remaining" ] && break
- count=$(($count-1))
- # jordan # usleep 500000
- sleep 0.5
- remaining=$(LC_ALL=C awk "/^#/ {next} $1" "$2" | sort -r)
- done
- [ -z "$remaining" ] && break
- fstab-decode /sbin/fuser -k -m $sig $remaining >/dev/null
- sleep 3
- retry=$(($retry -1))
- sig=-9
- done
-}
-
-# Similar to __umount loop above, specialized for loopback devices
-__umount_loopback_loop() {
- local remaining devremaining sig=
- local retry=3
-
- remaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $2}' /proc/mounts)
- devremaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $1}' /proc/mounts)
- while [ -n "$remaining" -a "$retry" -gt 0 ]; do
- if [ "$retry" -eq 3 ]; then
- action $"Unmounting loopback filesystems: " \
- fstab-decode umount $remaining
- else
- action $"Unmounting loopback filesystems (retry):" \
- fstab-decode umount $remaining
- fi
- for dev in $devremaining ; do
- losetup $dev > /dev/null 2>&1 && \
- action $"Detaching loopback device $dev: " \
- losetup -d $dev
- done
- remaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $2}' /proc/mounts)
- devremaining=$(awk '$1 ~ /^\/dev\/loop/ && $2 != "/" {print $1}' /proc/mounts)
- [ -z "$remaining" ] && break
- fstab-decode /sbin/fuser -k -m $sig $remaining >/dev/null
- sleep 3
- retry=$(($retry -1))
- sig=-9
- done
-}
-
-# __proc_pids {program} [pidfile]
-# Set $pid to pids from /var/run* for {program}. $pid should be declared
-# local in the caller.
-# Returns LSB exit code for the 'status' action.
-__pids_var_run() {
- local base=${1##*/}
- local pid_file=${2:-/var/run/$base.pid}
-
- pid=
- if [ -f "$pid_file" ] ; then
- local line p
-
- [ ! -r "$pid_file" ] && return 4 # "user had insufficient privilege"
- while : ; do
- read line
- [ -z "$line" ] && break
- for p in $line ; do
- [ -z "${p//[0-9]/}" ] && [ -d "/proc/$p" ] && pid="$pid $p"
- done
- done < "$pid_file"
-
- if [ -n "$pid" ]; then
- return 0
- fi
- return 1 # "Program is dead and /var/run pid file exists"
- fi
- return 3 # "Program is not running"
-}
-
-# Output PIDs of matching processes, found using pidof
-__pids_pidof() {
- pidof -c -o $$ -o $PPID -o %PPID -x "$1" || \
- pidof -c -o $$ -o $PPID -o %PPID -x "${1##*/}"
-# jordan # pidof -c -m -o $$ -o $PPID -o %PPID -x "$1" || \
-# jordan # pidof -c -m -o $$ -o $PPID -o %PPID -x "${1##*/}"
-}
-
-
-# A function to start a program.
-daemon() {
- # Test syntax.
- local gotbase= force= nicelevel corelimit
- local pid base= user= nice= bg= pid_file=
- local cgroup=
- nicelevel=0
- while [ "$1" != "${1##[-+]}" ]; do
- case $1 in
- '') echo $"$0: Usage: daemon [+/-nicelevel] {program}"
- return 1;;
- --check)
- base=$2
- gotbase="yes"
- shift 2
- ;;
- --check=?*)
- base=${1#--check=}
- gotbase="yes"
- shift
- ;;
- --user)
- user=$2
- shift 2
- ;;
- --user=?*)
- user=${1#--user=}
- shift
- ;;
- --pidfile)
- pid_file=$2
- shift 2
- ;;
- --pidfile=?*)
- pid_file=${1#--pidfile=}
- shift
- ;;
- --force)
- force="force"
- shift
- ;;
- [-+][0-9]*)
- nice="nice -n $1"
- shift
- ;;
- *) echo $"$0: Usage: daemon [+/-nicelevel] {program}"
- return 1;;
- esac
- done
-
- # Save basename.
- [ -z "$gotbase" ] && base=${1##*/}
-
- # See if it's already running. Look *only* at the pid file.
- __pids_var_run "$base" "$pid_file"
-
- [ -n "$pid" -a -z "$force" ] && return
-
- # make sure it doesn't core dump anywhere unless requested
- corelimit="ulimit -S -c ${DAEMON_COREFILE_LIMIT:-0}"
-
- # if they set NICELEVEL in /etc/sysconfig/foo, honor it
- [ -n "${NICELEVEL:-}" ] && nice="nice -n $NICELEVEL"
-
- # if they set CGROUP_DAEMON in /etc/sysconfig/foo, honor it
- if [ -n "${CGROUP_DAEMON}" ]; then
- if [ ! -x /bin/cgexec ]; then
- echo -n "Cgroups not installed"; warning
- echo
- else
- cgroup="/bin/cgexec";
- for i in $CGROUP_DAEMON; do
- cgroup="$cgroup -g $i";
- done
- fi
- fi
-
- # Echo daemon
- [ "${BOOTUP:-}" = "verbose" -a -z "${LSB:-}" ] && echo -n " $base"
-
- # And start it up.
- if [ -z "$user" ]; then
- $cgroup $nice /bin/bash -c "$corelimit >/dev/null 2>&1 ; $*"
- else
- $cgroup $nice runuser -s /bin/bash $user -c "$corelimit >/dev/null 2>&1 ; $*"
- fi
-
- [ "$?" -eq 0 ] && success $"$base startup" || failure $"$base startup"
-}
-
-# A function to stop a program.
-killproc() {
- local RC killlevel= base pid pid_file= delay
-
- RC=0; delay=3
- # Test syntax.
- if [ "$#" -eq 0 ]; then
- echo $"Usage: killproc [-p pidfile] [ -d delay] {program} [-signal]"
- return 1
- fi
- if [ "$1" = "-p" ]; then
- pid_file=$2
- shift 2
- fi
- if [ "$1" = "-d" ]; then
- delay=$2
- shift 2
- fi
-
-
- # check for second arg to be kill level
- [ -n "${2:-}" ] && killlevel=$2
-
- # Save basename.
- base=${1##*/}
-
- # Find pid.
- __pids_var_run "$1" "$pid_file"
- RC=$?
- if [ -z "$pid" ]; then
- if [ -z "$pid_file" ]; then
- pid="$(__pids_pidof "$1")"
- else
- [ "$RC" = "4" ] && { failure $"$base shutdown" ; return $RC ;}
- fi
- fi
-
- # Kill it.
- if [ -n "$pid" ] ; then
- [ "$BOOTUP" = "verbose" -a -z "${LSB:-}" ] && echo -n "$base "
- if [ -z "$killlevel" ] ; then
- if checkpid $pid 2>&1; then
- # TERM first, then KILL if not dead
- kill -TERM $pid >/dev/null 2>&1
- sleep 0.1
- # jordan # usleep 100000
- if checkpid $pid && sleep 1 &&
- checkpid $pid && sleep $delay &&
- checkpid $pid ; then
- kill -KILL $pid >/dev/null 2>&1
- sleep 0.1
- # jordan # usleep 100000
- fi
- fi
- checkpid $pid
- RC=$?
- [ "$RC" -eq 0 ] && failure $"$base shutdown" || success $"$base shutdown"
- RC=$((! $RC))
- # use specified level only
- else
- if checkpid $pid; then
- kill $killlevel $pid >/dev/null 2>&1
- RC=$?
- [ "$RC" -eq 0 ] && success $"$base $killlevel" || failure $"$base $killlevel"
- elif [ -n "${LSB:-}" ]; then
- RC=7 # Program is not running
- fi
- fi
- else
- if [ -n "${LSB:-}" -a -n "$killlevel" ]; then
- RC=7 # Program is not running
- else
- failure $"$base shutdown"
- RC=0
- fi
- fi
-
- # Remove pid file if any.
- if [ -z "$killlevel" ]; then
- rm -f "${pid_file:-/var/run/$base.pid}"
- fi
- return $RC
-}
-
-# A function to find the pid of a program. Looks *only* at the pidfile
-pidfileofproc() {
- local pid
-
- # Test syntax.
- if [ "$#" = 0 ] ; then
- echo $"Usage: pidfileofproc {program}"
- return 1
- fi
-
- __pids_var_run "$1"
- [ -n "$pid" ] && echo $pid
- return 0
-}
-
-# A function to find the pid of a program.
-pidofproc() {
- local RC pid pid_file=
-
- # Test syntax.
- if [ "$#" = 0 ]; then
- echo $"Usage: pidofproc [-p pidfile] {program}"
- return 1
- fi
- if [ "$1" = "-p" ]; then
- pid_file=$2
- shift 2
- fi
- fail_code=3 # "Program is not running"
-
- # First try "/var/run/*.pid" files
- __pids_var_run "$1" "$pid_file"
- RC=$?
- if [ -n "$pid" ]; then
- echo $pid
- return 0
- fi
-
- [ -n "$pid_file" ] && return $RC
- __pids_pidof "$1" || return $RC
-}
-
-status() {
- local base pid lock_file= pid_file=
-
- # Test syntax.
- if [ "$#" = 0 ] ; then
- echo $"Usage: status [-p pidfile] {program}"
- return 1
- fi
- if [ "$1" = "-p" ]; then
- pid_file=$2
- shift 2
- fi
- if [ "$1" = "-l" ]; then
- lock_file=$2
- shift 2
- fi
- base=${1##*/}
-
- if [ "$_use_systemctl" = "1" ]; then
- systemctl status ${0##*/}.service
- return $?
- fi
-
- # First try "pidof"
- __pids_var_run "$1" "$pid_file"
- RC=$?
- if [ -z "$pid_file" -a -z "$pid" ]; then
- pid="$(__pids_pidof "$1")"
- fi
- if [ -n "$pid" ]; then
- echo $"${base} (pid $pid) is running..."
- return 0
- fi
-
- case "$RC" in
- 0)
- echo $"${base} (pid $pid) is running..."
- return 0
- ;;
- 1)
- echo $"${base} dead but pid file exists"
- return 1
- ;;
- 4)
- echo $"${base} status unknown due to insufficient privileges."
- return 4
- ;;
- esac
- if [ -z "${lock_file}" ]; then
- lock_file=${base}
- fi
- # See if /var/lock/subsys/${lock_file} exists
- if [ -f /var/lock/subsys/${lock_file} ]; then
- echo $"${base} dead but subsys locked"
- return 2
- fi
- echo $"${base} is stopped"
- return 3
-}
-
-echo_success() {
- [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
- echo -n "["
- [ "$BOOTUP" = "color" ] && $SETCOLOR_SUCCESS
- echo -n $" OK "
- [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
- echo -n "]"
- echo -ne "\r"
- return 0
-}
-
-echo_failure() {
- [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
- echo -n "["
- [ "$BOOTUP" = "color" ] && $SETCOLOR_FAILURE
- echo -n $"FAILED"
- [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
- echo -n "]"
- echo -ne "\r"
- return 1
-}
-
-echo_passed() {
- [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
- echo -n "["
- [ "$BOOTUP" = "color" ] && $SETCOLOR_WARNING
- echo -n $"PASSED"
- [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
- echo -n "]"
- echo -ne "\r"
- return 1
-}
-
-echo_warning() {
- [ "$BOOTUP" = "color" ] && $MOVE_TO_COL
- echo -n "["
- [ "$BOOTUP" = "color" ] && $SETCOLOR_WARNING
- echo -n $"WARNING"
- [ "$BOOTUP" = "color" ] && $SETCOLOR_NORMAL
- echo -n "]"
- echo -ne "\r"
- return 1
-}
-
-# Inform the graphical boot of our current state
-update_boot_stage() {
- if [ -x /usr/bin/plymouth ]; then
- /usr/bin/plymouth --update="$1"
- fi
- return 0
-}
-
-# Log that something succeeded
-success() {
- [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_success
- return 0
-}
-
-# Log that something failed
-failure() {
- local rc=$?
- [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_failure
- [ -x /usr/bin/plymouth ] && /usr/bin/plymouth --details
- return $rc
-}
-
-# Log that something passed, but may have had errors. Useful for fsck
-passed() {
- local rc=$?
- [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_passed
- return $rc
-}
-
-# Log a warning
-warning() {
- local rc=$?
- [ "$BOOTUP" != "verbose" -a -z "${LSB:-}" ] && echo_warning
- return $rc
-}
-
-# Run some action. Log its output.
-action() {
- local STRING rc
-
- STRING=$1
- echo -n "$STRING "
- shift
- "$@" && success $"$STRING" || failure $"$STRING"
- rc=$?
- echo
- return $rc
-}
-
-# returns OK if $1 contains $2
-strstr() {
- [ "${1#*$2*}" = "$1" ] && return 1
- return 0
-}
-
-# Confirm whether we really want to run this service
-confirm() {
- [ -x /usr/bin/plymouth ] && /usr/bin/plymouth --hide-splash
- while : ; do
- echo -n $"Start service $1 (Y)es/(N)o/(C)ontinue? [Y] "
- read answer
- if strstr $"yY" "$answer" || [ "$answer" = "" ] ; then
- return 0
- elif strstr $"cC" "$answer" ; then
- rm -f /var/run/confirm
- [ -x /usr/bin/plymouth ] && /usr/bin/plymouth --show-splash
- return 2
- elif strstr $"nN" "$answer" ; then
- return 1
- fi
- done
-}
-
-# resolve a device node to its major:minor numbers in decimal or hex
-get_numeric_dev() {
-(
- fmt="%d:%d"
- if [ "$1" = "hex" ]; then
- fmt="%x:%x"
- fi
- ls -lH "$2" | awk '{ sub(/,/, "", $5); printf("'"$fmt"'", $5, $6); }'
-) 2>/dev/null
-}
-
-# Check whether file $1 is a backup or rpm-generated file and should be ignored
-is_ignored_file() {
- case "$1" in
- *~ | *.bak | *.orig | *.rpmnew | *.rpmorig | *.rpmsave)
- return 0
- ;;
- esac
- return 1
-}
-
-# Evaluate shvar-style booleans
-is_true() {
- case "$1" in
- [tT] | [yY] | [yY][eE][sS] | [tT][rR][uU][eE])
- return 0
- ;;
- esac
- return 1
-}
-
-# Evaluate shvar-style booleans
-is_false() {
- case "$1" in
- [fF] | [nN] | [nN][oO] | [fF][aA][lL][sS][eE])
- return 0
- ;;
- esac
- return 1
-}
-
-key_is_random() {
- [ "$1" = "/dev/urandom" -o "$1" = "/dev/hw_random" \
- -o "$1" = "/dev/random" ]
-}
-
-find_crypto_mount_point() {
- local fs_spec fs_file fs_vfstype remaining_fields
- local fs
- while read fs_spec fs_file remaining_fields; do
- if [ "$fs_spec" = "/dev/mapper/$1" ]; then
- echo $fs_file
- break;
- fi
- done < /etc/fstab
-}
-
-# Because of a chicken/egg problem, init_crypto must be run twice. /var may be
-# encrypted but /var/lib/random-seed is needed to initialize swap.
-init_crypto() {
- local have_random dst src key opt mode owner params makeswap skip arg opt
- local param value rc ret mke2fs mdir prompt mount_point
-
- ret=0
- have_random=$1
- while read dst src key opt; do
- [ -z "$dst" -o "${dst#\#}" != "$dst" ] && continue
- [ -b "/dev/mapper/$dst" ] && continue;
- if [ "$have_random" = 0 ] && key_is_random "$key"; then
- continue
- fi
- if [ -n "$key" -a "x$key" != "xnone" ]; then
- if test -e "$key" ; then
- owner=$(ls -l $key | (read a b owner rest; echo $owner))
- if ! key_is_random "$key"; then
- mode=$(ls -l "$key" | cut -c 5-10)
- if [ "$mode" != "------" ]; then
- echo $"INSECURE MODE FOR $key"
- fi
- fi
- if [ "$owner" != root ]; then
- echo $"INSECURE OWNER FOR $key"
- fi
- else
- echo $"Key file for $dst not found, skipping"
- ret=1
- continue
- fi
- else
- key=""
- fi
- params=""
- makeswap=""
- mke2fs=""
- skip=""
- # Parse the src field for UUID= and convert to real device names
- if [ "${src%%=*}" == "UUID" ]; then
- src=$(/sbin/blkid -t "$src" -l -o device)
- elif [ "${src/^\/dev\/disk\/by-uuid\/}" != "$src" ]; then
- src=$(__readlink $src)
- fi
- # Is it a block device?
- [ -b "$src" ] || continue
- # Is it already a device mapper slave? (this is gross)
- devesc=${src##/dev/}
- devesc=${devesc//\//!}
- for d in /sys/block/dm-*/slaves ; do
- [ -e $d/$devesc ] && continue 2
- done
- # Parse the options field, convert to cryptsetup parameters and
- # contruct the command line
- while [ -n "$opt" ]; do
- arg=${opt%%,*}
- opt=${opt##$arg}
- opt=${opt##,}
- param=${arg%%=*}
- value=${arg##$param=}
-
- case "$param" in
- cipher)
- params="$params -c $value"
- if [ -z "$value" ]; then
- echo $"$dst: no value for cipher option, skipping"
- skip="yes"
- fi
- ;;
- size)
- params="$params -s $value"
- if [ -z "$value" ]; then
- echo $"$dst: no value for size option, skipping"
- skip="yes"
- fi
- ;;
- hash)
- params="$params -h $value"
- if [ -z "$value" ]; then
- echo $"$dst: no value for hash option, skipping"
- skip="yes"
- fi
- ;;
- verify)
- params="$params -y"
- ;;
- swap)
- makeswap=yes
- ;;
- tmp)
- mke2fs=yes
- esac
- done
- if [ "$skip" = "yes" ]; then
- ret=1
- continue
- fi
- if [ -z "$makeswap" ] && cryptsetup isLuks "$src" 2>/dev/null ; then
- if key_is_random "$key"; then
- echo $"$dst: LUKS requires non-random key, skipping"
- ret=1
- continue
- fi
- if [ -n "$params" ]; then
- echo "$dst: options are invalid for LUKS partitions," \
- "ignoring them"
- fi
- if [ -n "$key" ]; then
- /sbin/cryptsetup -d $key luksOpen "$src" "$dst" <&1 2>/dev/null && success || failure
- rc=$?
- else
- mount_point="$(find_crypto_mount_point $dst)"
- [ -n "$mount_point" ] || mount_point=${src##*/}
- prompt=$(printf $"%s is password protected" "$mount_point")
- plymouth ask-for-password --prompt "$prompt" --command="/sbin/cryptsetup luksOpen -T1 $src $dst" <&1
- rc=$?
- fi
- else
- [ -z "$key" ] && plymouth --hide-splash
- /sbin/cryptsetup $params ${key:+-d $key} create "$dst" "$src" <&1 2>/dev/null && success || failure
- rc=$?
- [ -z "$key" ] && plymouth --show-splash
- fi
- if [ $rc -ne 0 ]; then
- ret=1
- continue
- fi
- if [ -b "/dev/mapper/$dst" ]; then
- if [ "$makeswap" = "yes" ]; then
- mkswap "/dev/mapper/$dst" 2>/dev/null >/dev/null
- fi
- if [ "$mke2fs" = "yes" ]; then
- if mke2fs "/dev/mapper/$dst" 2>/dev/null >/dev/null \
- && mdir=$(mktemp -d /tmp/mountXXXXXX); then
- mount "/dev/mapper/$dst" "$mdir" && chmod 1777 "$mdir"
- umount "$mdir"
- rmdir "$mdir"
- fi
- fi
- fi
- done < /etc/crypttab
- return $ret
-}
-
-# A sed expression to filter out the files that is_ignored_file recognizes
-__sed_discard_ignored_files='/\(~\|\.bak\|\.orig\|\.rpmnew\|\.rpmorig\|\.rpmsave\)$/d'
-
-if [ "$_use_systemctl" = "1" ]; then
- if [ "x$1" = xstart -o \
- "x$1" = xstop -o \
- "x$1" = xrestart -o \
- "x$1" = xreload -o \
- "x$1" = xtry-restart -o \
- "x$1" = xforce-reload -o \
- "x$1" = xcondrestart ] ; then
-
- systemctl_redirect $0 $1
- exit $?
- fi
-fi
+++ /dev/null
-#!/bin/bash
-#
-# sfa
-# Provides a generic SFA wrapper based on the initial PlanetLab Implementation
-#
-# hopefully right after plc
-# chkconfig: 2345 61 39
-#
-### BEGIN INIT INFO
-# Provides: sfa
-# Required-Start: postgresql
-# Required-Stop: postgresql
-# Default-Start: 2 3 4 5
-# Default-Stop: 0 1 6
-# Short-Description: An implementation of the SFA Architecture
-### END INIT INFO
-
-####################
-# borrowed from postgresql
-function debian_get_postgresql_versions () {
- versions=()
- for v in `ls /usr/lib/postgresql/ 2>/dev/null`; do
- if [ -x /usr/lib/postgresql/$v/bin/pg_ctl ] && [ ! -x /etc/init.d/postgresql-$v ]; then
- versions+=($v)
- fi
- done
- if [[ ${#versions[*]} == "0" ]]; then
- echo "E: Missing postgresql installation. Aborting."
- exit
- fi
- if [[ ${#versions[*]} != "1" ]]; then
- echo "E: Too many postgresql versions installed. Aborting."
- exit
- fi
- pgver=${versions[0]}
-}
-
-####################
-POSTGRESQL_STARTED=/etc/sfa/postgresql-started
-if [ -f /etc/redhat-release ] ; then
- # source function library
- . /etc/init.d/functions
- PGDATA=/var/lib/pgsql/data/
- PGWATCH=postmaster
- PGLOCK=/var/lock/subsys/postgresql
- SFALOCK=/var/lock/subsys/sfa-start.pid
-elif [ -f /etc/debian_version ] ; then
- . /etc/init.d/functions.sfa
- debian_get_postgresql_versions
- PGDATA=/etc/postgresql/$pgver/main/
- PGWATCH=postgres
- PGLOCK=/var/run/postgresql/$pgver-main.pid
- SFALOCK=/var/run/sfa-start.pid
-else
- echo "initscript can only handle redhat/fedora or debian/ubuntu systems"
- exit 1
-fi
-
-
-postgresql_conf=$PGDATA/postgresql.conf
-pg_hba_conf=$PGDATA/pg_hba.conf
-postgresql_sysconfig=/etc/sysconfig/pgsql
-
-# SFA consolidated (merged) config file
-sfa_whole_config=/etc/sfa/sfa_config
-# SFA default config (read-only template)
-sfa_default_config=/etc/sfa/default_config.xml
-# SFA local (site-dependent) file
-sfa_local_config=/etc/sfa/configs/site_config
-sfa_local_config_xml=/etc/sfa/configs/site_config.xml
-
-# Source sfa shell config if present
-[ -f /etc/sfa/sfa_config.sh ] && . /etc/sfa/sfa_config.sh
-
-# Export so that we do not have to specify -p to psql invocations
-export PGPORT=$SFA_DB_PORT
-
-##########
-# Total number of errors
-ERRORS=0
-
-# Count the exit status of the last command
-check ()
-{
- ERRORS=$(($ERRORS+$?))
-}
-
-# can't trust the return of service postgresql start / nor status
-function postgresql_check () {
-
- # wait until postmaster is up and running - or 10s max
- if status $PGWATCH >& /dev/null && [ -f $PGLOCK ] ; then
- # The only way we can be sure is if we can access it
- for i in $(seq 1 10) ; do
- # Must do this as the postgres user initially (before we
- # fix pg_hba.conf to passwordless localhost access).
- su -c 'psql -U postgres -c "" template1' postgres && return 0
- sleep 1
- done
- fi
-
- return 1
-}
-
-# use a single date of this script invocation for the dump_*_db functions.
-DATE=$(date +"%Y-%m-%d-%H-%M-%S")
-
-# Dumps the database - optional argument to specify filename suffix
-function dump_sfa_db() {
- if [ -n "$1" ] ; then suffix="-$1" ; else suffix="" ; fi
- mkdir -p /usr/share/sfa/backups
- dumpfile=/usr/share/sfa/backups/$(date +"${SFA_DB_NAME}.${DATE}${suffix}.sql")
- pg_dump -U $SFA_DB_USER $SFA_DB_NAME > $dumpfile
- echo "Saved sfa database in $dumpfile"
- check
-}
-
-# Regenerate configuration files - almost verbatim from plc.init
-function reload () {
- force=$1
-
- # Regenerate the main configuration file from default values
- # overlaid with site-specific and current values.
- files=( $sfa_default_config $sfa_local_config )
- for file in "${files[@]}" ; do
- if [ -n "$force" -o $file -nt $sfa_whole_config ] ; then
- tmp=$(mktemp /tmp/sfa_config.XXXXXX)
- sfa-config --python "${files[@]}" >$tmp
- if [ $? -eq 0 ] ; then
- mv $tmp $sfa_whole_config
- chmod 444 $sfa_whole_config
- else
- echo "SFA: Warning: Invalid configuration file(s) detected"
- rm -f $tmp
- fi
- break
- fi
- done
-
- # Convert configuration to various formats
- if [ -f $sfa_local_config_xml ] ; then
- sfa-config --python $sfa_local_config_xml > $sfa_local_config
- rm $sfa_local_config_xml
- fi
- if [ -n "$force" -o $sfa_local_config -nt $sfa_whole_config ] ; then
- sfa-config --python $sfa_default_config $sfa_local_config > $sfa_whole_config
- fi
- if [ -n "$force" -o $sfa_whole_config -nt /etc/sfa/sfa_config.sh ] ; then
- sfa-config --shell $sfa_default_config $sfa_local_config > /etc/sfa/sfa_config.sh
- fi
-
- # [re]generate the sfa_component_config
- # this is a server-side thing but produces a file that somehow needs to be pushed
- # on the planetlab nodes; in the case where sfa and myplc run on different boxes
- # (or there is no myplc at all) this should be turned off
- # as the component manager is not operational yet we skip this for now
- #gen-sfa-cm-config.py
-
- # reload the shell version
- [ -f /etc/sfa/sfa_config.sh ] && . /etc/sfa/sfa_config.sh
-
-}
-
-### initialize DB (don't chkconfig postgresql on)
-function db_start () {
-
- # only if enabled
- [ "$SFA_DB_ENABLED" == 1 -o "$SFA_DB_ENABLED" == True ] || return
-
- ######## sysconfig
- # Set data directory and redirect startup output to /var/log/pgsql
- mkdir -p $(dirname $postgresql_sysconfig)
- # remove previous definitions
- touch $postgresql_sysconfig
- tmp=${postgresql_sysconfig}.new
- ( egrep -v '^(PGDATA=|PGLOG=|PGPORT=)' $postgresql_sysconfig
- echo "PGDATA=$PGDATA"
- echo "PGLOG=/var/log/pgsql"
- echo "PGPORT=$SFA_DB_PORT"
- ) >> $tmp ; mv -f $tmp $postgresql_sysconfig
-
- ######## /var/lib/pgsql/data
- # Fix ownership (rpm installation may have changed it)
- chown -R -H postgres:postgres $(dirname $PGDATA)
-
- # PostgreSQL must be started at least once to bootstrap
- # /var/lib/pgsql/data
- if [ ! -f $postgresql_conf ] ; then
- service postgresql initdb &> /dev/null || :
- check
- fi
-
- ######## /var/lib/pgsql/data/postgresql.conf
- registry_ip=""
- foo=$(python -c "import socket; print socket.gethostbyname(\"$SFA_REGISTRY_HOST\")") && registry_ip="$foo"
- # Enable DB server. drop Postgresql<=7.x
- # PostgreSQL >=8.0 defines listen_addresses
- # listen on a specific IP + localhost, more robust when run within a vserver
- sed -i -e '/^listen_addresses/d' $postgresql_conf
- if [ -z "$registry_ip" ] ; then
- echo "listen_addresses = 'localhost'" >> $postgresql_conf
- else
- echo "listen_addresses = '${registry_ip},localhost'" >> $postgresql_conf
- fi
- # tweak timezone to be 'UTC'
- sed -i -e '/^timezone=/d' $postgresql_conf
- echo "timezone='UTC'" >> $postgresql_conf
-
- ######## /var/lib/pgsql/data/pg_hba.conf
- # Disable access to all DBs from all hosts
- sed -i -e '/^\(host\|local\)/d' $pg_hba_conf
-
- # Enable passwordless localhost access
- echo "local all all trust" >>$pg_hba_conf
- # grant access
- (
- echo "host $SFA_DB_NAME $SFA_DB_USER 127.0.0.1/32 password"
- [ -n "$registry_ip" ] && echo "host $SFA_DB_NAME $SFA_DB_USER ${registry_ip}/32 password"
- ) >>$pg_hba_conf
-
- if [ "$SFA_GENERIC_FLAVOUR" == "openstack" ] ; then
- [ -n "$registry_ip" ] && echo "host nova nova ${registry_ip}/32 password" >> $pg_hba_conf
- fi
-
- # Fix ownership (sed -i changes it)
- chown postgres:postgres $postgresql_conf $pg_hba_conf
-
- ######## compute a password if needed
- if [ -z "$SFA_DB_PASSWORD" ] ; then
- SFA_DB_PASSWORD=$(uuidgen)
- sfa-config --category=sfa_db --variable=password --value="$SFA_DB_PASSWORD" --save=$sfa_local_config $sfa_local_config >& /dev/null
- reload force
- fi
-
- ######## Start up the server
- # not too nice, but.. when co-located with myplc we'll let it start/stop postgresql
- postgresql_check || {
- service postgresql start >& /dev/null
- MESSAGE=$"Starting PostgreSQL server"
- echo -n "$MESSAGE"
- [ "$ERRORS" == 0 ] && success "$MESSAGE" || failure "$MESSAGE" ; echo
- # best-effort to make sure we turn it back off when running stop
- touch $POSTGRESQL_STARTED
- }
- postgresql_check
- check
-
- ######## make sure we have the user and db created
- # user
- if ! psql -U $SFA_DB_USER -c "" template1 >/dev/null 2>&1 ; then
- psql -U postgres -c "CREATE USER $SFA_DB_USER PASSWORD '$SFA_DB_PASSWORD'" template1 >& /dev/null
- else
- psql -U postgres -c "ALTER USER $SFA_DB_USER WITH PASSWORD '$SFA_DB_PASSWORD'" template1 >& /dev/null
- fi
- check
-
- # db
- if ! psql -U $SFA_DB_USER -c "" $SFA_DB_NAME >/dev/null 2>&1 ; then
- createdb -U postgres --template=template0 --encoding=UNICODE --owner=$SFA_DB_USER $SFA_DB_NAME
- check
- fi
- check
- # mention sfaadmin.py instead of just sfaadmin for safety
- sfaadmin.py reg sync_db
-
- MESSAGE=$"SFA: Checking for PostgreSQL server"
- echo -n "$MESSAGE"
- [ "$ERRORS" == 0 ] && success "$MESSAGE" || failure "$MESSAGE" ; echo
-}
-
-# shutdown DB
-function db_stop () {
-
- # only if enabled
- [ "$SFA_DB_ENABLED" == 1 -o "$SFA_DB_ENABLED" == True ] || return
-
- # not too nice, but.. when co-located with myplc we'll let it start/stop postgresql
- if [ -f $POSTGRESQL_STARTED ] ; then
- service postgresql stop >& /dev/null
- check
- MESSAGE=$"Stopping PostgreSQL server"
- echo -n "$MESSAGE"
- [ "$ERRORS" == 0 ] && success "$MESSAGE" || failure "$MESSAGE" ; echo
- rm -f $POSTGRESQL_STARTED
- fi
-}
-
-function start() {
-
- reload
-
- db_start
- # migrations are now handled in the code by sfa.storage.dbschema
-
- # install peer certs
- action $"SFA: installing peer certs" daemon /usr/bin/sfa-start.py -t -d $OPTIONS
-
- [ "$SFA_REGISTRY_ENABLED" == 1 -o "$SFA_REGISTRY_ENABLED" == True ] && action $"SFA: Registry" daemon /usr/bin/sfa-start.py -r -d $OPTIONS
-
- [ "$SFA_AGGREGATE_ENABLED" == 1 -o "$SFA_AGGREGATE_ENABLED" == True ] && action $"SFA: Aggregate" daemon /usr/bin/sfa-start.py -a -d $OPTIONS
-
- [ "$SFA_SM_ENABLED" == 1 -o "$SFA_SM_ENABLED" == True ] && action "SFA: SliceMgr" daemon /usr/bin/sfa-start.py -s -d $OPTIONS
-
- [ "$SFA_FLASHPOLICY_ENABLED" == 1 -o "$SFA_FLASHPOLICY_ENABLED" == True ] && \
- action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
-
- touch $SFALOCK
-
-}
-
-function stop() {
- action $"Shutting down SFA" killproc sfa-start.py
-# a possible alternative reads; esp. as we remove lock manually below
-# echo $"Shutting down SFA" ; pkill '^sfa-start'
-
- db_stop
-
- rm -f $SFALOCK
-}
-
-
-case "$1" in
- start) start ;;
- stop) stop ;;
- reload) reload force ;;
- restart) stop; start ;;
- condrestart)
- if [ -f $SFALOCK ]; then
- stop
- start
- fi
- ;;
- status)
- status sfa-start.py
-# possible alternative for debian
-# pids=$(pgrep '^sfa-start'); [ -n "$pids" ] && ps $pids
-
- RETVAL=$?
- ;;
- dbdump)
- dump_sfa_db
- ;;
- *)
- echo $"Usage: $0 {start|stop|reload|restart|condrestart|status|dbdump}"
- exit 1
- ;;
-esac
-
-exit $RETVAL
+++ /dev/null
-#!/bin/bash
-#
-# sfa Wraps PLCAPI into the SFA compliant API
-#
-# chkconfig: 2345 5 99
-#
-# description: Wraps PLCAPI into the SFA compliant API
-#
-
-echo "sfa-cm is no longer supported"
-echo "you should consider rpm -e sfa-cm"
-exit 1
-
-# Source config
-[ -f /etc/sfa/sfa_config.sh ] && . /etc/sfa/sfa_config.sh
-
-# source function library
-. /etc/init.d/functions
-
-init_key() {
- # if key doesnt exist use sfa_componenet_setup to get it
- if [ ! -f /var/lib/sfa/server.key ]; then
- /usr/bin/sfa_component_setup.py -k
- fi
-}
-
-start() {
- echo -n $"Starting SFA: "
-
- if [ "$SFA_CM_ENABLED" ]; then
- echo "Component Mgr"
- # make sure server key (nodes private key) exists first
- init_key
- /usr/bin/sfa-start.py -c -d $OPTIONS
- fi
-
- RETVAL=$?
- echo
- [ $RETVAL -eq 0 ] && touch /var/lock/subsys/sfa
-
-}
-
-stop() {
- echo -n $"Shutting down SFA: "
- killproc sfa-start.py
- RETVAL=$?
-
- echo
- [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/sfa
-}
-
-
-
-case "$1" in
- start)
- start
- ;;
- stop)
- stop
- ;;
- restart|reload)
- stop
- start
- ;;
- condrestart)
- if [ -f /var/lock/subsys/sfa ]; then
- stop
- start
- fi
- ;;
- status)
- status sfa
- RETVAL=$?
- ;;
- *)
- echo $"Usage: $0 {start|stop|restart|condrestart|status}"
- exit 1
-esac
-
-exit $RETVAL
-
except:
version_tag='cleaningup'
-scripts = glob("clientbin/*.py") + \
- [
+scripts = glob("clientbin/*.py") + [
'config/sfa-config-tty',
'config/sfa-config',
-# 'config/gen-sfa-cm-config.py',
'sfa/server/sfa-start.py',
-# 'sfa/server/sfa_component_setup.py',
+ 'systemd/sfa-setup.sh',
'sfatables/sfatables',
'keyconvert/keyconvert.py',
- 'flashpolicy/sfa_flashpolicy.py',
- ]
+]
packages = [
'sfa',
'sfa/rspecs/versions',
'sfa/client',
'sfa/planetlab',
- 'sfa/nitos',
'sfa/dummy',
- 'sfa/openstack',
- 'sfa/federica',
'sfa/iotlab',
'sfatables',
'sfatables/commands',
'sfatables/processors',
]
-initscripts = [ 'sfa' ]
-if not os.path.isfile('/etc/redhat-release'):
- initscripts.append('functions.sfa')
-
-data_files = [ ('/etc/sfa/', [ 'config/aggregates.xml',
- 'config/registries.xml',
- 'config/default_config.xml',
- 'config/api_versions.xml',
- 'config/sfi_config',
- 'config/topology',
- 'sfa/managers/pl/pl.rng',
- 'sfa/trust/credential.xsd',
- 'sfa/trust/top.xsd',
- 'sfa/trust/sig.xsd',
- 'sfa/trust/xml.xsd',
- 'sfa/trust/protogeni-rspec-common.xsd',
- 'flashpolicy/sfa_flashpolicy_config.xml',
- ]),
- ('/etc/sfatables/matches/', glob('sfatables/matches/*.xml')),
- ('/etc/sfatables/targets/', glob('sfatables/targets/*.xml')),
- ('/etc/init.d/', [ "init.d/%s"%x for x in initscripts ]),
- ('/usr/share/sfa/migrations', glob('sfa/storage/migrations/*.*') ),
- ('/usr/share/sfa/migrations/versions', glob('sfa/storage/migrations/versions/*') ),
- ('/usr/share/sfa/examples/', glob('sfa/examples/*' ) + [ 'cron.d/sfa.cron' ] ),
- ]
-
-# add sfatables processors as data_files
-processor_files = [f for f in glob('sfatables/processors/*') if os.path.isfile(f)]
+data_files = [
+ ('/etc/sfa/',
+ [ 'config/aggregates.xml',
+ 'config/registries.xml',
+ 'config/default_config.xml',
+ 'config/api_versions.xml',
+ 'config/sfi_config',
+ 'config/topology',
+ 'sfa/managers/pl/pl.rng',
+ 'sfa/trust/credential.xsd',
+ 'sfa/trust/top.xsd',
+ 'sfa/trust/sig.xsd',
+ 'sfa/trust/xml.xsd',
+ 'sfa/trust/protogeni-rspec-common.xsd',
+ ]),
+ ('/etc/sfatables/matches/', glob('sfatables/matches/*.xml')),
+ ('/etc/sfatables/targets/', glob('sfatables/targets/*.xml')),
+ ('/usr/share/sfa/migrations', glob('sfa/storage/migrations/*.*') ),
+ ('/usr/share/sfa/migrations/versions', glob('sfa/storage/migrations/versions/*') ),
+ ('/usr/share/sfa/examples/', glob('sfa/examples/*' ) + [ 'cron.d/sfa.cron' ] ),
+]
+
+# use /lib/systemd instead of /usr/lib/systemd
+# the latter would work on fedora only, the former
+# will work on both fedora and ubuntu
+services = ['sfa-db', 'sfa-aggregate', 'sfa-registry']
+data_files.append(
+ ('/lib/systemd/system',
+ ['systemd/{}.service'.format(service)
+ for service in services]))
+
+
+# sfatables processors
+processor_files = [f for f in glob('sfatables/processors/*')
+ if os.path.isfile(f)]
data_files.append(('/etc/sfatables/processors/', processor_files))
-processor_subdirs = [d for d in glob('sfatables/processors/*') if os.path.isdir(d)]
+processor_subdirs = [d for d in glob('sfatables/processors/*')
+ if os.path.isdir(d)]
for d in processor_subdirs:
etc_dir = os.path.join("/etc/sfatables/processors", os.path.basename(d))
d_files = [f for f in glob(d + '/*') if os.path.isfile(f)]
data_files.append((etc_dir, processor_files))
+
if sys.argv[1] in ['uninstall', 'remove', 'delete', 'clean']:
python_path = sys.path
site_packages_path = [ os.path.join(p, 'sfa') for p in python_path if p.endswith('site-packages')]
site_packages_path += [ os.path.join(p, 'sfatables') for p in python_path if p.endswith('site-packages')]
remove_dirs = ['/etc/sfa/', '/etc/sfatables'] + site_packages_path
remove_bins = [ '/usr/bin/' + os.path.basename(bin) for bin in scripts ]
- remove_files = remove_bins + [ "/etc/init.d/%s"%x for x in initscripts ]
+ remove_files = (remove_bins
+ + ["/lib/systemd/system/{}".format(x)
+ for x in services])
# remove files
def feedback (file, msg):
try:
os.remove(filepath)
feedback(filepath, "success")
-    except:
+    except Exception:
        feedback(filepath, "failed")
# remove directories
for directory in remove_dirs:
try:
shutil.rmtree(directory)
feedback (directory, "success")
-    except:
+    except Exception:
        feedback (directory, "failed")
else:
# avoid repeating what's in the specfile already
long_description = long_description,
scripts = scripts,
)
-
%define name sfa
-%define version 3.1
-%define taglevel 22
+%define version 4.0
+%define taglevel 1
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
Distribution: PlanetLab
URL: %{SCMURL}
-Summary: Server-side for SFA, generic implementation derived from PlanetLab
+Summary: Server-side for SFA, generic implementation derived from PlanetLab
Group: Applications/System
BuildRequires: make
BuildRequires: python-setuptools
Requires: util-linux-ng
# and the SFA libraries of course
Requires: sfa-common
-
+
%package common
Summary: Python libraries for SFA, generic implementation derived from PlanetLab
Group: Applications/System
Group: Applications/System
Requires: sfa
-%package flashpolicy
-Summary: SFA support for flash clients
-Group: Applications/System
-Requires: sfa
-
-%package federica
-Summary: the SFA layer around Federica
-Group: Applications/System
-Requires: sfa
-
-%package nitos
-Summary: the SFA layer around NITOS
-Group: Applications/System
-Requires: sfa
-
%package iotlab
Summary: the SFA layer around IotLab
Group: Applications/System
Requires: sfa
%package dummy
-Summary: the SFA layer around a Dummy Testbed
+Summary: the SFA layer around a Dummy Testbed
Group: Applications/System
Requires: sfa
Group: Applications/System
Requires: sfa-common
-%description
+%description
This package provides the registry, aggregate manager and slice
managers for SFA. In most cases it is advisable to install additional
package for a given testbed, like e.g. sfa-plc for a PlanetLab tesbed.
This package implements the SFA interface which serves as a layer
between the existing PlanetLab interfaces and the SFA API.
-%description flashpolicy
-This package provides support for adobe flash client applications.
-
-%description federica
-The SFA driver for FEDERICA.
-
-%description nitos
-The SFA driver for NITOS.
-
%description iotlab
The SFA driver for IotLab.
rm -rf $RPM_BUILD_ROOT
%files
-/etc/init.d/sfa
+/lib/systemd/system/*.service
%{_bindir}/sfa-start.py*
%{_bindir}/sfaadmin.py*
%{_bindir}/sfaadmin
%{_bindir}/keyconvert.py*
%{_bindir}/sfa-config-tty
%{_bindir}/sfa-config
+%{_bindir}/sfa-setup.sh
%config /etc/sfa/default_config.xml
%config (noreplace) /etc/sfa/aggregates.xml
%config (noreplace) /etc/sfa/registries.xml
%files plc
%defattr(-,root,root)
%{python_sitelib}/sfa/planetlab
-%{python_sitelib}/sfa/openstack
/etc/sfa/pl.rng
/etc/sfa/credential.xsd
/etc/sfa/top.xsd
/etc/sfa/protogeni-rspec-common.xsd
/etc/sfa/topology
-%files flashpolicy
-%{_bindir}/sfa_flashpolicy.py*
-/etc/sfa/sfa_flashpolicy_config.xml
-
-%files federica
-%{python_sitelib}/sfa/federica
-
-%files nitos
-%{python_sitelib}/sfa/nitos
-
%files iotlab
%{python_sitelib}/sfa/iotlab
%files tests
%{_datadir}/sfa/tests
-### sfa installs the 'sfa' service
-%post
-chkconfig --add sfa
+# arbitrary choice here, subject to manual tweaks if needed
+# this is in line with default_config.xml
+# no need to enable sfa-db, will be activated as a dependency
+%post
+systemctl enable sfa-aggregate || :
+systemctl enable sfa-registry || :
-%preun
+%preun
if [ "$1" = 0 ] ; then
- /sbin/service sfa stop || :
- /sbin/chkconfig --del sfa || :
+    for service in sfa-aggregate sfa-registry sfa-db; do
+        systemctl is-enabled $service && systemctl disable $service || :
+        systemctl is-active $service && systemctl stop $service || :
+    done
fi
%postun
-[ "$1" -ge "1" ] && { service sfa dbdump ; service sfa restart ; }
-
-#### sfa-cm installs the 'sfa-cm' service
-#%post cm
-#chkconfig --add sfa-cm
-#
-#%preun cm
-#if [ "$1" = 0 ] ; then
-# /sbin/service sfa-cm stop || :
-# /sbin/chkconfig --del sfa-cm || :
-#fi
-#
-#%postun cm
-#[ "$1" -ge "1" ] && service sfa-cm restart || :
+if [ "$1" -ge "1" ] ; then
+    for service in sfa-db sfa-registry sfa-aggregate; do
+        systemctl is-active $service && systemctl restart $service || :
+    done
+fi
%changelog
+* Wed May 30 2018 Thierry <Parmentelat> - sfa-4.0-1
+- systemd service files install in /lib instead of /usr/lib for ubuntus
+- removed all features relating to slice manager
+- removed all features relating to component manager
+
+* Mon May 28 2018 Thierry <Parmentelat> - sfa-4.0-0
+- expose geni_api_versions as https://
+- avoid publishing non-relevant entries in GetVersion
+- fixes in the IoT-lab driver (thanks Loic)
+- reviewed logging policy, less awkward and more reliable; /var/log/sfa{,-import}.log should now be alive and time rotate
+- rewrote init-style startup script into systemd-native services: sfa-aggregate and sfa-registry, that both depend on sfa-db
+- huge cleanup, removed everything related to init.d; debian; flash-policy; max aggregate; federica, openstack/nova and nitos drivers
+
* Fri Mar 16 2018 Thierry <Parmentelat> - sfa-3.1-22
- pl: tweaks for exposing country / city on nodes from site tags if set
- pl: tweaks for exposing hardware_types on nodes from node tag 'hardware_type' if set
* Tue Jul 10 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.1-12
- Update Openstack driver to support Essex release/
- Fix authority xrn bug.
-
+
* Thu Jun 07 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-11
- review packaging - site-packages/planetlab now come with sfa-plc
* Mon Apr 16 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.1-5
- make sync now supports vserver or lxc.
-- Added slice expiration and login info to SliverStatus response.
+- Added slice expiration and login info to SliverStatus response.
- Fixed CreateSliver bug that causes the method to fail if any node element is missing
the 'component_name' attribute.
- Fixed various bugs that caused SFA to generate invalid or incorrect sliver ids.
-
+
* Tue Mar 20 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.1-4
- Introduced new administrative command line script, sfaadmin.py. Removed various single
purpose scripts and migrated their functionality into sfaadmin.py.
- Refactored Registry import scripts.
- Removed SQLAlchemy dependency from sfi.py.
- Fixed bugs in sfi.py
-- Registry, Aggregate and SliceManager now support the OpenStack framework.
+- Registry, Aggregate and SliceManager now support the OpenStack framework.
* Fri Feb 24 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-3
- slice x researcher rel. in database,
* Wed Jan 25 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-10
- client: added -R --raw sfi cmdline option that displays raw server response.
-- client: request GENI RSpec by default.
+- client: request GENI RSpec by default.
- server: remove database dependencies from sfa.server.sfaapi.
- server: increased default credential lifetime to 31 days.
- bugfix: fixed bug in sfa.storage.record.SfaRecord.delete().
- bugfix: fixed server key path in sfa.server.sfa-clean-peer-records.
-- bugfix: fixed bug in sfa.server.sfa-start.install_peer_certs().
-
+- bugfix: fixed bug in sfa.server.sfa-start.install_peer_certs().
+
* Sat Jan 7 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-9
- bugfix: 'geni_api' should be in the top level struct, not the code struct
- bugfix: Display the correct host and port in 'geni_api_versions' field of the GetVersion
- bugfix: sfa.util.sfatime.datetime_to_epoch() returns integers instead of doubles.
- bugfix: Fixed bug that prevented the rspec parser from identifying an rspec's schema when
there is extra whitespace in the schemaLocation field.
-- bugfix: Fixed bug that caused PlanetLab initscripts from showing up in the PGv2 and GENIv3
+- bugfix: Fixed bug that caused PlanetLab initscripts from showing up in the PGv2 and GENIv3
advertisement rspecs.
- bugfix: <login> RSpec element should contain the 'username' attribute.
-- bugfix: Use sfa.util.plxrn.PlXrn to parse the login_base (authority) out of a urn.
-
+- bugfix: Use sfa.util.plxrn.PlXrn to parse the login_base (authority) out of a urn.
+
* Wed Jan 4 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-8
-- bugfix: Fixed a bug in the sfa-import-plc.py script that caused the script to
+- bugfix: Fixed a bug in the sfa-import-plc.py script that caused the script to
exit when it encountered a user with an invalid public key.
- server: imporved logging in sfa-import-plc.py
-
+
* Tue Jan 3 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.0-7
- bugfix: Fixed appending public keys in CreateSliver
- bugfix: Fixed various bugs in the PGv2/GENIv3 request, advertisement and manifest rspecs.
- client: -c --current option allows users to request the current/uncached rspec.
- server: Added 'geni_api_versions' field to GetVersion() output.
- server: Moved PLC specific code from sfa.importer.sfaImport to sfa.importer.sfa-import-plc.
-
+
* Fri Dec 16 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.0-6
- bugfix: sfi was not sending call_id with ListResources to v2 servers
- SFA_API_DEBUG replaced with SFA_API_LOGLEVEL
- Unicode-friendliness for user names with accents/special chars.
- Fix bug that could cause create the client to fail when calling CreateSliver for a slice that has the same hrn as a user.
- CreaetSliver no longer fails for users that have a capital letter in their URN.
-- Fix bug in CreateSliver that generated incorrect login bases and email addresses for ProtoGENI requests.
+- Fix bug in CreateSliver that generated incorrect login bases and email addresses for ProtoGENI requests.
- Allow files with .gid, .pem or .crt extension to be loaded into the server's list of trusted certs.
-- Fix bugs and missing imports
-
+- Fix bugs and missing imports
+
* Tue Aug 30 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.0-35
- new method record.get_field for sface
* Wed Aug 24 2011 Tony Mack <tmack@cs.princeton.edu> - sfa-1.0-32
- Fixed exploit that allowed an authorities to issue certs for objects that dont belong to them.
- Fixed holes in certificate verification logic.
-- Aggregates no longer try to lookup slice and person records when processing CreateSliver requests. Clients are now required to specify this info in the 'users' argument.
+- Aggregates no longer try to lookup slice and person records when processing CreateSliver requests. Clients are now required to specify this info in the 'users' argument.
- Added 'boot_state' as an attribute of the node element in SFA rspec.
- Non authority certificates are marked as CA:FALSE.
- Added SFA_MAX_SLICE_RENEW which allows operators to configure the max ammout
of days a user can extend their slice expiration.
- CA certs are only issued to objects of type authority
-
+
* Fri Aug 05 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.0-29
- tag 1.0-28 was broken due to typo in the changelog
- new class sfa/util/httpsProtocol.py that supports timeouts
- Support authority+sm type.
- Fix rspec merging bugs.
- Only load certs that have .gid extension from /etc/sfa/trusted_roots/
-- Created a 'planetlab' extension to the ProtoGENI v2 rspec for supporting
- planetlab hosted initscripts using the <planetlab:initscript> tag
-- Can now handle extraneous whitespace in the rspec without failing.
-
+- Created a 'planetlab' extension to the ProtoGENI v2 rspec for supporting
+ planetlab hosted initscripts using the <planetlab:initscript> tag
+- Can now handle extraneous whitespace in the rspec without failing.
+
* Fri Jul 8 2011 Tony Mack <tmack@cs.princeton.edu> - sfa-1.0-27
- ProtoGENI v2 RSpec updates.
- Convert expiration timestamps with timezone info in credentials to utc.
-- Fixed redundant logging issue.
+- Fixed redundant logging issue.
- Improved SliceManager and SFI client logging.
-- Support aggregates that don't support the optional 'call_id' argument.
+- Support aggregates that don't support the optional 'call_id' argument.
- Only call get_trusted_certs() at aggreage interfaces that support the call.
- CreateSliver() now handles MyPLC slice attributes/tags.
- Cache now supports persistence.
* Fri Jun 10 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.0-23
- includes a change on passphrases that was intended in 1.0-22
-* Thu Jun 6 2011 Tony Mack <tmack@cs.princeton.edu> - sfa-1.0-22
+* Mon Jun 6 2011 Tony Mack <tmack@cs.princeton.edu> - sfa-1.0-22
- Added support for ProtoGENI RSpec v2
-
+
* Wed Mar 16 2011 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-1.0-21
- stable sfascan
- fix in initscript, *ENABLED tags in config now taken into account
* Tue Sep 07 2010 Tony Mack <tmack@cs.princeton.edu> - sfa-0.9-16
- truncate login base of external (ProtoGeni, etc) slices to 20 characters
to avoid returning a PLCAPI exception that might confuse users.
-- Enhance PLC aggregate performace by using a better filter when querying SliceTags.
-- fix build errors.
+- Enhance PLC aggregate performace by using a better filter when querying SliceTags.
+- fix build errors.
* Tue Aug 24 2010 Tony Mack <tmack@cs.princeton.edu> - sfa-0.9-15
- (Architecture) Credential format changed to match ProtoGENI xml format
- (Architecture) All interfaces export a new set of methods that are compatible
- with the ProtoGeni Aggregate spec. These new methods are considered a
- replacement for the pervious methods exported by the interfaces. All
- previous methods are still exported and work as normal, but they are
- considered deprecated and will not be supported in future releases.
+ with the ProtoGeni Aggregate spec. These new methods are considered a
+ replacement for the pervious methods exported by the interfaces. All
+ previous methods are still exported and work as normal, but they are
+ considered deprecated and will not be supported in future releases.
- (Architecture) SFI has been updated to use the new interface methods.
- (Architecture) Changed keyconvet implementation from c to python.
- (Architecture) Slice Manager now attempts looks for a delegated credential
provided by the client before using its own server credential.
-- (Archiceture) Slice Interface no longers stores cache of resources on disk.
+- (Archiceture) Slice Interface no longers stores cache of resources on disk.
This cache now exists only in memory and is cleared when service is restarted
- or cache lifetime is exceeded.
-- (Performance) SliceManager sends request to Aggregates in parallel instead
+ or cache lifetime is exceeded.
+- (Performance) SliceManager sends request to Aggregates in parallel instead
of sequentially.
- (Bug fix) SFA tickets now support the new rspec format.
- (Bug fix) SFI only uses cahced credential if they aren't expired.
- (Enhancement) SFI -a --aggregatge option now sends requests directly to the
Aggregate instead of relaying through the Slice Manager.
- (Enhancement) Simplified caching. Accociated a global cache instance with
- the api handler on every new server request, making it easier to access the
- cache and use in more general ways.
+ the api handler on every new server request, making it easier to access the
+ cache and use in more general ways.
-* Thu May 11 2010 Tony Mack <tmack@cs.princeton.edu> - sfa-0.9-11
+* Tue May 11 2010 Tony Mack <tmack@cs.princeton.edu> - sfa-0.9-11
- SfaServer now uses a pool of threads to handle requests concurrently
- sfa.util.rspec no longer used to process/manage rspecs (deprecated). This is now handled by sfa.plc.network and is not backwards compatible
- PIs can now get a slice credential for any slice at their site without having to be a member of the slice
- Registry records for federated peers (defined in registries.xml, aggregates.xml) updated when sfa service is started
-- Interfaces will try to fetch and install gids from peers listed in registries.xml/aggregates.xml if gid is not found in /etc/sfa/trusted_roots dir
-- Component manager does not install gid files if slice already has them
+- Interfaces will try to fetch and install gids from peers listed in registries.xml/aggregates.xml if gid is not found in /etc/sfa/trusted_roots dir
+- Component manager does not install gid files if slice already has them
- Server automatically fetches and installs peer certificats (defined in registries/aggregates.xml) when service is restarted.
- fix credential verification exploit (verify that the trusted signer is a parent of the object it it signed)
- made it easier for root authorities to sign their sub's certifiacate using the sfa-ca.py (sfa/server/sfa-ca.py) tool
-
+
* Thu Jan 21 2010 anil vengalil <avengali@sophia.inria.fr> - sfa-0.9-10
- This tag is quite same as the previous one (sfa-0.9-9) except that the vini and max aggregate managers are also updated for urn support. Other features are:
- - sfa-config-tty now has the same features like plc-config-tty
* Sat May 30 2009 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - geniwrapper-0.2-2
- bugfixes - still a work in progress
-* Fri May 18 2009 Baris Metin <tmetin@sophia.inria.fr>
+* Mon May 18 2009 Baris Metin <tmetin@sophia.inria.fr>
- initial package
# note that reg-email is not yet available
pl_fields = ['email', 'person_id',
'first_name', 'last_name', 'key_ids']
- nitos_fields = ['email', 'user_id']
- extra_fields = list(set(pl_fields).union(set(nitos_fields)))
# try to fill all these in
- for field in extra_fields:
+ for field in pl_fields:
if field in record:
user[field] = record[field]
users.append(user)
help="the filenames to upload")
parser.add_argument('-u', '--url', dest='url', action='store', default=None,
help='the URL of the manifold API')
- parser.add_argument('-p', '--platform', dest='platform', action='store', default=None,
+ parser.add_argument('-p', '--platform', dest='platform',
+ action='store', default=None,
help='the manifold platform name')
- parser.add_argument('-U', '--user', dest='username', action='store', default=None,
+ parser.add_argument('-U', '--user', dest='username',
+ action='store', default=None,
help='the manifold username')
- parser.add_argument('-P', '--password', dest='password', action='store', default=None,
+ parser.add_argument('-P', '--password', dest='password',
+ action='store', default=None,
help='the manifold password')
- parser.add_argument('-v', '--verbose', dest='verbose', action='count', default=0,
+ parser.add_argument('-v', '--verbose', dest='verbose',
+ action='count', default=0,
help='more and more verbose')
args = parser.parse_args()
- from sfa.util.sfalogging import sfi_logger
+ from sfa.util.sfalogging import init_logger, logger as sfi_logger
+ init_logger('console')
sfi_logger.enable_console()
sfi_logger.setLevelFromOptVerbose(args.verbose)
uploader = ManifoldUploader(url=args.url, platform=args.platform,
#!/usr/bin/python
+
+# pylint: disable=c0111, c0103, w0402, w0622
+
from __future__ import print_function
import os
import sys
import copy
-from pprint import pformat, PrettyPrinter
+from pprint import PrettyPrinter
from optparse import OptionParser
from sfa.generic import Generic
from sfa.util.xrn import Xrn
+from sfa.util.sfalogging import logger, init_logger
+
from sfa.storage.record import Record
from sfa.trust.hierarchy import Hierarchy
from sfa.trust.gid import GID
from sfa.trust.certificate import convert_public_key
-from sfa.client.common import optparse_listvalue_callback, optparse_dictvalue_callback, terminal_render, filter_records
+from sfa.client.common import (optparse_listvalue_callback,
+ optparse_dictvalue_callback,
+ terminal_render, filter_records)
from sfa.client.candidates import Candidates
from sfa.client.sfi import save_records_to_file
pprinter = PrettyPrinter(indent=4)
+# if set, will output on stdout
+DEBUG = False
+
try:
help_basedir = Hierarchy().basedir
-except:
+except Exception:
help_basedir = '*unable to locate Hierarchy().basedir'
version = self.api.manager.GetVersion(self.api, {})
pprinter.pprint(version)
-
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='authority to list (hrn/urn - mandatory)')
- @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default='all')
- @add_options('-r', '--recursive', dest='recursive', metavar='<recursive>', help='list all child records',
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='authority to list (hrn/urn - mandatory)')
+ @add_options('-t', '--type', dest='type', metavar='<type>',
+ help='object type', default='all')
+ @add_options('-r', '--recursive', dest='recursive', metavar='<recursive>',
+ help='list all child records',
+ action='store_true', default=False)
+ @add_options('-v', '--verbose', dest='verbose',
action='store_true', default=False)
- @add_options('-v', '--verbose', dest='verbose', action='store_true', default=False)
def list(self, xrn, type=None, recursive=False, verbose=False):
- """List names registered at a given authority - possibly filtered by type"""
+ """
+ List names registered at a given authority, possibly filtered by type
+ """
xrn = Xrn(xrn, type)
options_dict = {'recursive': recursive}
records = self.api.manager.List(
list = filter_records(type, records)
# terminal_render expects an options object
- class Options:
- pass
- options = Options()
- options.verbose = verbose
- terminal_render(list, options)
+ class Options: # pylint: disable=r0903
+ def __init__(self, verbose):
+ self.verbose = verbose
+ options = Options(verbose)
+ terminal_render(list, options)
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
- @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
- @add_options('-o', '--outfile', dest='outfile', metavar='<outfile>', help='save record to file')
- @add_options('-f', '--format', dest='format', metavar='<display>', type='choice',
- choices=('text', 'xml', 'simple'), help='display record in different formats')
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='object hrn/urn (mandatory)')
+ @add_options('-t', '--type', dest='type', metavar='<type>',
+ help='object type', default=None)
+ @add_options('-o', '--outfile', dest='outfile', metavar='<outfile>',
+ help='save record to file')
+ @add_options('-f', '--format', dest='format', metavar='<display>',
+ type='choice', choices=('text', 'xml', 'simple'),
+ help='display record in different formats')
def show(self, xrn, type=None, format=None, outfile=None):
"""Display details for a registered object"""
records = self.api.manager.Resolve(self.api, xrn, type, details=True)
if outfile:
save_records_to_file(outfile, records)
- def _record_dict(self, xrn, type, email, key,
+ @staticmethod
+ def _record_dict(xrn, type, email, key,
slices, researchers, pis,
url, description, extras):
record_dict = {}
record_dict.update(extras)
return record_dict
-
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
- @add_options('-t', '--type', dest='type', metavar='<type>', help='object type (mandatory)')
- @add_options('-a', '--all', dest='all', metavar='<all>', action='store_true', default=False, help='check all users GID')
- @add_options('-v', '--verbose', dest='verbose', metavar='<verbose>', action='store_true', default=False, help='verbose mode: display user\'s hrn ')
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='object hrn/urn', default=None)
+ @add_options('-t', '--type', dest='type', metavar='<type>',
+ help='object type (mandatory)')
+ @add_options('-a', '--all', dest='all', metavar='<all>',
+ action='store_true', default=False,
+ help='check all users GID')
+ @add_options('-v', '--verbose', dest='verbose', metavar='<verbose>',
+ action='store_true', default=False,
+ help='verbose mode: display user\'s hrn ')
def check_gid(self, xrn=None, type=None, all=None, verbose=None):
"""Check the correspondance between the GID and the PubKey"""
db_pubkey_str = record.reg_keys[0].key
try:
db_pubkey_obj = convert_public_key(db_pubkey_str)
- except:
+ except Exception:
ERROR.append(record.hrn)
continue
else:
print("Users NOT having a PubKey: %s\n\
Users having a non RSA PubKey: %s\n\
Users having a GID/PubKey correpondence OK: %s\n\
-Users having a GID/PubKey correpondence Not OK: %s\n" % (len(NOKEY), len(ERROR), len(OK), len(NOK)))
+Users having a GID/PubKey correpondence Not OK: %s\n"
+ % (len(NOKEY), len(ERROR), len(OK), len(NOK)))
else:
print("Users NOT having a PubKey: %s and are: \n%s\n\n\
Users having a non RSA PubKey: %s and are: \n%s\n\n\
Users having a GID/PubKey correpondence OK: %s and are: \n%s\n\n\
-Users having a GID/PubKey correpondence NOT OK: %s and are: \n%s\n\n" % (len(NOKEY), NOKEY, len(ERROR), ERROR, len(OK), OK, len(NOK), NOK))
+Users having a GID/PubKey correpondence NOT OK: %s and are: \n%s\n\n"
+ % (len(NOKEY), NOKEY, len(ERROR), ERROR,
+ len(OK), OK, len(NOK), NOK))
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
- @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='object hrn/urn (mandatory)')
+ @add_options('-t', '--type', dest='type', metavar='<type>',
+ help='object type', default=None)
@add_options('-e', '--email', dest='email', default="",
help="email (mandatory for users)")
@add_options('-u', '--url', dest='url', metavar='<url>', default=None,
help="URL, useful for slices")
- @add_options('-d', '--description', dest='description', metavar='<description>',
+ @add_options('-d', '--description', dest='description',
+ metavar='<description>',
help='Description, useful for slices', default=None)
- @add_options('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
+ @add_options('-k', '--key', dest='key', metavar='<key>',
+ help='public key string or file',
default=None)
- @add_options('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
- default='', type="str", action='callback', callback=optparse_listvalue_callback)
- @add_options('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers',
- default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ @add_options('-s', '--slices', dest='slices', metavar='<slices>',
+ help='Set/replace slice xrns',
+ default='', type="str", action='callback',
+ callback=optparse_listvalue_callback)
+ @add_options('-r', '--researchers', dest='researchers',
+ metavar='<researchers>', help='Set/replace slice researchers',
+ default='', type="str", action='callback',
+ callback=optparse_listvalue_callback)
@add_options('-p', '--pis', dest='pis', metavar='<PIs>',
help='Set/replace Principal Investigators/Project Managers',
- default='', type="str", action='callback', callback=optparse_listvalue_callback)
- @add_options('-X', '--extra', dest='extras', default={}, type='str', metavar="<EXTRA_ASSIGNS>",
- action="callback", callback=optparse_dictvalue_callback, nargs=1,
- help="set extra/testbed-dependent flags, e.g. --extra enabled=true")
+ default='', type="str", action='callback',
+ callback=optparse_listvalue_callback)
+ @add_options('-X', '--extra', dest='extras',
+ default={}, type='str', metavar="<EXTRA_ASSIGNS>",
+ action="callback", callback=optparse_dictvalue_callback,
+ nargs=1,
+ help="set extra/testbed-dependent flags,"
+ " e.g. --extra enabled=true")
def register(self, xrn, type=None, email='', key=None,
slices='', pis='', researchers='',
url=None, description=None, extras={}):
"""Create a new Registry record"""
- record_dict = self._record_dict(xrn=xrn, type=type, email=email, key=key,
- slices=slices, researchers=researchers, pis=pis,
- url=url, description=description, extras=extras)
+ record_dict = self._record_dict(
+ xrn=xrn, type=type, email=email, key=key,
+ slices=slices, researchers=researchers, pis=pis,
+ url=url, description=description, extras=extras)
self.api.manager.Register(self.api, record_dict)
-
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
- @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
- @add_options('-u', '--url', dest='url', metavar='<url>', help='URL', default=None)
- @add_options('-d', '--description', dest='description', metavar='<description>',
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='object hrn/urn (mandatory)')
+ @add_options('-t', '--type', dest='type', metavar='<type>',
+ help='object type', default=None)
+ @add_options('-u', '--url', dest='url', metavar='<url>',
+ help='URL', default=None)
+ @add_options('-d', '--description', dest='description',
+ metavar='<description>',
help='Description', default=None)
- @add_options('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
+ @add_options('-k', '--key', dest='key', metavar='<key>',
+ help='public key string or file',
default=None)
- @add_options('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns',
- default='', type="str", action='callback', callback=optparse_listvalue_callback)
- @add_options('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers',
- default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ @add_options('-s', '--slices', dest='slices', metavar='<slices>',
+ help='Set/replace slice xrns',
+ default='', type="str", action='callback',
+ callback=optparse_listvalue_callback)
+ @add_options('-r', '--researchers', dest='researchers',
+ metavar='<researchers>', help='Set/replace slice researchers',
+ default='', type="str", action='callback',
+ callback=optparse_listvalue_callback)
@add_options('-p', '--pis', dest='pis', metavar='<PIs>',
help='Set/replace Principal Investigators/Project Managers',
- default='', type="str", action='callback', callback=optparse_listvalue_callback)
- @add_options('-X', '--extra', dest='extras', default={}, type='str', metavar="<EXTRA_ASSIGNS>",
- action="callback", callback=optparse_dictvalue_callback, nargs=1,
- help="set extra/testbed-dependent flags, e.g. --extra enabled=true")
+ default='', type="str", action='callback',
+ callback=optparse_listvalue_callback)
+ @add_options('-X', '--extra', dest='extras', default={}, type='str',
+ metavar="<EXTRA_ASSIGNS>", nargs=1,
+ action="callback", callback=optparse_dictvalue_callback,
+ help="set extra/testbed-dependent flags,"
+ " e.g. --extra enabled=true")
def update(self, xrn, type=None, email='', key=None,
slices='', pis='', researchers='',
url=None, description=None, extras={}):
"""Update an existing Registry record"""
- record_dict = self._record_dict(xrn=xrn, type=type, email=email, key=key,
- slices=slices, researchers=researchers, pis=pis,
- url=url, description=description, extras=extras)
+ record_dict = self._record_dict(
+ xrn=xrn, type=type, email=email, key=key,
+ slices=slices, researchers=researchers, pis=pis,
+ url=url, description=description, extras=extras)
self.api.manager.Update(self.api, record_dict)
-
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
- @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='object hrn/urn (mandatory)')
+ @add_options('-t', '--type', dest='type', metavar='<type>',
+ help='object type', default=None)
def remove(self, xrn, type=None):
"""Remove given object from the registry"""
xrn = Xrn(xrn, type)
self.api.manager.Remove(self.api, xrn)
-
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
- @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='object hrn/urn (mandatory)')
+ @add_options('-t', '--type', dest='type', metavar='<type>',
+ help='object type', default=None)
def credential(self, xrn, type=None):
"""Invoke GetCredential"""
cred = self.api.manager.GetCredential(
def import_registry(self):
"""Run the importer"""
+ if not DEBUG:
+ init_logger('import')
from sfa.importer import Importer
importer = Importer()
importer.run()
dbschema.init_or_upgrade()
- @add_options('-a', '--all', dest='all', metavar='<all>', action='store_true', default=False,
- help='Remove all registry records and all files in %s area' % help_basedir)
- @add_options('-c', '--certs', dest='certs', metavar='<certs>', action='store_true', default=False,
- help='Remove all cached certs/gids found in %s' % help_basedir)
- @add_options('-0', '--no-reinit', dest='reinit', metavar='<reinit>', action='store_false', default=True,
- help='Prevents new DB schema from being installed after cleanup')
+ @add_options('-a', '--all', dest='all', metavar='<all>',
+ action='store_true', default=False,
+ help='Remove all registry records and all files in %s area'
+ % help_basedir)
+ @add_options('-c', '--certs', dest='certs',
+ metavar='<certs>', action='store_true', default=False,
+ help='Remove all cached certs/gids found in %s'
+ % help_basedir)
+ @add_options('-0', '--no-reinit', dest='reinit', metavar='<reinit>',
+ action='store_false', default=True,
+ help="Prevents new DB schema"
+ " from being installed after cleanup")
def nuke(self, all=False, certs=False, reinit=True):
- """Cleanup local registry DB, plus various additional filesystem cleanups optionally"""
+ """
+ Cleanup local registry DB, plus various additional
+ filesystem cleanups optionally
+ """
from sfa.storage.dbschema import DBSchema
- from sfa.util.sfalogging import _SfaLogger
- logger = _SfaLogger(
- logfile='/var/log/sfa_import.log', loggername='importlog')
+ from sfa.util.sfalogging import init_logger, logger
+ init_logger('import')
logger.setLevelFromOptVerbose(self.api.config.SFA_API_LOGLEVEL)
logger.info("Purging SFA records from database")
dbschema = DBSchema()
dbschema.nuke()
- # for convenience we re-create the schema here, so there's no need for an explicit
+ # for convenience we re-create the schema here,
+ # so there's no need for an explicit
# service sfa restart
# however in some (upgrade) scenarios this might be wrong
if reinit:
def import_gid(self, xrn):
pass
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
- @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
- @add_options('-o', '--outfile', dest='outfile', metavar='<outfile>', help='output file', default=None)
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='object hrn/urn (mandatory)')
+ @add_options('-t', '--type', dest='type', metavar='<type>',
+ help='object type', default=None)
+ @add_options('-o', '--outfile', dest='outfile', metavar='<outfile>',
+ help='output file', default=None)
def export(self, xrn, type=None, outfile=None):
"""Fetch an object's GID from the Registry"""
from sfa.storage.model import RegRecord
try:
auth_info = hierarchy.get_auth_info(hrn)
gid = auth_info.gid_object
- except:
+ except Exception:
print("Record: %s not found" % hrn)
sys.exit(1)
# save to file
outfile = os.path.abspath('./%s.gid' % gid.get_hrn())
gid.save_to_file(outfile, save_parents=True)
- @add_options('-g', '--gidfile', dest='gid', metavar='<gid>', help='path of gid file to display (mandatory)')
+ @add_options('-g', '--gidfile', dest='gid', metavar='<gid>',
+ help='path of gid file to display (mandatory)')
def display(self, gidfile):
"""Print contents of a GID file"""
gid_path = os.path.abspath(gidfile)
version = self.api.manager.GetVersion(self.api, {})
pprinter.pprint(version)
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='object hrn/urn (mandatory)')
def status(self, xrn):
- """Retrieve the status of the slivers belonging to the named slice (Status)"""
+ """
+ Retrieve the status of the slivers
+ belonging to the named slice (Status)
+ """
urns = [Xrn(xrn, 'slice').get_urn()]
status = self.api.manager.Status(self.api, urns, [], {})
pprinter.pprint(status)
- @add_options('-r', '--rspec-version', dest='rspec_version', metavar='<rspec_version>',
- default='GENI', help='version/format of the resulting rspec response')
+ @add_options('-r', '--rspec-version', dest='rspec_version',
+ metavar='<rspec_version>', default='GENI',
+ help='version/format of the resulting rspec response')
def resources(self, rspec_version='GENI'):
"""Display the available resources at an aggregate"""
options = {'geni_rspec_version': rspec_version}
resources = self.api.manager.ListResources(self.api, [], options)
print(resources)
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
- @add_options('-r', '--rspec', dest='rspec', metavar='<rspec>', help='rspec file (mandatory)')
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='slice hrn/urn (mandatory)')
+ @add_options('-r', '--rspec', dest='rspec', metavar='<rspec>',
+ help='rspec file (mandatory)')
def allocate(self, xrn, rspec):
"""Allocate slivers"""
xrn = Xrn(xrn, 'slice')
self.api, slice_urn, [], rspec_string, options)
print(manifest)
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='slice hrn/urn (mandatory)')
def provision(self, xrn):
"""Provision slivers"""
xrn = Xrn(xrn, 'slice')
self.api, [slice_urn], [], options)
print(manifest)
- @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)')
+ @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>',
+ help='slice hrn/urn (mandatory)')
def delete(self, xrn):
"""Delete slivers"""
self.api.manager.Delete(self.api, [xrn], [], {})
-class SliceManagerCommands(AggregateCommands):
-
- def __init__(self, *args, **kwds):
- self.api = Generic.the_flavour().make_api(interface='slicemgr')
-
-
class SfaAdmin:
- CATEGORIES = {'certificate': CertCommands,
- 'registry': RegistryCommands,
- 'aggregate': AggregateCommands,
- 'slicemgr': SliceManagerCommands}
+ CATEGORIES = {
+ 'certificate': CertCommands,
+ 'registry': RegistryCommands,
+ 'aggregate': AggregateCommands,
+ }
# returns (name,class) or (None,None)
def find_category(self, input):
if name.startswith('_'):
continue
margin = 15
- format = "%%-%ds" % margin
print("%-15s" % name, end=' ')
doc = getattr(method, '__doc__', None)
if not doc:
sys.exit(0)
except TypeError:
print("Possible wrong number of arguments supplied")
- #import traceback
- # traceback.print_exc()
print(command.__doc__)
parser.print_help()
sys.exit(1)
# default is for when we can't determine the type of the service
# typically the server is down, or we can't authenticate, or it's too old
# code
- shapes = {"registry": "diamond", "slicemgr": "ellipse",
+ shapes = {"registry": "diamond",
"aggregate": "box", 'default': 'plaintext'}
- abbrevs = {"registry": "REG", "slicemgr": "SA",
+ abbrevs = {"registry": "REG",
"aggregate": "AM", 'default': '[unknown interface]'}
# return a dictionary that translates into the node's attr
shape = Interface.shapes['default']
layout['shape'] = shape
# fill color to outline wrongly configured or unreachable bodies
- # as of sfa-2.0 registry doesn't have 'sfa' not 'geni_api', but have peers
- # slicemgr and aggregate have 'geni_api' and 'sfa'
+    # as of sfa-2.0 registry doesn't have 'sfa' nor 'geni_api',
+    # but has peers; aggregates have 'geni_api' and 'sfa'
if 'geni_api' not in version and 'peers' not in version:
layout['style'] = 'filled'
layout['fillcolor'] = 'gray'
-#
-# sfi.py - basic SFA command-line client
-# this module is also used in sfascan
-#
+"""
+sfi.py - basic SFA command-line client
+this module is also used in sfascan
+"""
+
+# pylint: disable=c0111, c0413
from __future__ import print_function
import os
import os.path
-import socket
import re
import datetime
import codecs
from sfa.trust.sfaticket import SfaTicket
from sfa.util.faults import SfaInvalidArgument
-from sfa.util.sfalogging import sfi_logger
+from sfa.util.sfalogging import init_logger, logger
from sfa.util.xrn import get_leaf, get_authority, hrn_to_urn, Xrn
from sfa.util.config import Config
from sfa.util.version import version_core
self.options = options
self.user = None
self.authority = None
- self.logger = sfi_logger
- self.logger.enable_console()
+ logger.enable_console()
# various auxiliary material that we keep at hand
self.command = None
# need to call this other than just 'config' as we have a
msg = "Invalid command\n"
msg += "Commands: "
msg += ','.join(commands_list)
- self.logger.critical(msg)
+ logger.critical(msg)
sys.exit(2)
# retrieve args_string
parser.add_option("-p", "--pi", dest='delegate_pi', default=None, action='store_true',
help="delegate your PI credentials, so s.t. like -A your_hrn^")
parser.add_option("-A", "--to-authority", dest='delegate_to_authority', action='store_true', default=False,
- help="""by default the mandatory argument is expected to be a user,
+ help="""by default the mandatory argument is expected to be a user,
use this if you mean an authority instead""")
if canonical in ("myslice"):
return method(command_options, command_args)
def main(self):
+ init_logger('cli')
self.sfi_parser = self.create_parser_global()
(options, args) = self.sfi_parser.parse_args()
if options.help:
sys.exit(1)
self.options = options
- self.logger.setLevelFromOptVerbose(self.options.verbose)
+ logger.setLevelFromOptVerbose(self.options.verbose)
if len(args) <= 0:
- self.logger.critical("No command given. Use -h for help.")
+ logger.critical("No command given. Use -h for help.")
self.print_commands_help(options)
return -1
self.read_config()
self.bootstrap()
- self.logger.debug("Command={}".format(self.command))
+ logger.debug("Command={}".format(self.command))
try:
retcod = self.dispatch(command, command_options, command_args)
except SystemExit:
return 1
except:
- self.logger.log_exc("sfi command {} failed".format(command))
+ logger.log_exc("sfi command {} failed".format(command))
return 1
return retcod
config.save(config_file)
except:
- self.logger.critical(
+ logger.critical(
"Failed to read configuration file {}".format(config_file))
- self.logger.info(
+ logger.info(
"Make sure to remove the export clauses and to add quotes")
if self.options.verbose == 0:
- self.logger.info("Re-run with -v for more details")
+ logger.info("Re-run with -v for more details")
else:
- self.logger.log_exc(
+ logger.log_exc(
"Could not read config file {}".format(config_file))
sys.exit(1)
elif hasattr(config, "SFI_SM"):
self.sm_url = config.SFI_SM
else:
- self.logger.error(
+ logger.error(
"You need to set e.g. SFI_SM='http://your.slicemanager.url:12347/' in {}".format(config_file))
errors += 1
elif hasattr(config, "SFI_REGISTRY"):
self.reg_url = config.SFI_REGISTRY
else:
- self.logger.error(
+ logger.error(
"You need to set e.g. SFI_REGISTRY='http://your.registry.url:12345/' in {}".format(config_file))
errors += 1
elif hasattr(config, "SFI_USER"):
self.user = config.SFI_USER
else:
- self.logger.error(
+ logger.error(
"You need to set e.g. SFI_USER='plc.princeton.username' in {}".format(config_file))
errors += 1
elif hasattr(config, "SFI_AUTH"):
self.authority = config.SFI_AUTH
else:
- self.logger.error(
+ logger.error(
"You need to set e.g. SFI_AUTH='plc.princeton' in {}".format(config_file))
errors += 1
# init self-signed cert, user credentials and gid
def bootstrap(self):
if self.options.verbose:
- self.logger.info(
+ logger.info(
"Initializing SfaClientBootstrap with {}".format(self.reg_url))
client_bootstrap = SfaClientBootstrap(self.user, self.reg_url, self.options.sfi_dir,
- logger=self.logger)
+ logger=logger)
# if -k is provided, use this to initialize private key
if self.options.user_private_key:
client_bootstrap.init_private_key_if_missing(
# trigger legacy compat code if needed
# the name has changed from just <leaf>.pkey to <hrn>.pkey
if not os.path.isfile(client_bootstrap.private_key_filename()):
- self.logger.info("private key not found, trying legacy name")
+ logger.info("private key not found, trying legacy name")
try:
legacy_private_key = os.path.join(self.options.sfi_dir, "{}.pkey"
.format(Xrn.unescape(get_leaf(self.user))))
- self.logger.debug("legacy_private_key={}"
+ logger.debug("legacy_private_key={}"
.format(legacy_private_key))
client_bootstrap.init_private_key_if_missing(
legacy_private_key)
- self.logger.info("Copied private key from legacy location {}"
+ logger.info("Copied private key from legacy location {}"
.format(legacy_private_key))
except:
- self.logger.log_exc("Can't find private key ")
+ logger.log_exc("Can't find private key ")
sys.exit(1)
# make it bootstrap
def my_authority_credential_string(self):
if not self.authority:
- self.logger.critical(
+ logger.critical(
"no authority specified. Use -a or set SF_AUTH")
sys.exit(-1)
return self.client_bootstrap.authority_credential_string(self.authority)
object_hrn = object_gid.get_hrn()
if not object_cred.get_privileges().get_all_delegate():
- self.logger.error("Object credential {} does not have delegate bit set"
+ logger.error("Object credential {} does not have delegate bit set"
.format(object_hrn))
return
def registry(self):
# cache the result
if not hasattr(self, 'registry_proxy'):
- self.logger.info("Contacting Registry at: {}".format(self.reg_url))
+ logger.info("Contacting Registry at: {}".format(self.reg_url))
self.registry_proxy \
= SfaServerProxy(self.reg_url, self.private_key, self.my_gid,
timeout=self.options.timeout, verbose=self.options.debug)
records = self.registry().Resolve(node_hrn, self.my_credential_string)
records = filter_records('node', records)
if not records:
- self.logger.warning(
+ logger.warning(
"No such component:{}".format(opts.component))
record = records[0]
cm_url = "http://{}:{}/".format(record['hostname'], CM_PORT)
# the config
if not self.sm_url.startswith('http://') or self.sm_url.startswith('https://'):
self.sm_url = 'http://' + self.sm_url
- self.logger.info(
+ logger.info(
"Contacting Slice Manager at: {}".format(self.sm_url))
self.sliceapi_proxy \
= SfaServerProxy(self.sm_url, self.private_key, self.my_gid,
cache = Cache(cache_file)
except IOError:
cache = Cache()
- self.logger.info("Local cache not found at: {}".format(cache_file))
+ logger.info("Local cache not found at: {}".format(cache_file))
if cache:
version = cache.get(cache_key)
version = ReturnValue.get_value(result)
# cache version for 20 minutes
cache.add(cache_key, version, ttl=60 * 20)
- self.logger.info("Updating cache file {}".format(cache_file))
+ logger.info("Updating cache file {}".format(cache_file))
cache.save_to_file(cache_file)
return version
# resurrect this temporarily so we can support V1 aggregates for a while
def server_supports_options_arg(self, server):
"""
- Returns true if server support the optional call_id arg, false otherwise.
+        Returns true if server supports the optional call_id arg, false otherwise.
"""
server_version = self.get_cached_server_version(server)
result = False
if (os.path.isfile(file)):
return file
else:
- self.logger.critical("No such rspec file {}".format(rspec))
+ logger.critical("No such rspec file {}".format(rspec))
sys.exit(1)
def get_record_file(self, record):
if (os.path.isfile(file)):
return file
else:
- self.logger.critical(
+ logger.critical(
"No such registry record file {}".format(record))
sys.exit(1)
hrn, self.my_credential_string, resolve_options)
record_dicts = filter_records(options.type, record_dicts)
if not record_dicts:
- self.logger.error("No record of type {}".format(options.type))
+ logger.error("No record of type {}".format(options.type))
return
# user has required to focus on some keys
if options.keys:
@declare_command("[xml-filename]", "", ['add'])
def register(self, options, args):
"""
- create new record in registry (Register)
- from command line options (recommended)
+ create new record in registry (Register)
+ from command line options (recommended)
old-school method involving an xml file still supported
"""
if len(args) > 1:
@declare_command("[xml-filename]", "")
def update(self, options, args):
"""
- update record into registry (Update)
- from command line options (recommended)
+ update record into registry (Update)
+ from command line options (recommended)
old-school method involving an xml file still supported
"""
if len(args) > 1:
@declare_command("slice_hrn", "")
def describe(self, options, args):
"""
- shows currently allocated/provisioned resources
- of the named slice or set of slivers (Describe)
+ shows currently allocated/provisioned resources
+ of the named slice or set of slivers (Describe)
"""
if len(args) != 1:
self.print_help()
else:
filename = os.sep.join(
[self.options.sfi_dir, '{}.gid'.format(target_hrn)])
- self.logger.info("writing {} gid to {}".format(target_hrn, filename))
+ logger.info("writing {} gid to {}".format(target_hrn, filename))
GID(string=gid).save_to_file(filename)
# xxx should analyze result
return 0
filename = os.path.join(self.options.sfi_dir,
"{}_for_{}.{}.cred".format(message, to_hrn, to_type))
delegated_credential.save_to_file(filename, save_parents=True)
- self.logger.info("delegated credential for {} to {} and wrote to {}"
+ logger.info("delegated credential for {} to {} and wrote to {}"
.format(message, to_hrn, filename))
####################
* compute all the slices that you currently have credentials on
* refresh all your credentials (you as a user and pi, your slices)
* upload them to the manifold backend server
- for last phase, sfi_config is read to look for the [myslice] section,
+ for last phase, sfi_config is read to look for the [myslice] section,
and namely the 'backend', 'delegate' and 'user' settings
"""
self.print_help()
sys.exit(1)
# enable info by default
- self.logger.setLevelFromOptVerbose(self.options.verbose + 1)
+ logger.setLevelFromOptVerbose(self.options.verbose + 1)
# the rough sketch goes like this
# (0) produce a p12 file
self.client_bootstrap.my_pkcs12()
sys.exit(1)
# (b) figure whether we are PI for the authority where we belong
- self.logger.info("Resolving our own id {}".format(self.user))
+ logger.info("Resolving our own id {}".format(self.user))
my_records = self.registry().Resolve(self.user, self.my_credential_string)
if len(my_records) != 1:
print("Cannot Resolve {} -- exiting".format(self.user))
sys.exit(1)
my_record = my_records[0]
my_auths_all = my_record['reg-pi-authorities']
- self.logger.info(
+ logger.info(
"Found {} authorities that we are PI for".format(len(my_auths_all)))
- self.logger.debug("They are {}".format(my_auths_all))
+ logger.debug("They are {}".format(my_auths_all))
my_auths = my_auths_all
if options.delegate_auths:
my_auths = list(set(my_auths_all).intersection(
set(options.delegate_auths)))
- self.logger.debug(
+ logger.debug(
"Restricted to user-provided auths {}".format(my_auths))
# (c) get the set of slices that we are in
my_slices_all = my_record['reg-slices']
- self.logger.info(
+ logger.info(
"Found {} slices that we are member of".format(len(my_slices_all)))
- self.logger.debug("They are: {}".format(my_slices_all))
+ logger.debug("They are: {}".format(my_slices_all))
my_slices = my_slices_all
# if user provided slices, deal only with these - if they are found
if options.delegate_slices:
my_slices = list(set(my_slices_all).intersection(
set(options.delegate_slices)))
- self.logger.debug(
+ logger.debug(
"Restricted to user-provided slices: {}".format(my_slices))
# (d) make sure we have *valid* credentials for all these
.format(hrn, htype, delegatee_hrn, delegatee_type))
with open(filename, 'w') as f:
f.write(delegated_credential)
- self.logger.debug("(Over)wrote {}".format(filename))
+ logger.debug("(Over)wrote {}".format(filename))
hrn_delegated_credentials.append(
(hrn, htype, delegated_credential, filename, ))
# (f) and finally upload them to manifold server
# xxx todo add an option so the password can be set on the command line
# (but *NOT* in the config file) so other apps can leverage this
- self.logger.info("Uploading on backend at {}".format(
+ logger.info("Uploading on backend at {}".format(
myslice_dict['backend']))
- uploader = ManifoldUploader(logger=self.logger,
+ uploader = ManifoldUploader(logger=logger,
url=myslice_dict['backend'],
platform=myslice_dict['platform'],
username=myslice_dict['username'],
if uploader.upload(delegated_credential, message=message):
count_success += 1
count_all += 1
- self.logger.info("Successfully uploaded {}/{} credentials"
+ logger.info("Successfully uploaded {}/{} credentials"
.format(count_success, count_all))
# at first I thought we would want to save these,
gid = GID(string=trusted_cert)
gid.dump()
cert = Certificate(string=trusted_cert)
- self.logger.debug('Sfi.trusted -> {}'.format(cert.get_subject()))
+ logger.debug('Sfi.trusted -> {}'.format(cert.get_subject()))
print("Certificate:\n{}\n\n".format(trusted_cert))
# xxx should analyze result
return 0
+++ /dev/null
-from sfa.util.sfalogging import logger
-from sfa.util.faults import SfaFault
-
-# this is probably too big to swallow but for a starting point..
-from sfa.planetlab.pldriver import PlDriver
-
-from sfa.federica.fdshell import FdShell
-
-# hardwired for now
-# this could/should be obtained by issuing getRSpecVersion
-federica_version_string = "RSpecV2"
-
-# avail. methods on the federica side as of 2012/02/13
-# listAvailableResources(String credentials, String rspecVersion)
-# listSliceResources(String credentials, String rspecVersion, String sliceUrn)
-# createSlice(String credentials, String sliceUrn, String rspecVersion, String rspecString)
-# deleteSlice(String credentials, String sliceUrn)
-# listSlices()
-# getRSpecVersion()
-# all return
-# Result: {'code': 0, 'value': RSpec} if success
-# {'code': code_id, 'output': Error message} if error
-
-
-class FdDriver (PlDriver):
-
- def __init__(self, api):
- PlDriver.__init__(self, api)
- config = api.config
- self.shell = FdShell(config)
-
- # the agreement with the federica driver is for them to expose results in a way
- # compliant with the avpi v2 return code, i.e. a dict with 'code' 'value' 'output'
- # essentially, either 'code'==0, then 'value' is set to the actual result
- # otherwise, 'code' is set to an error code and 'output' holds an error
- # message
- def response(self, from_xmlrpc):
- if isinstance(from_xmlrpc, dict) and 'code' in from_xmlrpc:
- if from_xmlrpc['code'] == 0:
- return from_xmlrpc['value']
- else:
- raise SfaFault(from_xmlrpc['code'], from_xmlrpc['output'])
- else:
- logger.warning("unexpected result from federica xmlrpc api")
- return from_xmlrpc
-
- def aggregate_version(self):
- result = {}
- federica_version_string_api = self.response(
- self.shell.getRSpecVersion())
- result['federica_version_string_api'] = federica_version_string_api
- if federica_version_string_api != federica_version_string:
- result['WARNING'] = "hard-wired rspec version %d differs from what the API currently exposes" %\
- federica_version_string
- return result
-
- def testbed_name(self):
- return "federica"
-
- def list_slices(self, creds, options):
- # the issue is that federica returns the list of slice's urn in a string format
- # this is why this dirty hack is needed until federica fixes it.
- slices_str = self.shell.listSlices()['value'][1:-1]
- slices_list = slices_str.split(", ")
- return slices_list
-
- def sliver_status(self, slice_urn, slice_hrn):
- return "fddriver.sliver_status: undefined/todo for slice %s" % slice_hrn
-
- def list_resources(self, slice_urn, slice_hrn, creds, options):
- # right now rspec_version is ignored on the federica side
- # we normally derive it from options
- # look in cache if client has requested so
- cached_requested = options.get('cached', True)
- # global advertisement
- if not slice_hrn:
- # self.cache is initialized unless the global config has it turned
- # off
- if cached_requested and self.cache:
- # using federica_version_string as the key into the cache
- rspec = self.cache.get(federica_version_string)
- if rspec:
- logger.debug(
- "FdDriver.ListResources: returning cached advertisement")
- return self.response(rspec)
- # otherwise, need to get it
- # java code expects creds as a String
-# rspec = self.shell.listAvailableResources (creds, federica_version_string)
- rspec = self.shell.listAvailableResources(
- "", federica_version_string)
-# rspec = self.shell.listAvailableResources (federica_version_string)
- # cache it for future use
- if self.cache:
- logger.debug(
- "FdDriver.ListResources: stores advertisement in cache")
- self.cache.add(federica_version_string, rspec)
- return self.response(rspec)
- # about a given slice : don't cache
- else:
- # java code expects creds as a String
- # return self.response(self.shell.listSliceResources(creds,
- # federica_version_string, slice_urn))
- return self.response(self.shell.listSliceResources("", federica_version_string, slice_urn))
-
- def create_sliver(self, slice_urn, slice_hrn, creds, rspec_string, users, options):
- # right now version_string is ignored on the federica side
- # we normally derive it from options
- # java code expects creds as a String
- # return self.response(self.shell.createSlice(creds, slice_urn,
- # federica_version_string, rspec_string))
- return self.response(self.shell.createSlice("", slice_urn, federica_version_string, rspec_string))
-
- def delete_sliver(self, slice_urn, slice_hrn, creds, options):
- # right now version_string is ignored on the federica side
- # we normally derive it from options
- # xxx not sure if that's currentl supported at all
- # java code expects creds as a String
- # return self.response(self.shell.deleteSlice(creds, slice_urn))
- return self.response(self.shell.deleteSlice("", slice_urn))
-
- # for the the following methods we use what is provided by the default driver class
- # def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
- # def start_slice (self, slice_urn, slice_xrn, creds):
- # def stop_slice (self, slice_urn, slice_xrn, creds):
- # def reset_slice (self, slice_urn, slice_xrn, creds):
- # def get_ticket (self, slice_urn, slice_xrn, creds, rspec, options):
+++ /dev/null
-from sfa.util.sfalogging import logger
-from sfa.util.py23 import xmlrpc_client
-
-
-class FdShell:
- """
- A simple xmlrpc shell to a federica API server
- This class can receive the XMLRPC calls to the federica testbed
- For safety this is limited to a set of hard-coded calls
- """
-
- direct_calls = ['listAvailableResources',
- 'listSliceResources',
- 'createSlice',
- 'deleteSlice',
- 'getRSpecVersion',
- 'listSlices',
- ]
-
- def __init__(self, config):
- url = config.SFA_FEDERICA_URL
- # xxx not sure if java xmlrpc has support for None
- # self.proxy = xmlrpc_client.ServerProxy(url, verbose = False, allow_none = True)
- # xxx turn on verbosity
- self.proxy = xmlrpc_client.ServerProxy(url, verbose=True)
-
- # xxx get credentials from the config ?
- # right now basic auth data goes into the URL
- # so do *not* add any credential at that point
- def __getattr__(self, name):
- def func(*args, **kwds):
- if name not in FdShell.direct_calls:
- raise Exception(
- "Illegal method call %s for FEDERICA driver" % (name))
- logger.info("Issuing %s args=%s kwds=%s to federica" %
- (name, args, kwds))
-# result=getattr(self.proxy, "AggregateManager.%s"%name)(credential, *args, **kwds)
- result = getattr(self.proxy, "AggregateManager.%s" %
- name)(*args, **kwds)
- logger.debug('FdShell %s (%s) returned ... ' % (name, name))
- return result
- return func
# a bundle is the combination of
# (*) an api that reacts on the incoming requests to trigger the API methods
# (*) a manager that implements the function of the service,
-# either aggregate, registry, or slicemgr
+# either aggregate or registry
# (*) a driver that controls the underlying testbed
#
#
if flavour is None:
flavour = config.SFA_GENERIC_FLAVOUR
flavour = flavour.lower()
- #mixed = flavour.capitalize()
+ # mixed = flavour.capitalize()
module_path = "sfa.generic.%s" % flavour
classname = "%s" % flavour
logger.debug("Generic.the_flavour with flavour=%s" % flavour)
def registry_class(self): pass
- def slicemgr_class(self): pass
-
def aggregate_class(self): pass
- def component_class(self): pass
-
# build an API object
# insert a manager instance
def make_api(self, *args, **kwargs):
def make_manager(self, interface):
"""
- interface expected in ['registry', 'aggregate', 'slicemgr', 'component']
+ interface expected in ['registry', 'aggregate']
flavour is e.g. 'pl' or 'max' or whatever
"""
flavour = self.flavour
try:
module_or_class = getattr(self, classname)()
logger.debug("%s : %s" % (message, module_or_class))
- # this gets passed to ManagerWrapper that will call the class constructor
- # if it's a class, or use the module as is if it's a module
+ # this gets passed to ManagerWrapper that will
+ # call the class constructor if it's a class,
+ # or use the module as is if it's a module
# so bottom line is, don't try the constructor here
return module_or_class
- except:
- logger.log_exc_critical(message)
+ except Exception:
+ logger.log_exc(message)
+ exit(1)
# need interface to select the right driver
def make_driver(self, api):
message = "Generic.make_driver for flavour=%s and interface=%s" % (
flavour, interface)
- if interface == "component":
- classname = "component_driver_class"
- else:
- classname = "driver_class"
+ classname = "driver_class"
try:
class_obj = getattr(self, classname)()
logger.debug("%s : %s" % (message, class_obj))
return class_obj(api)
- except:
- logger.log_exc_critical(message)
+ except Exception:
+ logger.log_exc(message)
+ exit(1)
. api: this object reacts to an incoming SFA request
. manager: this implements a given interface, either registry,
-aggregate, or slicemgr
+or aggregate
. driver: this object is in charge of actually talking to the
underlying testbed
* a call to make_api will then create the 3 elements with the
following layout:
-api.manager
+api.manager
api.driver
driver.api
------
more in sfa/generic/__init__.py
-
-
import sfa.managers.registry_manager
return sfa.managers.registry_manager.RegistryManager
- def slicemgr_manager_class(self):
- import sfa.managers.slice_manager
- return sfa.managers.slice_manager.SliceManager
-
def aggregate_manager_class(self):
import sfa.managers.aggregate_manager
return sfa.managers.aggregate_manager.AggregateManager
+++ /dev/null
-#
-from sfa.generic.pl import pl
-
-import sfa.federica.fddriver
-
-# the federica flavour behaves like pl, except for
-# the driver
-
-
-class fd (pl):
-
- def driver_class(self):
- import sfa.managers.v2_to_v3_adapter
- return sfa.managers.v2_to_v3_adapter.V2ToV3Adapter
import sfa.managers.registry_manager
return sfa.managers.registry_manager.RegistryManager
- def slicemgr_manager_class(self):
- import sfa.managers.slice_manager
- return sfa.managers.slice_manager.SliceManager
-
def aggregate_manager_class(self):
import sfa.managers.aggregate_manager
return sfa.managers.aggregate_manager.AggregateManager
def driver_class(self):
import sfa.iotlab.iotlabdriver
return sfa.iotlab.iotlabdriver.IotLabDriver
-
- def component_manager_class(self):
- return None
- # driver_class
-
- def component_driver_class(self):
- return None
+++ /dev/null
-# an example of how to plugin the max aggregate manager with the flavour model
-# might need to be tested
-#
-from sfa.generic.pl import pl
-
-
-class max (pl):
-
- # the max flavour behaves like pl, except for
- # the aggregate
- def aggregate_manager_class(self):
- import sfa.managers.aggregate_manager_max
- return sfa.managers.aggregate_manager_max.AggregateManagerMax
-
-# I believe the component stuff is not implemented
- def component_manager_class(self):
- return None
-
- def component_driver_class(self):
- return None
+++ /dev/null
-from sfa.generic import Generic
-
-
-class nitos (Generic):
-
- # the importer class
- def importer_class(self):
- import sfa.importer.nitosimporter
- return sfa.importer.nitosimporter.NitosImporter
-
- # use the standard api class
- def api_class(self):
- import sfa.server.sfaapi
- return sfa.server.sfaapi.SfaApi
-
- # the manager classes for the server-side services
- def registry_manager_class(self):
- import sfa.managers.registry_manager
- return sfa.managers.registry_manager.RegistryManager
-
- def slicemgr_manager_class(self):
- import sfa.managers.slice_manager
- return sfa.managers.slice_manager.SliceManager
-
- def aggregate_manager_class(self):
- import sfa.managers.aggregate_manager
- return sfa.managers.aggregate_manager.AggregateManager
-
- # driver class for server-side services, talk to the whole testbed
- def driver_class(self):
- import sfa.managers.v2_to_v3_adapter
- return sfa.managers.v2_to_v3_adapter.V2ToV3Adapter
-
- # for the component mode, to be run on board planetlab nodes
- # manager class
- def component_manager_class(self):
- return None
- # driver_class
-
- def component_driver_class(self):
- return None
+++ /dev/null
-from sfa.generic import Generic
-
-import sfa.server.sfaapi
-import sfa.openstack.nova_driver
-import sfa.managers.registry_manager_openstack
-import sfa.managers.aggregate_manager
-import sfa.managers.slice_manager
-
-# use pl as a model so we only redefine what's different
-from sfa.generic.pl import pl
-
-
-class openstack (pl):
-
- # the importer class
- def importer_class(self):
- import sfa.importer.openstackimporter
- return sfa.importer.openstackimporter.OpenstackImporter
-
- # the manager classes for the server-side services
- def registry_manager_class(self):
- return sfa.managers.registry_manager_openstack.RegistryManager
-
- def aggregate_manager_class(self):
- return sfa.managers.aggregate_manager.AggregateManager
-
- # driver class for server-side services, talk to the whole testbed
- def driver_class(self):
- return sfa.openstack.nova_driver.NovaDriver
import sfa.managers.registry_manager
return sfa.managers.registry_manager.RegistryManager
- def slicemgr_manager_class(self):
- import sfa.managers.slice_manager
- return sfa.managers.slice_manager.SliceManager
-
def aggregate_manager_class(self):
import sfa.managers.aggregate_manager
return sfa.managers.aggregate_manager.AggregateManager
def driver_class(self):
import sfa.planetlab.pldriver
return sfa.planetlab.pldriver.PlDriver
-
- # for the component mode, to be run on board planetlab nodes
- # manager class
- def component_manager_class(self):
- import sfa.managers
- return sfa.managers.component_manager_pl
- # driver_class
-
- def component_driver_class(self):
- import sfa.planetlab.plcomponentdriver
- return sfa.planetlab.plcomponentdriver.PlComponentDriver
import sfa.managers.registry_manager
return sfa.managers.registry_manager.RegistryManager
- def slicemgr_manager_class(self):
- import sfa.managers.slice_manager
- return sfa.managers.slice_manager.SliceManager
-
def aggregate_manager_class(self):
import sfa.managers.aggregate_manager
return sfa.managers.aggregate_manager.AggregateManager
import sfa.managers.registry_manager
return sfa.managers.registry_manager.RegistryManager
- def slicemgr_manager_class(self):
- import sfa.managers.slice_manager
- return sfa.managers.slice_manager.SliceManager
- # most likely you'll want to turn OFF the aggregate in sfa-config-tty
- # SFA_AGGREGATE_ENABLED=false
-
def aggregate_manager_class(self):
import sfa.managers.aggregate_manager
return sfa.managers.aggregate_manager.AggregateManager
#!/usr/bin/python
-import sys
+# pylint: disable=c0111, w1201, w0622
+
from datetime import datetime
from sfa.util.xrn import get_authority, hrn_to_urn
from sfa.generic import Generic
from sfa.util.config import Config
-from sfa.util.sfalogging import _SfaLogger
+from sfa.util.sfalogging import init_logger, logger as default_logger
from sfa.trust.hierarchy import Hierarchy
-#from sfa.trust.trustedroots import TrustedRoots
+# from sfa.trust.trustedroots import TrustedRoots
from sfa.trust.gid import create_uuid
# using global alchemy.session() here is fine
# as importer is on standalone one-shot process
from sfa.storage.alchemy import global_dbsession
from sfa.storage.model import RegRecord, RegAuthority, RegUser
-from sfa.trust.certificate import convert_public_key, Keypair
+# note on logging
+# it is doubtful that anyone ever used the ability to
+# pass a logger to this class, and that can probably be
+# thrown away.
+# However a quick attempt showed that it seems to
+# also require changes in the Generic layer
class Importer:
self.auth_hierarchy = auth_hierarchy
else:
self.auth_hierarchy = Hierarchy()
- if logger is not None:
- self.logger = logger
+ if logger is None:
+ # redirect to sfa-import.log
+ self.logger = default_logger
+ init_logger('import')
else:
- self.logger = _SfaLogger(
- logfile='/var/log/sfa_import.log', loggername='importlog')
- self.logger.setLevelFromOptVerbose(self.config.SFA_API_LOGLEVEL)
+ self.logger = logger
+ self.logger.setLevelFromOptVerbose(self.config.SFA_API_LOGLEVEL)
# ugly side effect so that other modules get it right
import sfa.util.sfalogging
sfa.util.sfalogging.logger = logger
# self.TrustedRoots = TrustedRoots(self.config.get_trustedroots_dir())
# check before creating a RegRecord entry as we run this over and over
- def record_exists(self, type, hrn):
- return global_dbsession.query(RegRecord).filter_by(hrn=hrn, type=type).count() != 0
+ @staticmethod
+ def record_exists(type, hrn):
+ return (global_dbsession.query(RegRecord)
+ .filter_by(hrn=hrn, type=type).count() != 0)
def create_top_level_auth_records(self, hrn):
"""
- Create top level db records (includes root and sub authorities (local/remote)
+ Create top-level db records;
+ this includes root and sub authorities (local/remote).
"""
# make sure parent exists
parent_hrn = get_authority(hrn)
self.logger.info(
"SfaImporter: imported authority (parent) %s " % auth_record)
- def create_sm_client_record(self):
- """
- Create a user record for the Slicemanager service.
- """
- hrn = self.interface_hrn + '.slicemanager'
- urn = hrn_to_urn(hrn, 'user')
- if not self.auth_hierarchy.auth_exists(urn):
- self.logger.info("SfaImporter: creating Slice Manager user")
- self.auth_hierarchy.create_auth(urn)
-
- if self.record_exists('user', hrn):
- return
- auth_info = self.auth_hierarchy.get_auth_info(hrn)
- user_record = RegUser(hrn=hrn, gid=auth_info.get_gid_object(),
- authority=get_authority(hrn))
- user_record.just_created()
- global_dbsession.add(user_record)
- global_dbsession.commit()
- self.logger.info(
- "SfaImporter: importing user (slicemanager) %s " % user_record)
-
def create_interface_records(self):
"""
Create a record for each SFA interface
if importer_class:
begin_time = datetime.utcnow()
self.logger.info(30 * '=')
- self.logger.info("Starting import on %s, using class %s from flavour %s" %
- (begin_time, importer_class.__name__, generic.flavour))
+ self.logger.info(
+ "Starting import on %s, using class %s from flavour %s" %
+ (begin_time, importer_class.__name__, generic.flavour))
testbed_importer = importer_class(auth_hierarchy, self.logger)
if testbed_importer:
testbed_importer.add_options(options)
except:
continue
if not pkey:
- self.logger.warn(
+ self.logger.warning(
'DummyImporter: unable to convert public key for %s' % user_hrn)
pkey = Keypair(create=True)
else:
# the user has no keys. Creating a random keypair for
# the user's gid
- self.logger.warn(
+ self.logger.warning(
"DummyImporter: user %s does not have a NITOS public key" % user_hrn)
pkey = Keypair(create=True)
return (pubkey, pkey)
+++ /dev/null
-
-import os
-
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
-
-from sfa.trust.gid import create_uuid
-from sfa.trust.certificate import convert_public_key, Keypair
-
-# using global alchemy.session() here is fine
-# as importer is on standalone one-shot process
-from sfa.storage.alchemy import global_dbsession
-from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
-
-from sfa.nitos.nitosshell import NitosShell
-from sfa.nitos.nitosxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_nitos_slicename, username_to_hrn
-
-
-def _get_site_hrn(interface_hrn, site):
- hrn = ".".join([interface_hrn, site['name']])
- return hrn
-
-
-class NitosImporter:
-
- def __init__(self, auth_hierarchy, logger):
- self.auth_hierarchy = auth_hierarchy
- self.logger = logger
-
- def add_options(self, parser):
- # we don't have any options for now
- pass
-
- # hrn hash is initialized from current db
- # remember just-created records as we go
- # xxx might make sense to add a UNIQUE constraint in the db itself
- def remember_record_by_hrn(self, record):
- tuple = (record.type, record.hrn)
- if tuple in self.records_by_type_hrn:
- self.logger.warning(
- "NitosImporter.remember_record_by_hrn: duplicate (%s,%s)" % tuple)
- return
- self.records_by_type_hrn[tuple] = record
-
- # ditto for pointer hash
- def remember_record_by_pointer(self, record):
- if record.pointer == -1:
- self.logger.warning(
- "NitosImporter.remember_record_by_pointer: pointer is void")
- return
- tuple = (record.type, record.pointer)
- if tuple in self.records_by_type_pointer:
- self.logger.warning(
- "NitosImporter.remember_record_by_pointer: duplicate (%s,%s)" % tuple)
- return
- self.records_by_type_pointer[(record.type, record.pointer,)] = record
-
- def remember_record(self, record):
- self.remember_record_by_hrn(record)
- self.remember_record_by_pointer(record)
-
- def locate_by_type_hrn(self, type, hrn):
- return self.records_by_type_hrn.get((type, hrn), None)
-
- def locate_by_type_pointer(self, type, pointer):
- return self.records_by_type_pointer.get((type, pointer), None)
-
- # a convenience/helper function to see if a record is already known
- # a former, broken, attempt (in 2.1-9) had been made
- # to try and use 'pointer' as a first, most significant attempt
- # the idea being to preserve stuff as much as possible, and thus
- # to avoid creating a new gid in the case of a simple hrn rename
- # however this of course doesn't work as the gid depends on the hrn...
- # def locate (self, type, hrn=None, pointer=-1):
- # if pointer!=-1:
- # attempt = self.locate_by_type_pointer (type, pointer)
- # if attempt : return attempt
- # if hrn is not None:
- # attempt = self.locate_by_type_hrn (type, hrn,)
- # if attempt : return attempt
- # return None
-
- # this makes the run method a bit abtruse - out of the way
-
- def run(self, options):
- config = Config()
- interface_hrn = config.SFA_INTERFACE_HRN
- root_auth = config.SFA_REGISTRY_ROOT_AUTH
- shell = NitosShell(config)
-
- # retrieve all existing SFA objects
- all_records = global_dbsession.query(RegRecord).all()
-
- # create hash by (type,hrn)
- # we essentially use this to know if a given record is already known to
- # SFA
- self.records_by_type_hrn = \
- dict([((record.type, record.hrn), record)
- for record in all_records])
- # create hash by (type,pointer)
- self.records_by_type_pointer = \
- dict([((record.type, record.pointer), record) for record in all_records
- if record.pointer != -1])
-
- # initialize record.stale to True by default, then mark stale=False on
- # the ones that are in use
- for record in all_records:
- record.stale = True
-
- # retrieve NITOS data
- # Get site info
- # retrieve only required stuf
- site = shell.getTestbedInfo()
- sites = [site]
- # create a hash of sites by login_base
-# # sites_by_login_base = dict ( [ ( site['login_base'], site ) for site in sites ] )
- # Get all NITOS users
- users = shell.getUsers()
- # create a hash of users by user_id
- users_by_id = dict([(user['user_id'], user) for user in users])
- # Get all NITOS public keys
- # accumulate key ids for keys retrieval
-# key_ids = []
-# for person in persons:
-# key_ids.extend(person['key_ids'])
-# keys = shell.GetKeys( {'peer_id': None, 'key_id': key_ids,
-# 'key_type': 'ssh'} )
-# # create a hash of keys by key_id
-# keys_by_id = dict ( [ ( key['key_id'], key ) for key in keys ] )
- # create a dict user_id -> [ (nitos)keys ]
- keys_by_user_id = dict(
- [(user['user_id'], user['keys']) for user in users])
- # Get all nitos nodes
- nodes = shell.getNodes({}, [])
- # create hash by node_id
- nodes_by_id = dict([(node['node_id'], node) for node in nodes])
- # Get all nitos slices
- slices = shell.getSlices({}, [])
- # create hash by slice_id
- slices_by_id = dict([(slice['slice_id'], slice) for slice in slices])
-
- # start importing
- for site in sites:
- # for i in [0]:
- site_hrn = _get_site_hrn(interface_hrn, site)
- # import if hrn is not in list of existing hrns or if the hrn exists
- # but its not a site record
- site_record = self.locate_by_type_hrn('authority', site_hrn)
- if not site_record:
- try:
- urn = hrn_to_urn(site_hrn, 'authority')
- if not self.auth_hierarchy.auth_exists(urn):
- self.auth_hierarchy.create_auth(urn)
- auth_info = self.auth_hierarchy.get_auth_info(urn)
- site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
- pointer=0,
- authority=get_authority(site_hrn))
- site_record.just_created()
- global_dbsession.add(site_record)
- global_dbsession.commit()
- self.logger.info(
- "NitosImporter: imported authority (site) : %s" % site_record)
- self.remember_record(site_record)
- except:
- # if the site import fails then there is no point in trying to import the
- # site's child records (node, slices, persons), so skip
- # them.
- self.logger.log_exc(
- "NitosImporter: failed to import site. Skipping child records")
- continue
- else:
- # xxx update the record ...
- pass
- site_record.stale = False
-
- # import node records
- for node in nodes:
- site_auth = get_authority(site_hrn)
- site_name = site['name']
- node_hrn = hostname_to_hrn(
- site_auth, site_name, node['hostname'])
- # xxx this sounds suspicious
- if len(node_hrn) > 64:
- node_hrn = node_hrn[:64]
- node_record = self.locate_by_type_hrn('node', node_hrn)
- if not node_record:
- try:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(node_hrn, 'node')
- node_gid = self.auth_hierarchy.create_gid(
- urn, create_uuid(), pkey)
- node_record = RegNode(hrn=node_hrn, gid=node_gid,
- pointer=node['node_id'],
- authority=get_authority(node_hrn))
- node_record.just_created()
- global_dbsession.add(node_record)
- global_dbsession.commit()
- self.logger.info(
- "NitosImporter: imported node: %s" % node_record)
- self.remember_record(node_record)
- except:
- self.logger.log_exc(
- "NitosImporter: failed to import node")
- else:
- # xxx update the record ...
- pass
-
- node_record.stale = False
-
- # import users
- for user in users:
- user_hrn = username_to_hrn(
- interface_hrn, site['name'], user['username'])
- # xxx suspicious again
- if len(user_hrn) > 64:
- user_hrn = user_hrn[:64]
- user_urn = hrn_to_urn(user_hrn, 'user')
-
- user_record = self.locate_by_type_hrn('user', user_hrn)
-
- # return a tuple pubkey (a nitos key object) and pkey (a
- # Keypair object)
- def init_user_key(user):
- pubkey = None
- pkey = None
- if user['keys']:
- # randomly pick first key in set
- for key in user['keys']:
- pubkey = key
- try:
- pkey = convert_public_key(pubkey)
- break
- except:
- continue
- if not pkey:
- self.logger.warn(
- 'NitosImporter: unable to convert public key for %s' % user_hrn)
- pkey = Keypair(create=True)
- else:
- # the user has no keys. Creating a random keypair for
- # the user's gid
- self.logger.warn(
- "NitosImporter: user %s does not have a NITOS public key" % user_hrn)
- pkey = Keypair(create=True)
- return (pubkey, pkey)
-
- # new user
- try:
- if not user_record:
- (pubkey, pkey) = init_user_key(user)
- user_gid = self.auth_hierarchy.create_gid(
- user_urn, create_uuid(), pkey)
- user_gid.set_email(user['email'])
- user_record = RegUser(hrn=user_hrn, gid=user_gid,
- pointer=user['user_id'],
- authority=get_authority(
- user_hrn),
- email=user['email'])
- if pubkey:
- user_record.reg_keys = [RegKey(pubkey)]
- else:
- self.logger.warning(
- "No key found for user %s" % user_record)
- user_record.just_created()
- global_dbsession.add(user_record)
- global_dbsession.commit()
- self.logger.info(
- "NitosImporter: imported user: %s" % user_record)
- self.remember_record(user_record)
- else:
- # update the record ?
- # if user's primary key has changed then we need to update the
- # users gid by forcing an update here
- sfa_keys = user_record.reg_keys
-
- def sfa_key_in_list(sfa_key, nitos_user_keys):
- for nitos_key in nitos_user_keys:
- if nitos_key == sfa_key:
- return True
- return False
- # are all the SFA keys known to nitos ?
- new_keys = False
- if not sfa_keys and user['keys']:
- new_keys = True
- else:
- for sfa_key in sfa_keys:
- if not sfa_key_in_list(sfa_key.key, user['keys']):
- new_keys = True
-
- if new_keys:
- (pubkey, pkey) = init_user_key(user)
- user_gid = self.auth_hierarchy.create_gid(
- user_urn, create_uuid(), pkey)
- if not pubkey:
- user_record.reg_keys = []
- else:
- user_record.reg_keys = [RegKey(pubkey)]
- user_record.gid = user_gid
- user_record.just_updated()
- self.logger.info(
- "NitosImporter: updated user: %s" % user_record)
- user_record.email = user['email']
- global_dbsession.commit()
- user_record.stale = False
- except:
- self.logger.log_exc("NitosImporter: failed to import user %s %s" % (
- user['user_id'], user['email']))
-
- # import slices
- for slice in slices:
- slice_hrn = slicename_to_hrn(
- interface_hrn, site['name'], slice['slice_name'])
- slice_record = self.locate_by_type_hrn('slice', slice_hrn)
- if not slice_record:
- try:
- pkey = Keypair(create=True)
- urn = hrn_to_urn(slice_hrn, 'slice')
- slice_gid = self.auth_hierarchy.create_gid(
- urn, create_uuid(), pkey)
- slice_record = RegSlice(hrn=slice_hrn, gid=slice_gid,
- pointer=slice['slice_id'],
- authority=get_authority(slice_hrn))
- slice_record.just_created()
- global_dbsession.add(slice_record)
- global_dbsession.commit()
- self.logger.info(
- "NitosImporter: imported slice: %s" % slice_record)
- self.remember_record(slice_record)
- except:
- self.logger.log_exc(
- "NitosImporter: failed to import slice")
- else:
- # xxx update the record ...
- self.logger.warning("Slice update not yet implemented")
- pass
- # record current users affiliated with the slice
- slice_record.reg_researchers = \
- [self.locate_by_type_pointer('user', int(
- user_id)) for user_id in slice['user_ids']]
- global_dbsession.commit()
- slice_record.stale = False
-
- # remove stale records
- # special records must be preserved
- system_hrns = [interface_hrn, root_auth,
- interface_hrn + '.slicemanager']
- for record in all_records:
- if record.hrn in system_hrns:
- record.stale = False
- if record.peer_authority:
- record.stale = False
-
- for record in all_records:
- try:
- stale = record.stale
- except:
- stale = True
- self.logger.warning("stale not found with %s" % record)
- if stale:
- self.logger.info(
- "NitosImporter: deleting stale record: %s" % record)
- global_dbsession.delete(record)
- global_dbsession.commit()
+++ /dev/null
-import os
-
-from sfa.util.config import Config
-from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
-from sfa.trust.gid import create_uuid
-from sfa.trust.certificate import convert_public_key, Keypair
-# using global alchemy.session() here is fine
-# as importer is on standalone one-shot process
-from sfa.storage.alchemy import global_dbsession
-from sfa.storage.model import RegRecord, RegAuthority, RegUser, RegSlice, RegNode
-from sfa.openstack.osxrn import OSXrn
-from sfa.openstack.shell import Shell
-
-
-def load_keys(filename):
- keys = {}
- tmp_dict = {}
- try:
- execfile(filename, tmp_dict)
- if 'keys' in tmp_dict:
- keys = tmp_dict['keys']
- return keys
- except:
- return keys
-
-
-def save_keys(filename, keys):
- f = open(filename, 'w')
- f.write("keys = %s" % str(keys))
- f.close()
-
-
-class OpenstackImporter:
-
- def __init__(self, auth_hierarchy, logger):
- self.auth_hierarchy = auth_hierarchy
- self.logger = logger
- self.config = Config()
- self.interface_hrn = self.config.SFA_INTERFACE_HRN
- self.root_auth = self.config.SFA_REGISTRY_ROOT_AUTH
- self.shell = Shell(self.config)
-
- def add_options(self, parser):
- self.logger.debug("OpenstackImporter: no options yet")
- pass
-
- def import_users(self, existing_hrns, existing_records):
- # Get all users
- users = self.shell.auth_manager.users.list()
- users_dict = {}
- keys_filename = self.config.config_path + os.sep + 'person_keys.py'
- old_user_keys = load_keys(keys_filename)
- user_keys = {}
- for user in users:
- auth_hrn = self.config.SFA_INTERFACE_HRN
- if user.tenantId is not None:
- tenant = self.shell.auth_manager.tenants.find(id=user.tenantId)
- auth_hrn = OSXrn(
- name=tenant.name, auth=self.config.SFA_INTERFACE_HRN, type='authority').get_hrn()
- hrn = OSXrn(name=user.name, auth=auth_hrn, type='user').get_hrn()
- users_dict[hrn] = user
- old_keys = old_user_keys.get(hrn, [])
- keyname = OSXrn(xrn=hrn, type='user').get_slicename()
- keys = [
- k.public_key for k in self.shell.nova_manager.keypairs.findall(name=keyname)]
- user_keys[hrn] = keys
- update_record = False
- if old_keys != keys:
- update_record = True
- if hrn not in existing_hrns or \
- (hrn, 'user') not in existing_records or update_record:
- urn = OSXrn(xrn=hrn, type='user').get_urn()
-
- if keys:
- try:
- pkey = convert_public_key(keys[0])
- except:
- self.logger.log_exc(
- 'unable to convert public key for %s' % hrn)
- pkey = Keypair(create=True)
- else:
- self.logger.warn(
- "OpenstackImporter: person %s does not have a PL public key" % hrn)
- pkey = Keypair(create=True)
- user_gid = self.auth_hierarchy.create_gid(
- urn, create_uuid(), pkey, email=user.email)
- user_record = RegUser()
- user_record.type = 'user'
- user_record.hrn = hrn
- user_record.gid = user_gid
- user_record.authority = get_authority(hrn)
- global_dbsession.add(user_record)
- global_dbsession.commit()
- self.logger.info(
- "OpenstackImporter: imported person %s" % user_record)
-
- return users_dict, user_keys
-
- def import_tenants(self, existing_hrns, existing_records):
- # Get all tenants
- # A tenant can represent an organizational group (site) or a
- # slice. If a tenant's authorty/parent matches the root authority it is
- # considered a group/site. All other tenants are considered slices.
- tenants = self.shell.auth_manager.tenants.list()
- tenants_dict = {}
- for tenant in tenants:
- hrn = self.config.SFA_INTERFACE_HRN + '.' + tenant.name
- tenants_dict[hrn] = tenant
- authority_hrn = OSXrn(
- xrn=hrn, type='authority').get_authority_hrn()
-
- if hrn in existing_hrns:
- continue
-
- if authority_hrn == self.config.SFA_INTERFACE_HRN:
- # import group/site
- record = RegAuthority()
- urn = OSXrn(xrn=hrn, type='authority').get_urn()
- if not self.auth_hierarchy.auth_exists(urn):
- self.auth_hierarchy.create_auth(urn)
- auth_info = self.auth_hierarchy.get_auth_info(urn)
- gid = auth_info.get_gid_object()
- record.type = 'authority'
- record.hrn = hrn
- record.gid = gid
- record.authority = get_authority(hrn)
- global_dbsession.add(record)
- global_dbsession.commit()
- self.logger.info(
- "OpenstackImporter: imported authority: %s" % record)
-
- else:
- record = RegSlice()
- urn = OSXrn(xrn=hrn, type='slice').get_urn()
- pkey = Keypair(create=True)
- gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
- record.type = 'slice'
- record.hrn = hrn
- record.gid = gid
- record.authority = get_authority(hrn)
- global_dbsession.add(record)
- global_dbsession.commit()
- self.logger.info(
- "OpenstackImporter: imported slice: %s" % record)
-
- return tenants_dict
-
- def run(self, options):
- # we don't have any options for now
- self.logger.info("OpenstackImporter.run : to do")
-
- # create dict of all existing sfa records
- existing_records = {}
- existing_hrns = []
- key_ids = []
- for record in global_dbsession.query(RegRecord):
- existing_records[(record.hrn, record.type,)] = record
- existing_hrns.append(record.hrn)
-
- tenants_dict = self.import_tenants(existing_hrns, existing_records)
- users_dict, user_keys = self.import_users(
- existing_hrns, existing_records)
-
- # remove stale records
- system_records = [self.interface_hrn, self.root_auth,
- self.interface_hrn + '.slicemanager']
- for (record_hrn, type) in existing_records.keys():
- if record_hrn in system_records:
- continue
-
- record = existing_records[(record_hrn, type)]
- if record.peer_authority:
- continue
-
- if type == 'user':
- if record_hrn in users_dict:
- continue
- elif type in['slice', 'authority']:
- if record_hrn in tenants_dict:
- continue
- else:
- continue
-
- record_object = existing_records[(record_hrn, type)]
- self.logger.info("OpenstackImporter: removing %s " % record)
- global_dbsession.delete(record_object)
- global_dbsession.commit()
-
- # save pub keys
- self.logger.info('OpenstackImporter: saving current pub keys')
- keys_filename = self.config.config_path + os.sep + 'person_keys.py'
- save_keys(keys_filename, user_keys)
elif person_id in disabled_person_ids:
pass
else:
- self.logger.warning("PlImporter: cannot locate person_id {} in site {} - ignored"
- .format(person_id, site_hrn))
+ self.logger.warning(
+ "PlImporter: cannot locate person_id {} in site {} - ignored"
+ .format(person_id, site_hrn))
# make sure to NOT run this if anything is wrong
if not proceed:
continue
#person_hrn = email_to_hrn(site_hrn, person['email'])
person_hrn = person['hrn']
if person_hrn is None:
- self.logger.warn(
+ self.logger.warning(
"Person {} has no hrn - skipped".format(person['email']))
continue
# xxx suspicious again
try:
pkey = convert_public_key(pubkey['key'])
except:
- self.logger.warn('PlImporter: unable to convert public key for {}'
- .format(person_hrn))
+ self.logger.warning(
+ 'PlImporter: unable to convert public key for {}'
+ .format(person_hrn))
pkey = Keypair(create=True)
else:
# the user has no keys. Creating a random keypair for
# the user's gid
- self.logger.warn("PlImporter: person {} does not have a PL public key"
- .format(person_hrn))
+ self.logger.warning(
+ "PlImporter: person {} does not have a PL public key"
+ .format(person_hrn))
pkey = Keypair(create=True)
return (pubkey, pkey)
+++ /dev/null
-import os
-import time
-import re
-
-#from sfa.util.faults import *
-from sfa.util.sfalogging import logger
-from sfa.util.sfatime import SFATIME_FORMAT
-from sfa.util.config import Config
-from sfa.util.callids import Callids
-from sfa.util.version import version_core
-from sfa.util.xrn import urn_to_hrn, hrn_to_urn, Xrn
-
-# xxx the sfa.rspecs module is dead - this symbol is now undefined
-#from sfa.rspecs.sfa_rspec import sfa_rspec_version
-
-from sfa.managers.aggregate_manager import AggregateManager
-
-from sfa.planetlab.plslices import PlSlices
-
-
-class AggregateManagerMax (AggregateManager):
-
- def __init__(self, config):
- pass
-
- RSPEC_TMP_FILE_PREFIX = "/tmp/max_rspec"
-
- # execute shell command and return both exit code and text output
- def shell_execute(self, cmd, timeout):
- pipe = os.popen('{ ' + cmd + '; } 2>&1', 'r')
- pipe = os.popen(cmd + ' 2>&1', 'r')
- text = ''
- while timeout:
- line = pipe.read()
- text += line
- time.sleep(1)
- timeout = timeout - 1
- code = pipe.close()
- if code is None:
- code = 0
- if text[-1:] == '\n':
- text = text[:-1]
- return code, text
-
- def call_am_apiclient(self, client_app, params, timeout):
- """
- call AM API client with command like in the following example:
- cd aggregate_client; java -classpath AggregateWS-client-api.jar:lib/* \
- net.geni.aggregate.client.examples.CreateSliceNetworkClient \
- ./repo https://geni:8443/axis2/services/AggregateGENI \
- ... params ...
- """
- (client_path, am_url) = Config().get_max_aggrMgr_info()
- sys_cmd = "cd " + client_path + "; java -classpath AggregateWS-client-api.jar:lib/* net.geni.aggregate.client.examples." + \
- client_app + " ./repo " + am_url + " " + ' '.join(params)
- ret = self.shell_execute(sys_cmd, timeout)
- logger.debug("shell_execute cmd: %s returns %s" % (sys_cmd, ret))
- return ret
-
- # save request RSpec xml content to a tmp file
- def save_rspec_to_file(self, rspec):
- path = AggregateManagerMax.RSPEC_TMP_FILE_PREFIX + "_" + \
- time.strftime(SFATIME_FORMAT, time.gmtime(time.time())) + ".xml"
- file = open(path, "w")
- file.write(rspec)
- file.close()
- return path
-
- # get stripped down slice id/name plc.maxpl.xislice1 --> maxpl_xislice1
- def get_plc_slice_id(self, cred, xrn):
- (hrn, type) = urn_to_hrn(xrn)
- slice_id = hrn.find(':')
- sep = '.'
- if hrn.find(':') != -1:
- sep = ':'
- elif hrn.find('+') != -1:
- sep = '+'
- else:
- sep = '.'
- slice_id = hrn.split(sep)[-2] + '_' + hrn.split(sep)[-1]
- return slice_id
-
- # extract xml
- def get_xml_by_tag(self, text, tag):
- indx1 = text.find('<' + tag)
- indx2 = text.find('/' + tag + '>')
- xml = None
- if indx1 != -1 and indx2 > indx1:
- xml = text[indx1:indx2 + len(tag) + 2]
- return xml
-
- # formerly in aggregate_manager.py but got unused in there...
- def _get_registry_objects(self, slice_xrn, creds, users):
- """
-
- """
- hrn, _ = urn_to_hrn(slice_xrn)
-
- #hrn_auth = get_authority(hrn)
-
- # Build up objects that an SFA registry would return if SFA
- # could contact the slice's registry directly
- reg_objects = None
-
- if users:
- # dont allow special characters in the site login base
- #only_alphanumeric = re.compile('[^a-zA-Z0-9]+')
- #login_base = only_alphanumeric.sub('', hrn_auth[:20]).lower()
- slicename = hrn_to_pl_slicename(hrn)
- login_base = slicename.split('_')[0]
- reg_objects = {}
- site = {}
- site['site_id'] = 0
- site['name'] = 'geni.%s' % login_base
- site['enabled'] = True
- site['max_slices'] = 100
-
- # Note:
- # Is it okay if this login base is the same as one already at this myplc site?
- # Do we need uniqueness? Should use hrn_auth instead of just the
- # leaf perhaps?
- site['login_base'] = login_base
- site['abbreviated_name'] = login_base
- site['max_slivers'] = 1000
- reg_objects['site'] = site
-
- slice = {}
-
- # get_expiration always returns a normalized datetime - no need to
- # utcparse
- extime = Credential(string=creds[0]).get_expiration()
- # If the expiration time is > 60 days from now, set the expiration
- # time to 60 days from now
- if extime > datetime.datetime.utcnow() + datetime.timedelta(days=60):
- extime = datetime.datetime.utcnow() + datetime.timedelta(days=60)
- slice['expires'] = int(time.mktime(extime.timetuple()))
- slice['hrn'] = hrn
- slice['name'] = hrn_to_pl_slicename(hrn)
- slice['url'] = hrn
- slice['description'] = hrn
- slice['pointer'] = 0
- reg_objects['slice_record'] = slice
-
- reg_objects['users'] = {}
- for user in users:
- user['key_ids'] = []
- hrn, _ = urn_to_hrn(user['urn'])
- user['email'] = hrn_to_pl_slicename(hrn) + "@geni.net"
- user['first_name'] = hrn
- user['last_name'] = hrn
- reg_objects['users'][user['email']] = user
-
- return reg_objects
-
- def prepare_slice(self, api, slice_xrn, creds, users):
- reg_objects = self._get_registry_objects(slice_xrn, creds, users)
- (hrn, type) = urn_to_hrn(slice_xrn)
- slices = PlSlices(self.driver)
- peer = slices.get_peer(hrn)
- sfa_peer = slices.get_sfa_peer(hrn)
- slice_record = None
- if users:
- slice_record = users[0].get('slice_record', {})
- registry = api.registries[api.hrn]
- credential = api.getCredential()
- # ensure site record exists
- site = slices.verify_site(hrn, slice_record, peer, sfa_peer)
- # ensure slice record exists
- slice = slices.verify_slice(hrn, slice_record, peer, sfa_peer)
- # ensure person records exists
- persons = slices.verify_persons(hrn, slice, users, peer, sfa_peer)
-
- def parse_resources(self, text, slice_xrn):
- resources = []
- urn = hrn_to_urn(slice_xrn, 'sliver')
- plc_slice = re.search("Slice Status => ([^\n]+)", text)
- if plc_slice.group(1) != 'NONE':
- res = {}
- res['geni_urn'] = urn + '_plc_slice'
- res['geni_error'] = ''
- res['geni_status'] = 'unknown'
- if plc_slice.group(1) == 'CREATED':
- res['geni_status'] = 'ready'
- resources.append(res)
- vlans = re.findall("GRI => ([^\n]+)\n\t Status => ([^\n]+)", text)
- for vlan in vlans:
- res = {}
- res['geni_error'] = ''
- res['geni_urn'] = urn + '_vlan_' + vlan[0]
- if vlan[1] == 'ACTIVE':
- res['geni_status'] = 'ready'
- elif vlan[1] == 'FAILED':
- res['geni_status'] = 'failed'
- else:
- res['geni_status'] = 'configuring'
- resources.append(res)
- return resources
-
- def slice_status(self, api, slice_xrn, creds):
- urn = hrn_to_urn(slice_xrn, 'slice')
- result = {}
- top_level_status = 'unknown'
- slice_id = self.get_plc_slice_id(creds, urn)
- (ret, output) = self.call_am_apiclient(
- "QuerySliceNetworkClient", [slice_id, ], 5)
- # parse output into rspec XML
- if output.find("Unkown Rspec:") > 0:
- top_level_staus = 'failed'
- result['geni_resources'] = ''
- else:
- has_failure = 0
- all_active = 0
- if output.find("Status => FAILED") > 0:
- top_level_staus = 'failed'
- elif (output.find("Status => ACCEPTED") > 0 or output.find("Status => PENDING") > 0
- or output.find("Status => INSETUP") > 0 or output.find("Status => INCREATE") > 0
- ):
- top_level_status = 'configuring'
- else:
- top_level_status = 'ready'
- result['geni_resources'] = self.parse_resources(output, slice_xrn)
- result['geni_urn'] = urn
- result['geni_status'] = top_level_status
- return result
-
- def create_slice(self, api, xrn, cred, rspec, users):
- indx1 = rspec.find("<RSpec")
- indx2 = rspec.find("</RSpec>")
- if indx1 > -1 and indx2 > indx1:
- rspec = rspec[indx1 + len("<RSpec type=\"SFA\">"):indx2 - 1]
- rspec_path = self.save_rspec_to_file(rspec)
- self.prepare_slice(api, xrn, cred, users)
- slice_id = self.get_plc_slice_id(cred, xrn)
- sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" + slice_id + "/g\" " + \
- rspec_path + \
- ";sed -i \"s/:rspec=[^:'<\\\" ]*/:rspec=" + \
- slice_id + "/g\" " + rspec_path
- ret = self.shell_execute(sys_cmd, 1)
- sys_cmd = "sed -i \"s/rspec id=\\\"[^\\\"]*/rspec id=\\\"" + \
- rspec_path + "/g\""
- ret = self.shell_execute(sys_cmd, 1)
- (ret, output) = self.call_am_apiclient(
- "CreateSliceNetworkClient", [rspec_path, ], 3)
- # parse output ?
- rspec = "<RSpec type=\"SFA\"> Done! </RSpec>"
- return True
-
- def delete_slice(self, api, xrn, cred):
- slice_id = self.get_plc_slice_id(cred, xrn)
- (ret, output) = self.call_am_apiclient(
- "DeleteSliceNetworkClient", [slice_id, ], 3)
- # parse output ?
- return 1
-
- def get_rspec(self, api, cred, slice_urn):
- logger.debug("#### called max-get_rspec")
- # geni_slice_urn: urn:publicid:IDN+plc:maxpl+slice+xi_rspec_test1
- if slice_urn == None:
- (ret, output) = self.call_am_apiclient(
- "GetResourceTopology", ['all', '\"\"'], 5)
- else:
- slice_id = self.get_plc_slice_id(cred, slice_urn)
- (ret, output) = self.call_am_apiclient(
- "GetResourceTopology", ['all', slice_id, ], 5)
- # parse output into rspec XML
- if output.find("No resouce found") > 0:
- rspec = "<RSpec type=\"SFA\"> <Fault>No resource found</Fault> </RSpec>"
- else:
- comp_rspec = self.get_xml_by_tag(output, 'computeResource')
- logger.debug("#### computeResource %s" % comp_rspec)
- topo_rspec = self.get_xml_by_tag(output, 'topology')
- logger.debug("#### topology %s" % topo_rspec)
- rspec = "<RSpec type=\"SFA\"> <network name=\"" + \
- Config().get_interface_hrn() + "\">"
- if comp_rspec != None:
- rspec = rspec + self.get_xml_by_tag(output, 'computeResource')
- if topo_rspec != None:
- rspec = rspec + self.get_xml_by_tag(output, 'topology')
- rspec = rspec + "</network> </RSpec>"
- return (rspec)
-
- def start_slice(self, api, xrn, cred):
- # service not supported
- return None
-
- def stop_slice(self, api, xrn, cred):
- # service not supported
- return None
-
- def reset_slices(self, api, xrn):
- # service not supported
- return None
-
- # GENI AM API Methods
-
- def SliverStatus(self, api, slice_xrn, creds, options):
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return {}
- return self.slice_status(api, slice_xrn, creds)
-
- def CreateSliver(self, api, slice_xrn, creds, rspec_string, users, options):
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return ""
- # TODO: create real CreateSliver response rspec
- ret = self.create_slice(api, slice_xrn, creds, rspec_string, users)
- if ret:
- return self.get_rspec(api, creds, slice_xrn)
- else:
- return "<?xml version=\"1.0\" ?> <RSpec type=\"SFA\"> Error! </RSpec>"
-
- def DeleteSliver(self, api, xrn, creds, options):
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return ""
- return self.delete_slice(api, xrn, creds)
-
- # no caching
- def ListResources(self, api, creds, options):
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return ""
- # version_string = "rspec_%s" % (rspec_version.get_version_name())
- slice_urn = options.get('geni_slice_urn')
- return self.get_rspec(api, creds, slice_urn)
-
- def fetch_context(self, slice_hrn, user_hrn, contexts):
- """
- Returns the request context required by sfatables. At some point, this mechanism should be changed
- to refer to "contexts", which is the information that sfatables is requesting. But for now, we just
- return the basic information needed in a dict.
- """
- base_context = {'sfa': {'user': {'hrn': user_hrn}}}
- return base_context
+++ /dev/null
-
-def start_slice(api, slicename):
- return
-
-
-def stop_slice(api, slicename):
- return
-
-
-def DeleteSliver(api, slicename, call_id):
- return
-
-
-def reset_slice(api, slicename):
- return
-
-
-def ListSlices(api):
- return []
-
-
-def reboot():
- return
-
-
-def redeem_ticket(api, ticket_string):
- return
+++ /dev/null
-from sfa.util.faults import SliverDoesNotExist
-from sfa.util.version import version_core
-from sfa.util.py23 import xmlrpc_client
-
-from sfa.trust.sfaticket import SfaTicket
-
-from sfa.planetlab.plxrn import PlXrn
-
-
-def GetVersion(api, options):
- return version_core({'interface': 'component',
- 'testbed': 'myplc'})
-
-
-def init_server():
- from sfa.server import sfa_component_setup
- # get current trusted gids
- try:
- sfa_component_setup.get_trusted_certs()
- except:
- # our keypair may be old, try refreshing
- sfa_component_setup.get_node_key()
- sfa_component_setup.GetCredential(force=True)
- sfa_component_setup.get_trusted_certs()
-
-
-def SliverStatus(api, slice_xrn, creds):
- result = {}
- result['geni_urn'] = slice_xrn
- result['geni_status'] = 'unknown'
- result['geni_resources'] = {}
- return result
-
-
-def start_slice(api, xrn, creds):
- slicename = PlXrn(xrn, type='slice').pl_slicename()
- api.driver.nodemanager.Start(slicename)
-
-
-def stop_slice(api, xrn, creds):
- slicename = PlXrn(xrn, type='slice').pl_slicename()
- api.driver.nodemanager.Stop(slicename)
-
-
-def DeleteSliver(api, xrn, creds, call_id):
- slicename = PlXrn(xrn, type='slice').pl_slicename()
- api.driver.nodemanager.Destroy(slicename)
-
-
-def reset_slice(api, xrn):
- slicename = PlXrn(xrn, type='slice').pl_slicename()
- if not api.sliver_exists(slicename):
- raise SliverDoesNotExist(slicename)
- api.driver.nodemanager.ReCreate(slicename)
-
-# xxx outdated - this should accept a credential & call_id
-
-
-def ListSlices(api):
- # this returns a tuple, the data we want is at index 1
- xids = api.driver.nodemanager.GetXIDs()
- # unfortunately the data we want is given to us as
- # a string but we really want it as a dict
- # lets eval it
- slices = eval(xids[1])
- return slices.keys()
-
-
-def redeem_ticket(api, ticket_string):
- ticket = SfaTicket(string=ticket_string)
- ticket.decode()
- hrn = ticket.attributes['slivers'][0]['hrn']
- slicename = PlXrn(hrn).pl_slicename()
- if not api.sliver_exists(slicename):
- raise SliverDoesNotExist(slicename)
-
- # convert ticket to format nm is used to
- nm_ticket = xmlrpc_client.dumps((ticket.attributes,), methodresponse=True)
- api.driver.nodemanager.AdminTicket(nm_ticket)
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<rspec id="max_rspec_slice1" xmlns="http://geni.maxgigapop.net/aggregate/rspec/20100412/" schemaLocation="http://geni.dragon.maxgigapop.net/max-rspec.xsd"
- xmlns:CtrlPlane="http://ogf.org/schema/network/topology/ctrlPlane/20080828/" CtrlPlane:schemaLocation="http://www.controlplane.net/idcp-v1.1/nmtopo-ctrlp.xsd">
- <aggregate>geni.maxgigapop.net</aggregate>
- <description>Example MAX RSpec</description>
- <lifetime id="time-1271533930-1271563981">
- <CtrlPlane:start type="CtrlPlane:TimeContent">1279848020</CtrlPlane:start>
- <CtrlPlane:end type="CtrlPlane:TimeContent">1280712039</CtrlPlane:end>
- </lifetime>
- <computeResource id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1">
- <planetlabNodeSliver id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3">
- <address>206.196.176.55</address>
- <computeCapacity>
- <cpuType>generic</cpuType>
- <cpuSpeed>2.0GHz</cpuSpeed>
- <numCpuCores>1</numCpuCores>
- <memorySize>256MB</memorySize>
- <diskSize>16GB</diskSize>
- </computeCapacity>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3:interface=eth1.any_1">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.10.2/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2:interface=eth1.any_1</peerNetworkInterface>
- </networkInterface>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3:interface=eth1.any_2">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.30.1/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5:interface=eth1.any_2</peerNetworkInterface>
- </networkInterface>
- </planetlabNodeSliver>
- <planetlabNodeSliver id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5">
- <address>206.196.176.138</address>
- <computeCapacity>
- <cpuType>generic</cpuType>
- <cpuSpeed>2.0GHz</cpuSpeed>
- <numCpuCores>1</numCpuCores>
- <memorySize>256MB</memorySize>
- <diskSize>16GB</diskSize>
- </computeCapacity>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5:interface=eth1.any_3">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.20.2/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2:interface=eth1.any_3</peerNetworkInterface>
- </networkInterface>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5:interface=eth1.any_2">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.30.2/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3:interface=eth1.any_2</peerNetworkInterface>
- </networkInterface>
- </planetlabNodeSliver>
- <planetlabNodeSliver id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2">
- <address>206.196.176.133</address>
- <computeCapacity>
- <cpuType>generic</cpuType>
- <cpuSpeed>2.0GHz</cpuSpeed>
- <numCpuCores>1</numCpuCores>
- <memorySize>256MB</memorySize>
- <diskSize>16GB</diskSize>
- </computeCapacity>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2:interface=eth1.any_1">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.10.1/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab3:interface=eth1.any_1</peerNetworkInterface>
- </networkInterface>
- <networkInterface id="urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab2:interface=eth1.any_3">
- <deviceType>Ethernet</deviceType>
- <deviceName>eth1</deviceName>
- <capacity>100Mbps</capacity>
- <ipAddress>10.10.20.1/24</ipAddress>
- <vlanRange>any</vlanRange>
- <peerNetworkInterface>urn:aggregate=geni.maxgigapop.net:rspec=my-test-max-rspec-slice1:domain=dragon.maxgigapop.net:node=planetlab5:interface=eth1.any_3</peerNetworkInterface>
- </networkInterface>
- </planetlabNodeSliver>
- </computeResource>
-</rspec>
+# pylint: disable=c0111, w1202
+
from __future__ import print_function
# for get_key_from_incoming_ip
import os
import commands
-from sfa.util.faults import RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority, \
- UnknownSfaType, ExistingRecord, NonExistingRecord
+from sfa.util.faults import (
+ RecordNotFound, AccountNotEnabled, PermissionError, MissingAuthority,
+ UnknownSfaType, ExistingRecord, NonExistingRecord)
from sfa.util.sfatime import utcparse, datetime_to_epoch
from sfa.util.prefixTree import prefixTree
from sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn
from sfa.trust.certificate import Certificate, Keypair, convert_public_key
from sfa.trust.gid import create_uuid
-from sfa.storage.model import make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey, \
- augment_with_sfa_builtins
+from sfa.storage.model import (
+ make_record, RegRecord, RegAuthority, RegUser, RegSlice, RegKey,
+ augment_with_sfa_builtins)
# the types that we need to exclude from sqlobjects before being able to dump
# them on the xmlrpc wire
from sqlalchemy.orm.collections import InstrumentedList
# 'researcher' or 'pi' to be set - reg-* are just ignored
#
# the '_normalize_input' helper functions below aim at ironing this out
-# however in order to break as few code as possible we essentially make sure that *both* fields are set
-# upon entering the write methods (so again register and update) for legacy, as some driver code
+# however in order to break as few code as possible we essentially
+# make sure that *both* fields are set upon entering the write methods
+# (again: register and update) for legacy, as some driver code
# might depend on the presence of, say, 'researcher'
# normalize an input record to a write method - register or update
class RegistryManager:
def __init__(self, config):
- logger.info("Creating RegistryManager[{}]".format(id(self)))
+ logger.debug("Creating RegistryManager[{}]".format(id(self)))
# The GENI GetVersion call
def GetVersion(self, api, options):
- peers = dict([(hrn, interface.get_url()) for (hrn, interface) in api.registries.iteritems()
- if hrn != api.hrn])
+ peers = {hrn: interface.get_url()
+ for (hrn, interface) in api.registries.iteritems()
+ if hrn != api.hrn}
xrn = Xrn(api.hrn, type='authority')
return version_core({'interface': 'registry',
'sfa': 3,
RegRecord).filter_by(hrn=caller_hrn).first()
if not caller_record:
raise RecordNotFound(
- "Unable to associated caller (hrn={}, type={}) with credential for (hrn: {}, type: {})"
+ "Unable to associated caller (hrn={}, type={}) "
+ "with credential for (hrn: {}, type: {})"
.format(caller_hrn, caller_type, hrn, type))
caller_gid = GID(string=caller_record.gid)
new_cred.set_expiration(int(expires))
auth_kind = "authority,ma,sa"
# Parent not necessary, verify with certs
- #new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
+ # new_cred.set_parent(api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
new_cred.encode()
new_cred.sign()
credential = api.getCredential()
interface = api.registries[registry_hrn]
server_proxy = api.server_proxy(interface, credential)
- # should propagate the details flag but that's not supported in the xmlrpc interface yet
- #peer_records = server_proxy.Resolve(xrns, credential,type, details=details)
+ # should propagate the details flag but that's not supported
+ # in the xmlrpc interface yet
+ # peer_records = server_proxy.Resolve(xrns, credential,type, details=details)
peer_records = server_proxy.Resolve(xrns, credential)
# pass foreign records as-is
# previous code used to read
record_dicts = record_list
# if we still have not found the record yet, try the local registry
-# logger.debug("before trying local records, {} foreign records".format(len(record_dicts)))
+ # logger.debug("before trying local records, {} foreign records"
+ # .format(len(record_dicts)))
if not record_dicts:
recursive = False
if ('recursive' in options and options['recursive']):
if recursive:
records = dbsession.query(RegRecord).filter(
RegRecord.hrn.startswith(hrn)).all()
-# logger.debug("recursive mode, found {} local records".format(len(records)))
+ # logger.debug("recursive mode, found {} local records".
+ # format(len(records)))
else:
records = dbsession.query(
RegRecord).filter_by(authority=hrn).all()
-# logger.debug("non recursive mode, found {} local records".format(len(records)))
+ # logger.debug("non recursive mode, found {} local records"
+ # .format(len(records)))
# so that sfi list can show more than plain names...
for record in records:
# xxx mystery - see also the bottom of model.py
# utility for handling relationships among the SFA objects
# subject_record describes the subject of the relationships
- # ref_record contains the target values for the various relationships we need to manage
- # (to begin with, this is just the slice x person (researcher) and authority x person (pi) relationships)
+ # ref_record contains the target values for the various relationships
+ # we need to manage (to begin with, this is just the
+ # slice x person (researcher) and authority x person (pi) relationships)
def update_driver_relations(self, api, subject_obj, ref_obj):
type = subject_obj.type
# for (k,v) in subject_obj.__dict__.items(): print k,'=',v
self.update_driver_relation(
api, subject_obj, ref_obj.pi, 'user', 'pi')
- # field_key is the name of one field in the record, typically 'researcher' for a 'slice' record
+ # field_key is the name of one field in the record,
+ # typically 'researcher' for a 'slice' record
# hrns is the list of hrns that should be linked to the subject from now on
# target_type would be e.g. 'user' in the 'slice' x 'researcher' example
- def update_driver_relation(self, api, record_obj, hrns, target_type, relation_name):
+ def update_driver_relation(self, api, record_obj, hrns,
+ target_type, relation_name):
dbsession = api.dbsession()
# locate the linked objects in our db
subject_type = record_obj.type
+++ /dev/null
-import sys
-import time
-import traceback
-from copy import copy
-from lxml import etree
-
-from sfa.trust.sfaticket import SfaTicket
-from sfa.trust.credential import Credential
-
-from sfa.util.sfalogging import logger
-from sfa.util.xrn import Xrn, urn_to_hrn
-from sfa.util.version import version_core
-from sfa.util.callids import Callids
-from sfa.util.cache import Cache
-
-from sfa.client.multiclient import MultiClient
-
-from sfa.rspecs.rspec_converter import RSpecConverter
-from sfa.rspecs.version_manager import VersionManager
-from sfa.rspecs.rspec import RSpec
-
-from sfa.client.client_helper import sfa_to_pg_users_arg
-from sfa.client.return_value import ReturnValue
-
-
-class SliceManager:
-
- # the cache instance is a class member so it survives across incoming
- # requests
- cache = None
-
- def __init__(self, config):
- self.cache = None
- if config.SFA_SM_CACHING:
- if SliceManager.cache is None:
- SliceManager.cache = Cache()
- self.cache = SliceManager.cache
-
- def GetVersion(self, api, options):
- # peers explicitly in aggregates.xml
- peers = dict([(peername, interface.get_url()) for (peername, interface) in api.aggregates.iteritems()
- if peername != api.hrn])
- version_manager = VersionManager()
- ad_rspec_versions = []
- request_rspec_versions = []
- cred_types = [{'geni_type': 'geni_sfa',
- 'geni_version': str(i)} for i in range(4)[-2:]]
- for rspec_version in version_manager.versions:
- if rspec_version.content_type in ['*', 'ad']:
- ad_rspec_versions.append(rspec_version.to_dict())
- if rspec_version.content_type in ['*', 'request']:
- request_rspec_versions.append(rspec_version.to_dict())
- xrn = Xrn(api.hrn, 'authority+sm')
- version_more = {
- 'interface': 'slicemgr',
- 'sfa': 2,
- 'geni_api': 3,
- 'geni_api_versions': {'3': 'https://%s:%s' % (api.config.SFA_SM_HOST, api.config.SFA_SM_PORT)},
- 'hrn': xrn.get_hrn(),
- 'urn': xrn.get_urn(),
- 'peers': peers,
- # Accept operations that act on as subset of slivers in a given
- # state.
- 'geni_single_allocation': 0,
- # Multiple slivers can exist and be incrementally added, including
- # those which connect or overlap in some way.
- 'geni_allocate': 'geni_many',
- 'geni_credential_types': cred_types,
- }
- sm_version = version_core(version_more)
- # local aggregate if present needs to have localhost resolved
- if api.hrn in api.aggregates:
- local_am_url = api.aggregates[api.hrn].get_url()
- sm_version['peers'][api.hrn] = local_am_url.replace(
- 'localhost', sm_version['hostname'])
- return sm_version
-
- def drop_slicemgr_stats(self, rspec):
- try:
- stats_elements = rspec.xml.xpath('//statistics')
- for node in stats_elements:
- node.getparent().remove(node)
- except Exception as e:
- logger.warn("drop_slicemgr_stats failed: %s " % (str(e)))
-
- def add_slicemgr_stat(self, rspec, callname, aggname, elapsed, status, exc_info=None):
- try:
- stats_tags = rspec.xml.xpath('//statistics[@call="%s"]' % callname)
- if stats_tags:
- stats_tag = stats_tags[0]
- else:
- stats_tag = rspec.xml.root.add_element(
- "statistics", call=callname)
-
- stat_tag = stats_tag.add_element("aggregate", name=str(aggname),
- elapsed=str(elapsed), status=str(status))
-
- if exc_info:
- exc_tag = stat_tag.add_element(
- "exc_info", name=str(exc_info[1]))
-
- # formats the traceback as one big text blob
- #exc_tag.text = "\n".join(traceback.format_exception(exc_info[0], exc_info[1], exc_info[2]))
-
- # formats the traceback as a set of xml elements
- tb = traceback.extract_tb(exc_info[2])
- for item in tb:
- exc_frame = exc_tag.add_element("tb_frame", filename=str(item[0]),
- line=str(item[1]), func=str(item[2]), code=str(item[3]))
-
- except Exception as e:
- logger.warn("add_slicemgr_stat failed on %s: %s" %
- (aggname, str(e)))
-
- def ListResources(self, api, creds, options):
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return ""
-
- version_manager = VersionManager()
-
- def _ListResources(aggregate, server, credential, options):
- forward_options = copy(options)
- tStart = time.time()
- try:
- version = api.get_cached_server_version(server)
- # force ProtoGENI aggregates to give us a v2 RSpec
- forward_options['geni_rspec_version'] = options.get(
- 'geni_rspec_version')
- result = server.ListResources(credential, forward_options)
- return {"aggregate": aggregate, "result": result, "elapsed": time.time() - tStart, "status": "success"}
- except Exception as e:
- api.logger.log_exc("ListResources failed at %s" % (server.url))
- return {"aggregate": aggregate, "elapsed": time.time() - tStart, "status": "exception", "exc_info": sys.exc_info()}
-
- # get slice's hrn from options
- xrn = options.get('geni_slice_urn', '')
- (hrn, type) = urn_to_hrn(xrn)
- if 'geni_compressed' in options:
- del(options['geni_compressed'])
-
- # get the rspec's return format from options
- rspec_version = version_manager.get_version(
- options.get('geni_rspec_version'))
- version_string = "rspec_%s" % (rspec_version)
-
- # look in cache first
- cached_requested = options.get('cached', True)
- if not xrn and self.cache and cached_requested:
- rspec = self.cache.get(version_string)
- if rspec:
- api.logger.debug(
- "SliceManager.ListResources returns cached advertisement")
- return rspec
-
- # get the callers hrn
- valid_cred = api.auth.checkCredentials(creds, 'listnodes', hrn)[0]
- caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn()
-
- # attempt to use delegated credential first
- cred = api.getDelegatedCredential(creds)
- if not cred:
- cred = api.getCredential()
- multiclient = MultiClient()
- for aggregate in api.aggregates:
- # prevent infinite loop. Dont send request back to caller
- # unless the caller is the aggregate's SM
- if caller_hrn == aggregate and aggregate != api.hrn:
- continue
-
- # get the rspec from the aggregate
- interface = api.aggregates[aggregate]
- server = api.server_proxy(interface, cred)
- multiclient.run(_ListResources, aggregate, server, [cred], options)
-
- results = multiclient.get_results()
- rspec_version = version_manager.get_version(
- options.get('geni_rspec_version'))
- if xrn:
- result_version = version_manager._get_version(
- rspec_version.type, rspec_version.version, 'manifest')
- else:
- result_version = version_manager._get_version(
- rspec_version.type, rspec_version.version, 'ad')
- rspec = RSpec(version=result_version)
- for result in results:
- self.add_slicemgr_stat(rspec, "ListResources", result["aggregate"], result["elapsed"],
- result["status"], result.get("exc_info", None))
- if result["status"] == "success":
- res = result['result']['value']
- try:
- rspec.version.merge(ReturnValue.get_value(res))
- except:
- api.logger.log_exc(
- "SM.ListResources: Failed to merge aggregate rspec")
-
- # cache the result
- if self.cache and not xrn:
- api.logger.debug("SliceManager.ListResources caches advertisement")
- self.cache.add(version_string, rspec.toxml())
-
- return rspec.toxml()
-
- def Allocate(self, api, xrn, creds, rspec_str, expiration, options):
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return ""
-
- version_manager = VersionManager()
-
- def _Allocate(aggregate, server, xrn, credential, rspec, options):
- tStart = time.time()
- try:
- # Need to call GetVersion at an aggregate to determine the supported
- # rspec type/format beofre calling CreateSliver at an Aggregate.
- #server_version = api.get_cached_server_version(server)
- # if 'sfa' not in server_version and 'geni_api' in server_version:
- # sfa aggregtes support both sfa and pg rspecs, no need to convert
- # if aggregate supports sfa rspecs. otherwise convert to pg rspec
- #rspec = RSpec(RSpecConverter.to_pg_rspec(rspec, 'request'))
- #filter = {'component_manager_id': server_version['urn']}
- # rspec.filter(filter)
- #rspec = rspec.toxml()
- result = server.Allocate(xrn, credential, rspec, options)
- return {"aggregate": aggregate, "result": result, "elapsed": time.time() - tStart, "status": "success"}
- except:
- logger.log_exc(
- 'Something wrong in _Allocate with URL %s' % server.url)
- return {"aggregate": aggregate, "elapsed": time.time() - tStart, "status": "exception", "exc_info": sys.exc_info()}
-
- # Validate the RSpec against PlanetLab's schema --disabled for now
- # The schema used here needs to aggregate the PL and VINI schemas
- # schema = "/var/www/html/schemas/pl.rng"
- rspec = RSpec(rspec_str)
- # schema = None
- # if schema:
- # rspec.validate(schema)
-
- # if there is a <statistics> section, the aggregates don't care about it,
- # so delete it.
- self.drop_slicemgr_stats(rspec)
-
- # attempt to use delegated credential first
- cred = api.getDelegatedCredential(creds)
- if not cred:
- cred = api.getCredential()
-
- # get the callers hrn
- hrn, type = urn_to_hrn(xrn)
- valid_cred = api.auth.checkCredentials(creds, 'createsliver', hrn)[0]
- caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn()
- multiclient = MultiClient()
- for aggregate in api.aggregates:
- # prevent infinite loop. Dont send request back to caller
- # unless the caller is the aggregate's SM
- if caller_hrn == aggregate and aggregate != api.hrn:
- continue
- interface = api.aggregates[aggregate]
- server = api.server_proxy(interface, cred)
- # Just send entire RSpec to each aggregate
- multiclient.run(_Allocate, aggregate, server, xrn,
- [cred], rspec.toxml(), options)
-
- results = multiclient.get_results()
- manifest_version = version_manager._get_version(
- rspec.version.type, rspec.version.version, 'manifest')
- result_rspec = RSpec(version=manifest_version)
- geni_urn = None
- geni_slivers = []
-
- for result in results:
- self.add_slicemgr_stat(result_rspec, "Allocate", result["aggregate"], result["elapsed"],
- result["status"], result.get("exc_info", None))
- if result["status"] == "success":
- try:
- res = result['result']['value']
- geni_urn = res['geni_urn']
- result_rspec.version.merge(
- ReturnValue.get_value(res['geni_rspec']))
- geni_slivers.extend(res['geni_slivers'])
- except:
- api.logger.log_exc(
- "SM.Allocate: Failed to merge aggregate rspec")
- return {
- 'geni_urn': geni_urn,
- 'geni_rspec': result_rspec.toxml(),
- 'geni_slivers': geni_slivers
- }
-
- def Provision(self, api, xrn, creds, options):
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return ""
-
- version_manager = VersionManager()
-
- def _Provision(aggregate, server, xrn, credential, options):
- tStart = time.time()
- try:
- # Need to call GetVersion at an aggregate to determine the supported
- # rspec type/format beofre calling CreateSliver at an
- # Aggregate.
- server_version = api.get_cached_server_version(server)
- result = server.Provision(xrn, credential, options)
- return {"aggregate": aggregate, "result": result, "elapsed": time.time() - tStart, "status": "success"}
- except:
- logger.log_exc(
- 'Something wrong in _Allocate with URL %s' % server.url)
- return {"aggregate": aggregate, "elapsed": time.time() - tStart, "status": "exception", "exc_info": sys.exc_info()}
-
- # attempt to use delegated credential first
- cred = api.getDelegatedCredential(creds)
- if not cred:
- cred = api.getCredential()
-
- # get the callers hrn
- valid_cred = api.auth.checkCredentials(creds, 'createsliver', xrn)[0]
- caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn()
- multiclient = MultiClient()
- for aggregate in api.aggregates:
- # prevent infinite loop. Dont send request back to caller
- # unless the caller is the aggregate's SM
- if caller_hrn == aggregate and aggregate != api.hrn:
- continue
- interface = api.aggregates[aggregate]
- server = api.server_proxy(interface, cred)
- # Just send entire RSpec to each aggregate
- multiclient.run(_Provision, aggregate,
- server, xrn, [cred], options)
-
- results = multiclient.get_results()
- manifest_version = version_manager._get_version(
- 'GENI', '3', 'manifest')
- result_rspec = RSpec(version=manifest_version)
- geni_slivers = []
- geni_urn = None
- for result in results:
- self.add_slicemgr_stat(result_rspec, "Provision", result["aggregate"], result["elapsed"],
- result["status"], result.get("exc_info", None))
- if result["status"] == "success":
- try:
- res = result['result']['value']
- geni_urn = res['geni_urn']
- result_rspec.version.merge(
- ReturnValue.get_value(res['geni_rspec']))
- geni_slivers.extend(res['geni_slivers'])
- except:
- api.logger.log_exc(
- "SM.Provision: Failed to merge aggregate rspec")
- return {
- 'geni_urn': geni_urn,
- 'geni_rspec': result_rspec.toxml(),
- 'geni_slivers': geni_slivers
- }
-
- def Renew(self, api, xrn, creds, expiration_time, options):
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return True
-
- def _Renew(aggregate, server, xrn, creds, expiration_time, options):
- try:
- result = server.Renew(xrn, creds, expiration_time, options)
- if type(result) != dict:
- result = {'code': {'geni_code': 0}, 'value': result}
- result['aggregate'] = aggregate
- return result
- except:
- logger.log_exc(
- 'Something wrong in _Renew with URL %s' % server.url)
- return {'aggregate': aggregate, 'exc_info': traceback.format_exc(),
- 'code': {'geni_code': -1},
- 'value': False, 'output': ""}
-
- # get the callers hrn
- valid_cred = api.auth.checkCredentials(creds, 'renewsliver', xrn)[0]
- caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn()
-
- # attempt to use delegated credential first
- cred = api.getDelegatedCredential(creds)
- if not cred:
- cred = api.getCredential(minimumExpiration=31 * 86400)
- multiclient = MultiClient()
- for aggregate in api.aggregates:
- # prevent infinite loop. Dont send request back to caller
- # unless the caller is the aggregate's SM
- if caller_hrn == aggregate and aggregate != api.hrn:
- continue
- interface = api.aggregates[aggregate]
- server = api.server_proxy(interface, cred)
- multiclient.run(_Renew, aggregate, server, xrn, [
- cred], expiration_time, options)
-
- results = multiclient.get_results()
-
- geni_code = 0
- geni_output = ",".join([x.get('output', "") for x in results])
- geni_value = reduce(lambda x, y: x and y, [result.get(
- 'value', False) for result in results], True)
- for agg_result in results:
- agg_geni_code = agg_result['code'].get('geni_code', 0)
- if agg_geni_code:
- geni_code = agg_geni_code
-
- results = {'aggregates': results, 'code': {
- 'geni_code': geni_code}, 'value': geni_value, 'output': geni_output}
-
- return results
-
- def Delete(self, api, xrn, creds, options):
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return ""
-
- def _Delete(server, xrn, creds, options):
- return server.Delete(xrn, creds, options)
-
- (hrn, type) = urn_to_hrn(xrn[0])
- # get the callers hrn
- valid_cred = api.auth.checkCredentials(creds, 'deletesliver', hrn)[0]
- caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn()
-
- # attempt to use delegated credential first
- cred = api.getDelegatedCredential(creds)
- if not cred:
- cred = api.getCredential()
- multiclient = MultiClient()
- for aggregate in api.aggregates:
- # prevent infinite loop. Dont send request back to caller
- # unless the caller is the aggregate's SM
- if caller_hrn == aggregate and aggregate != api.hrn:
- continue
- interface = api.aggregates[aggregate]
- server = api.server_proxy(interface, cred)
- multiclient.run(_Delete, server, xrn, [cred], options)
-
- results = []
- for result in multiclient.get_results():
- results += ReturnValue.get_value(result)
- return results
-
- # first draft at a merging SliverStatus
- def Status(self, api, slice_xrn, creds, options):
- def _Status(server, xrn, creds, options):
- return server.Status(xrn, creds, options)
-
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return {}
- # attempt to use delegated credential first
- cred = api.getDelegatedCredential(creds)
- if not cred:
- cred = api.getCredential()
- multiclient = MultiClient()
- for aggregate in api.aggregates:
- interface = api.aggregates[aggregate]
- server = api.server_proxy(interface, cred)
- multiclient.run(_Status, server, slice_xrn, [cred], options)
- results = [ReturnValue.get_value(result)
- for result in multiclient.get_results()]
-
- # get rid of any void result - e.g. when call_id was hit, where by
- # convention we return {}
- results = [
- result for result in results if result and result['geni_slivers']]
-
- # do not try to combine if there's no result
- if not results:
- return {}
-
- # otherwise let's merge stuff
- geni_slivers = []
- geni_urn = None
- for result in results:
- try:
- geni_urn = result['geni_urn']
- geni_slivers.extend(result['geni_slivers'])
- except:
- api.logger.log_exc(
- "SM.Provision: Failed to merge aggregate rspec")
- return {
- 'geni_urn': geni_urn,
- 'geni_slivers': geni_slivers
- }
-
- def Describe(self, api, creds, xrns, options):
- def _Describe(server, xrn, creds, options):
- return server.Describe(xrn, creds, options)
-
- call_id = options.get('call_id')
- if Callids().already_handled(call_id):
- return {}
- # attempt to use delegated credential first
- cred = api.getDelegatedCredential(creds)
- if not cred:
- cred = api.getCredential()
- multiclient = MultiClient()
- for aggregate in api.aggregates:
- interface = api.aggregates[aggregate]
- server = api.server_proxy(interface, cred)
- multiclient.run(_Describe, server, xrns, [cred], options)
- results = [ReturnValue.get_value(result)
- for result in multiclient.get_results()]
-
- # get rid of any void result - e.g. when call_id was hit, where by
- # convention we return {}
- results = [
- result for result in results if result and result.get('geni_urn')]
-
- # do not try to combine if there's no result
- if not results:
- return {}
-
- # otherwise let's merge stuff
- version_manager = VersionManager()
- manifest_version = version_manager._get_version(
- 'GENI', '3', 'manifest')
- result_rspec = RSpec(version=manifest_version)
- geni_slivers = []
- geni_urn = None
- for result in results:
- try:
- geni_urn = result['geni_urn']
- result_rspec.version.merge(
- ReturnValue.get_value(result['geni_rspec']))
- geni_slivers.extend(result['geni_slivers'])
- except:
- api.logger.log_exc(
- "SM.Provision: Failed to merge aggregate rspec")
- return {
- 'geni_urn': geni_urn,
- 'geni_rspec': result_rspec.toxml(),
- 'geni_slivers': geni_slivers
- }
-
- def PerformOperationalAction(self, api, xrn, creds, action, options):
- # get the callers hrn
- valid_cred = api.auth.checkCredentials(creds, 'createsliver', xrn)[0]
- caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn()
-
- # attempt to use delegated credential first
- cred = api.getDelegatedCredential(creds)
- if not cred:
- cred = api.getCredential()
- multiclient = MultiClient()
- for aggregate in api.aggregates:
- # prevent infinite loop. Dont send request back to caller
- # unless the caller is the aggregate's SM
- if caller_hrn == aggregate and aggregate != api.hrn:
- continue
- interface = api.aggregates[aggregate]
- server = api.server_proxy(interface, cred)
- multiclient.run(server.PerformOperationalAction,
- xrn, [cred], action, options)
- multiclient.get_results()
- return 1
-
- def Shutdown(self, api, xrn, creds, options=None):
- if options is None:
- options = {}
- xrn = Xrn(xrn)
- # get the callers hrn
- valid_cred = api.auth.checkCredentials(creds, 'stopslice', xrn.hrn)[0]
- caller_hrn = Credential(cred=valid_cred).get_gid_caller().get_hrn()
-
- # attempt to use delegated credential first
- cred = api.getDelegatedCredential(creds)
- if not cred:
- cred = api.getCredential()
- multiclient = MultiClient()
- for aggregate in api.aggregates:
- # prevent infinite loop. Dont send request back to caller
- # unless the caller is the aggregate's SM
- if caller_hrn == aggregate and aggregate != api.hrn:
- continue
- interface = api.aggregates[aggregate]
- server = api.server_proxy(interface, cred)
- multiclient.run(server.Shutdown, xrn.urn, cred)
- multiclient.get_results()
- return 1
+++ /dev/null
-#
-# an adapter on top of driver implementing AM API v2 to be AM API v3 compliant
-#
-import sys
-from sfa.util.sfalogging import logger
-from sfa.util.xrn import Xrn, urn_to_hrn, hrn_to_urn, get_leaf, get_authority
-from sfa.util.cache import Cache
-from sfa.rspecs.rspec import RSpec
-from sfa.storage.model import SliverAllocation
-
-
-class V2ToV3Adapter:
-
- def __init__(self, api):
- config = api.config
- flavour = config.SFA_GENERIC_FLAVOUR
- # to be cleaned
- if flavour == "nitos":
- from sfa.nitos.nitosdriver import NitosDriver
- self.driver = NitosDriver(api)
- elif flavour == "fd":
- from sfa.federica.fddriver import FdDriver
- self.driver = FdDriver(api)
- else:
- logger.error(
- "V2ToV3Adapter: Unknown Flavour !!!\n Supported Flavours: nitos, fd")
-
- # Caching
- if config.SFA_AGGREGATE_CACHING:
- if self.driver.cache:
- self.cache = self.driver.cache
- else:
- self.cache = Cache()
-
- def __getattr__(self, name):
- def func(*args, **kwds):
- if name == "list_resources":
- (version, options) = args
- slice_urn = slice_hrn = None
- creds = []
- rspec = getattr(self.driver, "list_resources")(
- slice_urn, slice_hrn, [], options)
- result = rspec
-
- elif name == "describe":
- (urns, version, options) = args
- slice_urn = urns[0]
- slice_hrn, type = urn_to_hrn(slice_urn)
- creds = []
- rspec = getattr(self.driver, "list_resources")(
- slice_urn, slice_hrn, creds, options)
-
- # SliverAllocation
- if len(urns) == 1 and Xrn(xrn=urns[0]).type == 'slice':
- constraint = SliverAllocation.slice_urn.in_(urns)
- else:
- constraint = SliverAllocation.sliver_id.in_(urns)
-
- sliver_allocations = self.driver.api.dbsession().query(
- SliverAllocation).filter(constraint)
- sliver_status = getattr(self.driver, "sliver_status")(
- slice_urn, slice_hrn)
- if 'geni_expires' in sliver_status.keys():
- geni_expires = sliver_status['geni_expires']
- else:
- geni_expires = ''
-
- geni_slivers = []
- for sliver_allocation in sliver_allocations:
- geni_sliver = {}
- geni_sliver['geni_expires'] = geni_expires
- geni_sliver[
- 'geni_allocation'] = sliver_allocation.allocation_state
- geni_sliver[
- 'geni_sliver_urn'] = sliver_allocation.sliver_id
- geni_sliver['geni_error'] = ''
- if geni_sliver['geni_allocation'] == 'geni_allocated':
- geni_sliver[
- 'geni_operational_status'] = 'geni_pending_allocation'
- else:
- geni_sliver['geni_operational_status'] = 'geni_ready'
- geni_slivers.append(geni_sliver)
-
- result = {'geni_urn': slice_urn,
- 'geni_rspec': rspec,
- 'geni_slivers': geni_slivers}
-
- elif name == "allocate":
- (slice_urn, rspec_string, expiration, options) = args
- slice_hrn, type = urn_to_hrn(slice_urn)
- creds = []
- users = options.get('sfa_users', [])
- manifest_string = getattr(self.driver, "create_sliver")(
- slice_urn, slice_hrn, creds, rspec_string, users, options)
-
- # slivers allocation
- rspec = RSpec(manifest_string)
- slivers = rspec.version.get_nodes_with_slivers()
-
- # SliverAllocation
- for sliver in slivers:
- client_id = sliver['client_id']
- component_id = sliver['component_id']
- component_name = sliver['component_name']
- slice_name = slice_hrn.replace('.', '-')
- component_short_name = component_name.split('.')[0]
- # self.driver.hrn
- sliver_hrn = '%s.%s-%s' % (self.driver.hrn,
- slice_name, component_short_name)
- sliver_id = Xrn(sliver_hrn, type='sliver').urn
- record = SliverAllocation(sliver_id=sliver_id,
- client_id=client_id,
- component_id=component_id,
- slice_urn=slice_urn,
- allocation_state='geni_allocated')
-
- record.sync(self.driver.api.dbsession())
-
- # return manifest
- rspec_version = RSpec(rspec_string).version
- rspec_version_str = "%s" % rspec_version
- options['geni_rspec_version'] = {'version': rspec_version_str.split(
- ' ')[1], 'type': rspec_version_str.lower().split(' ')[0]}
- result = self.describe([slice_urn], rspec_version, options)
-
- elif name == "provision":
- (urns, options) = args
- if len(urns) == 1 and Xrn(xrn=urns[0]).type == 'slice':
- constraint = SliverAllocation.slice_urn.in_(urns)
- else:
- constraint = SliverAllocation.sliver_id.in_(urns)
-
- dbsession = self.driver.api.dbsession()
- sliver_allocations = dbsession.query(
- SliverAllocation).filter(constraint)
- for sliver_allocation in sliver_allocations:
- sliver_allocation.allocation_state = 'geni_provisioned'
-
- dbsession.commit()
- result = self.describe(urns, '', options)
-
- elif name == "status":
- urns = args
- options = {}
- options['geni_rspec_version'] = {
- 'version': '3', 'type': 'GENI'}
- descr = self.describe(urns[0], '', options)
- result = {'geni_urn': descr['geni_urn'],
- 'geni_slivers': descr['geni_slivers']}
-
- elif name == "delete":
- (urns, options) = args
- slice_urn = urns[0]
- slice_hrn, type = urn_to_hrn(slice_urn)
- creds = []
- options['geni_rspec_version'] = {
- 'version': '3', 'type': 'GENI'}
- descr = self.describe(urns, '', options)
- result = []
- for sliver_allocation in descr['geni_slivers']:
- geni_sliver = {'geni_sliver_urn': sliver_allocation['geni_sliver_urn'],
- 'geni_allocation_status': 'geni_unallocated',
- 'geni_expires': sliver_allocation['geni_expires'],
- 'geni_error': sliver_allocation['geni_error']}
-
- result.append(geni_sliver)
-
- getattr(self.driver, "delete_sliver")(
- slice_urn, slice_hrn, creds, options)
-
- # SliverAllocation
- constraints = SliverAllocation.slice_urn.in_(urns)
- dbsession = self.driver.api.dbsession()
- sliver_allocations = dbsession.query(
- SliverAllocation).filter(constraints)
- sliver_ids = [
- sliver_allocation.sliver_id for sliver_allocation in sliver_allocations]
- SliverAllocation.delete_allocations(sliver_ids, dbsession)
-
- elif name == "renew":
- (urns, expiration_time, options) = args
- slice_urn = urns[0]
- slice_hrn, type = urn_to_hrn(slice_urn)
- creds = []
-
- getattr(self.driver, "renew_sliver")(
- slice_urn, slice_hrn, creds, expiration_time, options)
-
- options['geni_rspec_version'] = {
- 'version': '3', 'type': 'GENI'}
- descr = self.describe(urns, '', options)
- result = descr['geni_slivers']
-
- elif name == "perform_operational_action":
- (urns, action, options) = args
- options['geni_rspec_version'] = {
- 'version': '3', 'type': 'GENI'}
- result = self.describe(urns, '', options)['geni_slivers']
-
- else:
- # same as v2 ( registry methods)
- result = getattr(self.driver, name)(*args, **kwds)
- return result
- return func
class Allocate(Method):
"""
- Allocate resources as described in a request RSpec argument
- to a slice with the named URN. On success, one or more slivers
- are allocated, containing resources satisfying the request, and
- assigned to the given slice. This method returns a listing and
- description of the resources reserved for the slice by this
- operation, in the form of a manifest RSpec. Allocated slivers
- are held for an aggregate-determined period. Clients must Renew
- or Provision slivers before the expiration time (given in the
+ Allocate resources as described in a request RSpec argument
+ to a slice with the named URN. On success, one or more slivers
+ are allocated, containing resources satisfying the request, and
+ assigned to the given slice. This method returns a listing and
+ description of the resources reserved for the slice by this
+ operation, in the form of a manifest RSpec. Allocated slivers
+ are held for an aggregate-determined period. Clients must Renew
+ or Provision slivers before the expiration time (given in the
return struct), or the aggregate will automatically Delete them.
@param slice_urn (string) URN of slice to allocate to
This option can take 3 values
(*) options['pltags'] == 'ignore' (default)
- This is the recommended mode; in this mode all slice tags passed
- here are ignore, which correspond to the <planetlab:attribute> XML tags in
+ This is the recommended mode; in this mode all slice tags passed
+ here are ignored, which correspond to the <planetlab:attribute> XML tags in
the <sliver_type> areas of incoming rspec to Allocate.
In other words you are guaranteed to leave slice tags alone.
(*) options['pltags'] == 'append'
- All incoming slice tags are added to corresponding slivers,
+ All incoming slice tags are added to corresponding slivers,
unless an exact match can be found in the PLC db
(*) options['pltags'] == 'sync'
The historical mode, that attempts to leave the PLC db in a state
See also http://svn.planet-lab.org/wiki/SFASliceTags
"""
- interfaces = ['aggregate', 'slicemgr']
+ interfaces = ['aggregate']
accepts = [
Parameter(str, "Slice URN"),
Parameter(type([dict]), "List of credentials"),
# the slivers should expire.
expiration = datetime_to_string(the_credential.expiration)
- self.api.logger.debug(
+ logger.debug(
"Allocate, received expiration from credential: %s" % expiration)
# turned off, as passing an empty rspec is indeed useful for cleaning up the slice
# flter rspec through sfatables
if self.api.interface in ['aggregate']:
chain_name = 'INCOMING'
- elif self.api.interface in ['slicemgr']:
- chain_name = 'FORWARD-INCOMING'
- self.api.logger.debug("Allocate: sfatables on chain %s" % chain_name)
+ logger.debug("Allocate: sfatables on chain %s" % chain_name)
actual_caller_hrn = the_credential.actual_caller_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, actual_caller_hrn, xrn.get_hrn(), self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, actual_caller_hrn, xrn.get_hrn(), self.name))
rspec = run_sfatables(chain_name, xrn.get_hrn(),
actual_caller_hrn, rspec)
# turned off, as passing an empty rspec is indeed useful for cleaning up the slice
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.storage.parameter import Parameter, Mixed
from sfa.trust.credential import Credential
# log the call
origin_hrn = Credential(
string=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, xrn, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, xrn, self.name))
return self.api.manager.CreateGid(self.api, xrn, cert)
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
+
from sfa.storage.parameter import Parameter, Mixed
from sfa.trust.auth import Auth
from sfa.trust.credential import Credential
class Delete(Method):
"""
- Remove the slice or slivers and free the allocated resources
+ Remove the slice or slivers and free the allocated resources
@param xrns human readable name of slice to instantiate (hrn or urn)
@param creds credential string specifying the rights of the caller
- @return 1 is successful, faults otherwise
+ @return 1 if successful, faults otherwise
"""
- interfaces = ['aggregate', 'slicemgr', 'component']
+ interfaces = ['aggregate']
accepts = [
Parameter(
returns = Parameter(int, "1 if successful")
def call(self, xrns, creds, options):
- valid_creds = self.api.auth.checkCredentialsSpeaksFor(creds, 'deletesliver', xrns,
- check_sliver_callback=self.api.driver.check_sliver_credentials,
- options=options)
+ valid_creds = self.api.auth.checkCredentialsSpeaksFor(
+ creds, 'deletesliver', xrns,
+ check_sliver_callback=self.api.driver.check_sliver_credentials,
+ options=options)
# log the call
origin_hrn = Credential(cred=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, xrns, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, xrns, self.name))
return self.api.manager.Delete(self.api, xrns, creds, options)
from sfa.util.method import Method
from sfa.util.sfatablesRuntime import run_sfatables
from sfa.util.faults import SfaInvalidArgument
+from sfa.util.sfalogging import logger
+
from sfa.trust.credential import Credential
from sfa.storage.parameter import Parameter, Mixed
class Describe(Method):
"""
- Retrieve a manifest RSpec describing the resources contained by the
- named entities, e.g. a single slice or a set of the slivers in a
- slice. This listing and description should be sufficiently
- descriptive to allow experimenters to use the resources.
+ Retrieve a manifest RSpec describing the resources contained by the
+ named entities, e.g. a single slice or a set of the slivers in a
+ slice. This listing and description should be sufficiently
+ descriptive to allow experimenters to use the resources.
@param credential list
@param options dictionary
@return dict
"""
- interfaces = ['aggregate', 'slicemgr']
+ interfaces = ['aggregate']
accepts = [
Parameter(type([str]), "List of URNs"),
Mixed(Parameter(str, "Credential string"),
returns = Parameter(str, "List of resources")
def call(self, urns, creds, options):
- self.api.logger.info("interface: %s\tmethod-name: %s" %
- (self.api.interface, self.name))
+ logger.info("interface: %s\tmethod-name: %s" %
+ (self.api.interface, self.name))
# client must specify a version
if not options.get('geni_rspec_version'):
# filter rspec through sfatables
if self.api.interface in ['aggregate']:
chain_name = 'OUTGOING'
- elif self.api.interface in ['slicemgr']:
- chain_name = 'FORWARD-OUTGOING'
- self.api.logger.debug(
+ logger.debug(
"ListResources: sfatables on chain %s" % chain_name)
desc['geni_rspec'] = run_sfatables(
chain_name, '', origin_hrn, desc['geni_rspec'])
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.trust.credential import Credential
@param cred credential object specifying rights of the caller
@param type type of object (user | slice | node | authority )
- @return the string representation of a credential object
+ @return the string representation of a credential object
"""
interfaces = ['registry']
# log the call
origin_hrn = Credential(
string=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, hrn, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, hrn, self.name))
return self.api.manager.GetCredential(self.api, xrn, type, self.api.auth.client_gid.get_urn())
from sfa.util.faults import RecordNotFound, ConnectionKeyGIDMismatch
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.trust.certificate import Certificate
class GetSelfCredential(Method):
"""
Retrive a credential for an object
- @param cert certificate string
+ @param cert certificate string
@param type type of object (user | slice | sa | ma | node)
@param hrn human readable name of object (hrn or urn)
- @return the string representation of a credential object
+ @return the string representation of a credential object
"""
interfaces = ['registry']
self.api.auth.verify_object_belongs_to_me(hrn)
origin_hrn = Certificate(string=cert).get_subject()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, hrn, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, hrn, self.name))
# authenticate the gid
# import here so we can load this module at build-time for sfa2wsdl
certificate = Certificate(string=cert)
if not certificate.is_pubkey(gid.get_pubkey()):
for (obj, name) in [(certificate, "CERT"), (gid, "GID"), ]:
- self.api.logger.debug("ConnectionKeyGIDMismatch, %s pubkey: %s" % (
+ logger.debug("ConnectionKeyGIDMismatch, %s pubkey: %s" % (
name, obj.get_pubkey().get_pubkey_string()))
- self.api.logger.debug(
+ logger.debug(
"ConnectionKeyGIDMismatch, %s dump: %s" % (name, obj.dump_string()))
if hasattr(obj, 'filename'):
- self.api.logger.debug(
+ logger.debug(
"ConnectionKeyGIDMismatch, %s filename: %s" % (name, obj.filename))
raise ConnectionKeyGIDMismatch(gid.get_subject())
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.storage.parameter import Parameter
Returns this GENI Aggregate Manager's Version Information
@return version
"""
- interfaces = ['registry', 'aggregate', 'slicemgr', 'component']
+ interfaces = ['registry', 'aggregate']
accepts = [
Parameter(dict, "Options")
]
def call(self, options=None):
if options is None:
options = {}
- self.api.logger.info("interface: %s\tmethod-name: %s" %
- (self.api.interface, self.name))
+ logger.info("interface: %s\tmethod-name: %s" %
+ (self.api.interface, self.name))
return self.api.manager.GetVersion(self.api, options)
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.trust.credential import Credential
class List(Method):
"""
- List the records in an authority.
+ List the records in an authority.
@param cred credential string specifying the rights of the caller
@param hrn human readable name of authority to list (hrn or urn)
- @return list of record dictionaries
+ @return list of record dictionaries
"""
interfaces = ['registry']
# log the call
origin_hrn = Credential(
string=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, hrn, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, hrn, self.name))
return self.api.manager.List(self.api, xrn, options=options)
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
from sfa.util.sfatablesRuntime import run_sfatables
+from sfa.util.sfalogging import logger
+
from sfa.util.faults import SfaInvalidArgument
from sfa.trust.credential import Credential
@param options dictionary
@return string
"""
- interfaces = ['aggregate', 'slicemgr']
+ interfaces = ['aggregate']
accepts = [
Mixed(Parameter(str, "Credential string"),
Parameter(type([str]), "List of credentials")),
returns = Parameter(str, "List of resources")
def call(self, creds, options):
- self.api.logger.info("interface: %s\tmethod-name: %s" %
- (self.api.interface, self.name))
+ logger.info("interface: %s\tmethod-name: %s" %
+ (self.api.interface, self.name))
# client must specify a version
if not options.get('geni_rspec_version'):
# filter rspec through sfatables
if self.api.interface in ['aggregate']:
chain_name = 'OUTGOING'
- elif self.api.interface in ['slicemgr']:
- chain_name = 'FORWARD-OUTGOING'
- self.api.logger.debug(
+ logger.debug(
"ListResources: sfatables on chain %s" % chain_name)
filtered_rspec = run_sfatables(chain_name, '', origin_hrn, rspec)
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
from sfa.util.sfatablesRuntime import run_sfatables
+from sfa.util.sfalogging import logger
+
from sfa.trust.credential import Credential
+
from sfa.storage.parameter import Parameter, Mixed
class PerformOperationalAction(Method):
"""
- Request that the named geni_allocated slivers be made
- geni_provisioned, instantiating or otherwise realizing the
- resources, such that they have a valid geni_operational_status
- and may possibly be made geni_ready for experimenter use. This
- operation is synchronous, but may start a longer process, such
+ Request that the named geni_allocated slivers be made
+ geni_provisioned, instantiating or otherwise realizing the
+ resources, such that they have a valid geni_operational_status
+ and may possibly be made geni_ready for experimenter use. This
+ operation is synchronous, but may start a longer process, such
as creating and imaging a virtual machine
@param slice urns ([string]) URNs of slivers to provision to
@param options (dict) options
"""
- interfaces = ['aggregate', 'slicemgr']
+ interfaces = ['aggregate']
accepts = [
Parameter(type([str]), "URNs"),
Parameter(type([dict]), "Credentials"),
returns = Parameter(dict, "Provisioned Resources")
def call(self, xrns, creds, action, options):
- self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, xrns, self.name))
+ logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, xrns, self.name))
(speaking_for, _) = urn_to_hrn(options.get('geni_speaking_for'))
# Find the valid credentials
- valid_creds = self.api.auth.checkCredentialsSpeaksFor(creds, 'createsliver', xrns,
- check_sliver_callback=self.api.driver.check_sliver_credentials,
- options=options)
+ valid_creds = self.api.auth.checkCredentialsSpeaksFor(
+ creds, 'createsliver', xrns,
+ check_sliver_callback=self.api.driver.check_sliver_credentials,
+ options=options)
origin_hrn = Credential(cred=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, xrns, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, xrns, self.name))
result = self.api.manager.PerformOperationalAction(
self.api, xrns, creds, action, options)
return result
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
from sfa.util.sfatablesRuntime import run_sfatables
+from sfa.util.sfalogging import logger
+
from sfa.trust.credential import Credential
+
from sfa.storage.parameter import Parameter, Mixed
+
from sfa.rspecs.rspec import RSpec
class Provision(Method):
"""
- Request that the named geni_allocated slivers be made
- geni_provisioned, instantiating or otherwise realizing the
- resources, such that they have a valid geni_operational_status
- and may possibly be made geni_ready for experimenter use. This
- operation is synchronous, but may start a longer process, such
+ Request that the named geni_allocated slivers be made
+ geni_provisioned, instantiating or otherwise realizing the
+ resources, such that they have a valid geni_operational_status
+ and may possibly be made geni_ready for experimenter use. This
+ operation is synchronous, but may start a longer process, such
as creating and imaging a virtual machine
@param slice urns ([string]) URNs of slivers to provision to
@param options (dict) options
"""
- interfaces = ['aggregate', 'slicemgr']
+ interfaces = ['aggregate']
accepts = [
Parameter(type([str]), "URNs"),
Parameter(type([dict]), "Credentials"),
returns = Parameter(dict, "Provisioned Resources")
def call(self, xrns, creds, options):
- self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, xrns, self.name))
+ logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, xrns, self.name))
# Find the valid credentials
- valid_creds = self.api.auth.checkCredentialsSpeaksFor(creds, 'createsliver', xrns,
- check_sliver_callback=self.api.driver.check_sliver_credentials,
- options=options)
+ valid_creds = self.api.auth.checkCredentialsSpeaksFor(
+ creds, 'createsliver', xrns,
+ check_sliver_callback=self.api.driver.check_sliver_credentials,
+ options=options)
origin_hrn = Credential(cred=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, xrns, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, xrns, self.name))
result = self.api.manager.Provision(self.api, xrns, creds, options)
return result
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.trust.credential import Credential
# log the call
origin_hrn = Credential(
string=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, hrn, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, hrn, self.name))
return self.api.manager.Register(self.api, record)
from sfa.util.xrn import Xrn
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.trust.credential import Credential
@param type record type
@param xrn human readable name of record to remove (hrn or urn)
- @return 1 if successful, faults otherwise
+ @return 1 if successful, faults otherwise
"""
interfaces = ['registry']
# log the call
origin_hrn = Credential(
string=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tmethod-name: %s\tcaller-hrn: %s\ttarget-urn: %s" % (
- self.api.interface, self.name, origin_hrn, xrn.get_urn()))
+ logger.info("interface: %s\tmethod-name: %s\tcaller-hrn: %s\ttarget-urn: %s" % (
+ self.api.interface, self.name, origin_hrn, xrn.get_urn()))
return self.api.manager.Remove(self.api, xrn)
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
from sfa.util.sfatime import utcparse, add_datetime
+from sfa.util.sfalogging import logger
from sfa.trust.credential import Credential
class Renew(Method):
"""
- Renews the resources in the specified slice or slivers by
+ Renews the resources in the specified slice or slivers by
extending the lifetime.
@param urns ([string]) List of URNs of to renew
@param expiration_time (string) requested time of expiration
@param options (dict) options
"""
- interfaces = ['aggregate', 'slicemgr']
+ interfaces = ['aggregate']
accepts = [
Parameter(type([str]), "Slice URN"),
Parameter(type([str]), "List of credentials"),
def call(self, urns, creds, expiration_time, options):
# Find the valid credentials
- valid_creds = self.api.auth.checkCredentialsSpeaksFor(creds, 'renewsliver', urns,
- check_sliver_callback=self.api.driver.check_sliver_credentials,
- options=options)
+ valid_creds = self.api.auth.checkCredentialsSpeaksFor(
+ creds, 'renewsliver', urns,
+ check_sliver_callback=self.api.driver.check_sliver_credentials,
+ options=options)
the_credential = Credential(cred=valid_creds[0])
actual_caller_hrn = the_credential.actual_caller_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-urns: %s\texpiration:%s\tmethod-name: %s" %
- (self.api.interface, actual_caller_hrn, urns, expiration_time, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-urns: %s\texpiration:%s\tmethod-name: %s" %
+ (self.api.interface, actual_caller_hrn, urns, expiration_time, self.name))
# extend as long as possible : take the min of requested and
# now+SFA_MAX_SLICE_RENEW
# Validate that the time does not go beyond the credential's expiration
# time
requested_expire = utcparse(expiration_time)
- self.api.logger.info("requested_expire = %s" % requested_expire)
+ logger.info("requested_expire = %s" % requested_expire)
credential_expire = the_credential.get_expiration()
- self.api.logger.info("credential_expire = %s" % credential_expire)
+ logger.info("credential_expire = %s" % credential_expire)
max_renew_days = int(self.api.config.SFA_MAX_SLICE_RENEW)
max_expire = datetime.datetime.utcnow() + datetime.timedelta(days=max_renew_days)
if requested_expire > credential_expire:
# used to throw an InsufficientRights exception here, this was not
# right
- self.api.logger.warning("Requested expiration %s, after credential expiration (%s) -> trimming to the latter/sooner" %
- (requested_expire, credential_expire))
+ logger.warning("Requested expiration %s, after credential expiration (%s) -> trimming to the latter/sooner" %
+ (requested_expire, credential_expire))
requested_expire = credential_expire
if requested_expire > max_expire:
# likewise
- self.api.logger.warning("Requested expiration %s, after maximal expiration %s days (%s) -> trimming to the latter/sooner" %
- (requested_expire, self.api.config.SFA_MAX_SLICE_RENEW, max_expire))
+ logger.warning("Requested expiration %s, after maximal expiration %s days (%s) -> trimming to the latter/sooner" %
+ (requested_expire, self.api.config.SFA_MAX_SLICE_RENEW, max_expire))
requested_expire = max_expire
return self.api.manager.Renew(self.api, urns, creds, requested_expire, options)
from sfa.util.xrn import Xrn, urn_to_hrn
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.trust.credential import Credential
Resolve a record.
@param cred credential string authorizing the caller
- @param hrn human readable name to resolve (hrn or urn)
- @return a list of record dictionaries or empty list
+ @param hrn human readable name to resolve (hrn or urn)
+ @return a list of record dictionaries or empty list
"""
interfaces = ['registry']
# log the call
origin_hrn = Credential(
string=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, hrns, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, hrns, self.name))
# send the call to the right manager
return self.api.manager.Resolve(self.api, xrns, type, details=details)
from sfa.storage.parameter import Parameter
from sfa.trust.credential import Credential
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
class Shutdown(Method):
"""
- Perform an emergency shut down of a sliver. This operation is intended for administrative use.
+ Perform an emergency shut down of a sliver. This operation is intended for administrative use.
The sliver is shut down but remains available for further forensics.
@param slice_urn (string) URN of slice to renew
- @param credentials ([string]) of credentials
+ @param credentials ([string]) of credentials
"""
- interfaces = ['aggregate', 'slicemgr']
+ interfaces = ['aggregate']
accepts = [
Parameter(str, "Slice URN"),
Parameter(type([dict]), "Credentials"),
def call(self, xrn, creds):
- valid_creds = self.api.auth.checkCredentials(creds, 'stopslice', xrn,
- check_sliver_callback=self.api.driver.check_sliver_credentials)
+ valid_creds = self.api.auth.checkCredentials(
+ creds, 'stopslice', xrn,
+ check_sliver_callback=self.api.driver.check_sliver_credentials)
# log the call
origin_hrn = Credential(cred=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, xrn, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, xrn, self.name))
return self.api.manager.Shutdown(self.api, xrn, creds)
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.storage.parameter import Parameter, Mixed
@param slice_urn (string) URN of slice to allocate to
"""
- interfaces = ['aggregate', 'slicemgr', 'component']
+ interfaces = ['aggregate']
accepts = [
Parameter(type([str]), "Slice or sliver URNs"),
Parameter(type([dict]), "credentials"),
returns = Parameter(dict, "Status details")
def call(self, xrns, creds, options):
- valid_creds = self.api.auth.checkCredentialsSpeaksFor(creds, 'sliverstatus', xrns,
- check_sliver_callback=self.api.driver.check_sliver_credentials,
- options=options)
+ valid_creds = self.api.auth.checkCredentialsSpeaksFor(
+ creds, 'sliverstatus', xrns,
+ check_sliver_callback=self.api.driver.check_sliver_credentials,
+ options=options)
- self.api.logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, xrns, self.name))
+ logger.info("interface: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, xrns, self.name))
return self.api.manager.Status(self.api, xrns, creds, options)
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.trust.credential import Credential
@param cred credential string specifying rights of the caller
@param record a record dictionary to be updated
- @return 1 if successful, faults otherwise
+ @return 1 if successful, faults otherwise
"""
interfaces = ['registry']
# log
origin_hrn = Credential(
string=valid_creds[0]).get_gid_caller().get_hrn()
- self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
- (self.api.interface, origin_hrn, hrn, self.name))
+ logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s" %
+ (self.api.interface, origin_hrn, hrn, self.name))
return self.api.manager.Update(self.api, record_dict)
from sfa.util.method import Method
+from sfa.util.sfalogging import logger
from sfa.trust.auth import Auth
from sfa.trust.credential import Credential
class get_trusted_certs(Method):
"""
@param cred credential string specifying the rights of the caller
- @return list of gid strings
+ @return list of gid strings
"""
- interfaces = ['registry', 'aggregate', 'slicemgr']
+ interfaces = ['registry', 'aggregate']
accepts = [
Mixed(Parameter(str, "Credential string"),
# If cred is not specified just return the gid for this interface.
# This is true when when a peer is attempting to initiate federation
# with this interface
- self.api.logger.debug("get_trusted_certs: %r" % cred)
+ logger.debug("get_trusted_certs: %r" % cred)
if not cred:
gid_strings = []
for gid in self.api.auth.trusted_cert_list:
+++ /dev/null
-#!/usr/bin/python
-from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn
-from sfa.util.sfatime import utcparse, datetime_to_string
-from sfa.util.sfalogging import logger
-
-from sfa.rspecs.rspec import RSpec
-from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.node import NodeElement
-from sfa.rspecs.elements.link import Link
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.login import Login
-from sfa.rspecs.elements.location import Location
-from sfa.rspecs.elements.position_3d import Position3D
-from sfa.rspecs.elements.interface import Interface
-from sfa.rspecs.elements.pltag import PLTag
-from sfa.rspecs.elements.lease import Lease
-from sfa.rspecs.elements.granularity import Granularity
-from sfa.rspecs.elements.channel import Channel
-from sfa.rspecs.version_manager import VersionManager
-
-from sfa.nitos.nitosxrn import NitosXrn, hostname_to_urn, hrn_to_nitos_slicename, slicename_to_hrn, channel_to_urn
-from sfa.planetlab.vlink import get_tc_rate
-from sfa.planetlab.topology import Topology
-
-import time
-
-
-class NitosAggregate:
-
- def __init__(self, driver):
- self.driver = driver
-
- def get_slice_and_slivers(self, slice_xrn):
- """
- Returns a dict of slivers keyed on the sliver's node_id
- """
- slivers = {}
- slice = None
- if not slice_xrn:
- return (slice, slivers)
- slice_urn = hrn_to_urn(slice_xrn, 'slice')
- slice_hrn, _ = urn_to_hrn(slice_xrn)
- slice_name = hrn_to_nitos_slicename(slice_hrn)
- slices = self.driver.shell.getSlices({'slice_name': slice_name}, [])
- # filter results
- for slc in slices:
- if slc['slice_name'] == slice_name:
- slice = slc
- break
-
- if not slice:
- return (slice, slivers)
-
- reserved_nodes = self.driver.shell.getReservedNodes(
- {'slice_id': slice['slice_id']}, [])
- reserved_node_ids = []
- # filter on the slice
- for node in reserved_nodes:
- if node['slice_id'] == slice['slice_id']:
- reserved_node_ids.append(node['node_id'])
- # get all the nodes
- all_nodes = self.driver.shell.getNodes({}, [])
-
- for node in all_nodes:
- if node['node_id'] in reserved_node_ids:
- slivers[node['node_id']] = node
-
- return (slice, slivers)
-
- def get_nodes(self, slice_xrn, slice=None, slivers=None, options=None):
- if slivers is None:
- slivers = {}
- if options is None:
- options = {}
- # if we are dealing with a slice that has no node just return
- # and empty list
- if slice_xrn:
- if not slice or not slivers:
- return []
- else:
- nodes = [slivers[sliver] for sliver in slivers]
- else:
- nodes = self.driver.shell.getNodes({}, [])
-
- # get the granularity in second for the reservation system
- grain = self.driver.testbedInfo['grain']
- #grain = 1800
-
- rspec_nodes = []
- for node in nodes:
- rspec_node = NodeElement()
- site_name = self.driver.testbedInfo['name']
- rspec_node['component_id'] = hostname_to_urn(
- self.driver.hrn, site_name, node['hostname'])
- rspec_node['component_name'] = node['hostname']
- rspec_node['component_manager_id'] = Xrn(
- self.driver.hrn, 'authority+cm').get_urn()
- rspec_node['authority_id'] = hrn_to_urn(
- NitosXrn.site_hrn(self.driver.hrn, site_name), 'authority+sa')
- # do not include boot state (<available> element) in the manifest rspec
- # if not slice:
- # rspec_node['boot_state'] = node['boot_state']
- rspec_node['exclusive'] = 'true'
- # site location
- longitude = self.driver.testbedInfo['longitude']
- latitude = self.driver.testbedInfo['latitude']
- if longitude and latitude:
- location = Location(
- {'longitude': longitude, 'latitude': latitude, 'country': 'unknown'})
- rspec_node['location'] = location
- # 3D position
- position_3d = Position3D({'x': node['position']['X'], 'y': node[
- 'position']['Y'], 'z': node['position']['Z']})
- #position_3d = Position3D({'x': 1, 'y': 2, 'z': 3})
- rspec_node['position_3d'] = position_3d
- # Granularity
- granularity = Granularity({'grain': grain})
- rspec_node['granularity'] = granularity
-
- # HardwareType
- rspec_node['hardware_type'] = node['node_type']
- #rspec_node['hardware_type'] = "orbit"
-
- # slivers
- if node['node_id'] in slivers:
- # add sliver info
- sliver = slivers[node['node_id']]
- rspec_node['sliver_id'] = sliver['node_id']
- rspec_node['client_id'] = node['hostname']
- rspec_node['slivers'] = [sliver]
-
- rspec_nodes.append(rspec_node)
- return rspec_nodes
-
- def get_leases_and_channels(self, slice=None, slice_xrn=None, options=None):
-
- if options is None:
- options = {}
- slices = self.driver.shell.getSlices({}, [])
- nodes = self.driver.shell.getNodes({}, [])
- leases = self.driver.shell.getReservedNodes({}, [])
- channels = self.driver.shell.getChannels({}, [])
- reserved_channels = self.driver.shell.getReservedChannels()
- grain = self.driver.testbedInfo['grain']
-
- if slice_xrn and not slice:
- return ([], [])
-
- if slice:
- all_leases = []
- all_leases.extend(leases)
- all_reserved_channels = []
- all_reserved_channels.extend(reserved_channels)
- for lease in all_leases:
- if lease['slice_id'] != slice['slice_id']:
- leases.remove(lease)
- for channel in all_reserved_channels:
- if channel['slice_id'] != slice['slice_id']:
- reserved_channels.remove(channel)
-
- rspec_channels = []
- for channel in reserved_channels:
-
- rspec_channel = {}
- # retrieve channel number
- for chl in channels:
- if chl['channel_id'] == channel['channel_id']:
- channel_number = chl['channel']
- break
-
- rspec_channel['channel_num'] = channel_number
- rspec_channel['start_time'] = channel['start_time']
- rspec_channel['duration'] = (
- int(channel['end_time']) - int(channel['start_time'])) / int(grain)
- rspec_channel['component_id'] = channel_to_urn(
- self.driver.hrn, self.driver.testbedInfo['name'], channel_number)
-
- # retreive slicename
- for slc in slices:
- if slc['slice_id'] == channel['slice_id']:
- slicename = slc['slice_name']
- break
-
- if slice_xrn:
- slice_urn = slice_xrn
- slice_hrn = urn_to_hrn(slice_urn)
- else:
- slice_hrn = slicename_to_hrn(
- self.driver.hrn, self.driver.testbedInfo['name'], slicename)
- slice_urn = hrn_to_urn(slice_hrn, 'slice')
-
- rspec_channel['slice_id'] = slice_urn
- rspec_channels.append(rspec_channel)
-
- rspec_leases = []
- for lease in leases:
-
- rspec_lease = Lease()
-
- rspec_lease['lease_id'] = lease['reservation_id']
- # retreive node name
- for node in nodes:
- if node['node_id'] == lease['node_id']:
- nodename = node['hostname']
- break
-
- rspec_lease['component_id'] = hostname_to_urn(
- self.driver.hrn, self.driver.testbedInfo['name'], nodename)
- # retreive slicename
- for slc in slices:
- if slc['slice_id'] == lease['slice_id']:
- slicename = slc['slice_name']
- break
-
- if slice_xrn:
- slice_urn = slice_xrn
- slice_hrn = urn_to_hrn(slice_urn)
- else:
- slice_hrn = slicename_to_hrn(
- self.driver.hrn, self.driver.testbedInfo['name'], slicename)
- slice_urn = hrn_to_urn(slice_hrn, 'slice')
-
- rspec_lease['slice_id'] = slice_urn
- rspec_lease['start_time'] = lease['start_time']
- rspec_lease['duration'] = (
- int(lease['end_time']) - int(lease['start_time'])) / int(grain)
- rspec_leases.append(rspec_lease)
-
- return (rspec_leases, rspec_channels)
-
- def get_channels(self, slice=None, options=None):
- if options is None:
- options = {}
-
- all_channels = self.driver.shell.getChannels({}, [])
- channels = []
- if slice:
- reserved_channels = self.driver.shell.getReservedChannels()
- reserved_channel_ids = []
- for channel in reserved_channels:
- if channel['slice_id'] == slice['slice_id']:
- reserved_channel_ids.append(channel['channel_id'])
-
- for channel in all_channels:
- if channel['channel_id'] in reserved_channel_ids:
- channels.append(channel)
- else:
- channels = all_channels
-
- rspec_channels = []
- for channel in channels:
- rspec_channel = Channel()
- rspec_channel['channel_num'] = channel['channel']
- rspec_channel['frequency'] = channel['frequency']
- rspec_channel['standard'] = channel['modulation']
- rspec_channel['component_id'] = channel_to_urn(
- self.driver.hrn, self.driver.testbedInfo['name'], channel['channel'])
- rspec_channels.append(rspec_channel)
- return rspec_channels
-
- def get_rspec(self, slice_xrn=None, version=None, options=None):
- if options is None:
- options = {}
-
- version_manager = VersionManager()
- version = version_manager.get_version(version)
-
- if not slice_xrn:
- rspec_version = version_manager._get_version(
- version.type, version.version, 'ad')
- else:
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
-
- slice, slivers = self.get_slice_and_slivers(slice_xrn)
-
- rspec = RSpec(version=rspec_version, user_options=options)
-
- if slice and 'expires' in slice:
- rspec.xml.set('expires', datetime_to_string(
- utcparse(slice['expires'])))
-
- if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
- nodes = self.get_nodes(slice_xrn, slice, slivers, options)
- rspec.version.add_nodes(nodes)
- # add sliver defaults
- default_sliver = slivers.get(None, [])
- if default_sliver:
- default_sliver_attribs = default_sliver.get('tags', [])
- for attrib in default_sliver_attribs:
- logger.info(attrib)
- rspec.version.add_default_sliver_attribute(
- attrib['tagname'], attrib['value'])
- # add wifi channels
- channels = self.get_channels(slice, options)
- rspec.version.add_channels(channels)
-
- if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
- leases_channels = self.get_leases_and_channels(slice, slice_xrn)
- rspec.version.add_leases(leases_channels)
-
- return rspec.toxml()
+++ /dev/null
-import time
-import datetime
-#
-from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
- RecordNotFound, SfaNotImplemented, SliverDoesNotExist
-
-from sfa.util.sfalogging import logger
-from sfa.util.defaultdict import defaultdict
-from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
-from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_hrn
-from sfa.util.cache import Cache
-
-# one would think the driver should not need to mess with the SFA db, but..
-from sfa.storage.model import RegRecord
-
-# used to be used in get_ticket
-#from sfa.trust.sfaticket import SfaTicket
-
-from sfa.rspecs.version_manager import VersionManager
-from sfa.rspecs.rspec import RSpec
-
-# the driver interface, mostly provides default behaviours
-from sfa.managers.driver import Driver
-
-from sfa.nitos.nitosshell import NitosShell
-from sfa.nitos.nitosaggregate import NitosAggregate
-from sfa.nitos.nitosslices import NitosSlices
-
-from sfa.nitos.nitosxrn import NitosXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_nitos_slicename, xrn_to_hostname
-
-
-def list_to_dict(recs, key):
- """
- convert a list of dictionaries into a dictionary keyed on the
- specified dictionary key
- """
- return dict([(rec[key], rec) for rec in recs])
-
-#
-# NitosShell is just an xmlrpc serverproxy where methods
-# can be sent as-is; it takes care of authentication
-# from the global config
-#
-
-
-class NitosDriver (Driver):
-
- # the cache instance is a class member so it survives across incoming
- # requests
- cache = None
-
- def __init__(self, api):
- Driver.__init__(self, api)
- config = api.config
- self.shell = NitosShell(config)
- self.cache = None
- self.testbedInfo = self.shell.getTestbedInfo()
-# un-comment below lines to enable caching
-# if config.SFA_AGGREGATE_CACHING:
-# if NitosDriver.cache is None:
-# NitosDriver.cache = Cache()
-# self.cache = NitosDriver.cache
-
- ###########################################
- # utility methods for NITOS driver
- ###########################################
-
- def filter_nitos_results(self, listo, filters_dict):
- """
- the Nitos scheduler API does not provide a get result filtring so we do it here
- """
- mylist = []
- mylist.extend(listo)
- for dicto in mylist:
- for filter in filters_dict:
- if filter not in dicto or dicto[filter] != filters_dict[filter]:
- listo.remove(dicto)
- break
- return listo
-
- def convert_id(self, list_of_dict):
- """
- convert object id retrived in string format to int format
- """
- for dicto in list_of_dict:
- for key in dicto:
- if key in ['node_id', 'slice_id', 'user_id', 'channel_id', 'reservation_id'] and isinstance(dicto[key], str):
- dicto[key] = int(dicto[key])
- elif key in ['user_ids']:
- user_ids2 = []
- for user_id in dicto['user_ids']:
- user_ids2.append(int(user_id))
- dicto['user_ids'] = user_ids2
- return list_of_dict
-
- ########################################
- # registry oriented
- ########################################
-
- def augment_records_with_testbed_info(self, sfa_records):
- return self.fill_record_info(sfa_records)
-
- ##########
- def register(self, sfa_record, hrn, pub_key):
- type = sfa_record['type']
- nitos_record = self.sfa_fields_to_nitos_fields(type, hrn, sfa_record)
-
- if type == 'authority':
- pointer = -1
-
- elif type == 'slice':
- slices = self.shell.getSlices()
- # filter slices
- for slice in slices:
- if slice['slice_name'] == nitos_record['name']:
- slice_id = slice['slice_id']
- break
-
- if not slice_id:
- pointer = self.shell.addSlice(
- {'slice_name': nitos_record['name']})
- else:
- pointer = slice_id
-
- elif type == 'user':
- users = self.shell.getUsers()
- # filter users
- for user in users:
- if user['user_name'] == nitos_record['name']:
- user_id = user['user_id']
- break
- if not user_id:
- pointer = self.shell.addUser(
- {'username': nitos_record['name'], 'email': nitos_record['email']})
- else:
- pointer = user_id
-
- # Add the user's key
- if pub_key:
- self.shell.addUserKey({'user_id': pointer, 'key': pub_key})
-
- elif type == 'node':
- nodes = self.shell.GetNodes({}, [])
- # filter nodes
- for node in nodes:
- if node['hostname'] == nitos_record['name']:
- node_id = node['node_id']
- break
-
- if not node_id:
- pointer = self.shell.addNode(nitos_record)
- else:
- pointer = node_id
-
- return pointer
-
- ##########
- def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
-
- pointer = old_sfa_record['pointer']
- type = old_sfa_record['type']
- new_nitos_record = self.sfa_fields_to_nitos_fields(
- type, hrn, new_sfa_record)
-
- # new_key implemented for users only
- if new_key and type not in ['user']:
- raise UnknownSfaType(type)
-
- if type == "slice":
- if 'name' in new_sfa_record:
- self.shell.updateSlice({'slice_id': pointer, 'fields': {
- 'slice_name': new_sfa_record['name']}})
-
- elif type == "user":
- update_fields = {}
- if 'name' in new_sfa_record:
- update_fields['username'] = new_sfa_record['name']
- if 'email' in new_sfa_record:
- update_fields['email'] = new_sfa_record['email']
-
- self.shell.updateUser(
- {'user_id': pointer, 'fields': update_fields})
-
- if new_key:
- # needs to be improved
- self.shell.addUserKey({'user_id': pointer, 'key': new_key})
-
- elif type == "node":
- self.shell.updateNode(
- {'node_id': pointer, 'fields': new_sfa_record})
-
- return True
-
- ##########
- def remove(self, sfa_record):
-
- type = sfa_record['type']
- pointer = sfa_record['pointer']
- if type == 'user':
- self.shell.deleteUser({'user_id': pointer})
- elif type == 'slice':
- self.shell.deleteSlice({'slice_id': pointer})
- elif type == 'node':
- self.shell.deleteNode({'node_id': pointer})
-
- return True
-
- ##
- # Convert SFA fields to NITOS fields for use when registering or updating
- # registry record in the NITOS Scheduler database
- #
-
- def sfa_fields_to_nitos_fields(self, type, hrn, sfa_record):
-
- nitos_record = {}
-
- if type == "slice":
- nitos_record["slice_name"] = hrn_to_nitos_slicename(hrn)
- elif type == "node":
- if "hostname" not in sfa_record:
- raise MissingSfaInfo("hostname")
- nitos_record["node_name"] = sfa_record["hostname"]
-
- return nitos_record
-
- ####################
- def fill_record_info(self, records):
- """
- Given a (list of) SFA record, fill in the NITOS specific
- and SFA specific fields in the record.
- """
- if not isinstance(records, list):
- records = [records]
-
- self.fill_record_nitos_info(records)
- self.fill_record_hrns(records)
- self.fill_record_sfa_info(records)
- return records
-
- def fill_record_nitos_info(self, records):
- """
- Fill in the nitos specific fields of a SFA record. This
- involves calling the appropriate NITOS API method to retrieve the
- database record for the object.
-
- @param record: record to fill in field (in/out param)
- """
-
- # get ids by type
- node_ids, slice_ids = [], []
- user_ids, key_ids = [], []
- type_map = {'node': node_ids, 'slice': slice_ids, 'user': user_ids}
-
- for record in records:
- for type in type_map:
- if type == record['type']:
- type_map[type].append(record['pointer'])
-
- # get nitos records
- nodes, slices, users, keys = {}, {}, {}, {}
- if node_ids:
- all_nodes = self.convert_id(self.shell.getNodes({}, []))
- node_list = [node for node in all_nodes if node[
- 'node_id'] in node_ids]
- nodes = list_to_dict(node_list, 'node_id')
- if slice_ids:
- all_slices = self.convert_id(self.shell.getSlices({}, []))
- slice_list = [slice for slice in all_slices if slice[
- 'slice_id'] in slice_ids]
- slices = list_to_dict(slice_list, 'slice_id')
- if user_ids:
- all_users = self.convert_id(self.shell.getUsers())
- user_list = [user for user in all_users if user[
- 'user_id'] in user_ids]
- users = list_to_dict(user_list, 'user_id')
-
- nitos_records = {'node': nodes, 'slice': slices, 'user': users}
-
- # fill record info
- for record in records:
- if record['pointer'] == -1:
- continue
-
- for type in nitos_records:
- if record['type'] == type:
- if record['pointer'] in nitos_records[type]:
- record.update(nitos_records[type][record['pointer']])
- break
- # fill in key info
- if record['type'] == 'user':
- if record['pointer'] in nitos_records['user']:
- record['keys'] = nitos_records[
- 'user'][record['pointer']]['keys']
-
- return records
-
- def fill_record_hrns(self, records):
- """
- convert nitos ids to hrns
- """
-
- # get ids
- slice_ids, user_ids, node_ids = [], [], []
- for record in records:
- if 'user_ids' in record:
- user_ids.extend(record['user_ids'])
- if 'slice_ids' in record:
- slice_ids.extend(record['slice_ids'])
- if 'node_ids' in record:
- node_ids.extend(record['node_ids'])
-
- # get nitos records
- slices, users, nodes = {}, {}, {}
- if node_ids:
- all_nodes = self.convert_id(self.shell.getNodes({}, []))
- node_list = [node for node in all_nodes if node[
- 'node_id'] in node_ids]
- nodes = list_to_dict(node_list, 'node_id')
- if slice_ids:
- all_slices = self.convert_id(self.shell.getSlices({}, []))
- slice_list = [slice for slice in all_slices if slice[
- 'slice_id'] in slice_ids]
- slices = list_to_dict(slice_list, 'slice_id')
- if user_ids:
- all_users = self.convert_id(self.shell.getUsers())
- user_list = [user for user in all_users if user[
- 'user_id'] in user_ids]
- users = list_to_dict(user_list, 'user_id')
-
- # convert ids to hrns
- for record in records:
- # get all relevant data
- type = record['type']
- pointer = record['pointer']
- auth_hrn = self.hrn
- testbed_name = self.testbedInfo['name']
- if pointer == -1:
- continue
- if 'user_ids' in record:
- usernames = [users[user_id]['username'] for user_id in record['user_ids']
- if user_id in users]
- user_hrns = [".".join([auth_hrn, testbed_name, username])
- for username in usernames]
- record['users'] = user_hrns
- if 'slice_ids' in record:
- slicenames = [slices[slice_id]['slice_name'] for slice_id in record['slice_ids']
- if slice_id in slices]
- slice_hrns = [slicename_to_hrn(
- auth_hrn, slicename) for slicename in slicenames]
- record['slices'] = slice_hrns
- if 'node_ids' in record:
- hostnames = [nodes[node_id]['hostname'] for node_id in record['node_ids']
- if node_id in nodes]
- node_hrns = [hostname_to_hrn(
- auth_hrn, login_base, hostname) for hostname in hostnames]
- record['nodes'] = node_hrns
-
- if 'expires' in record:
- date = utcparse(record['expires'])
- datestring = datetime_to_string(date)
- record['expires'] = datestring
-
- return records
-
- def fill_record_sfa_info(self, records):
-
- def startswith(prefix, values):
- return [value for value in values if value.startswith(prefix)]
-
- # get user ids
- user_ids = []
- for record in records:
- user_ids.extend(record.get("user_ids", []))
-
- # get the registry records
- user_list, users = [], {}
- user_list = self.api.dbsession().query(RegRecord).filter(
- RegRecord.pointer.in_(user_ids)).all()
- # create a hrns keyed on the sfa record's pointer.
- # Its possible for multiple records to have the same pointer so
- # the dict's value will be a list of hrns.
- users = defaultdict(list)
- for user in user_list:
- users[user.pointer].append(user)
-
- # get the nitos records
- nitos_user_list, nitos_users = [], {}
- nitos_all_users = self.convert_id(self.shell.getUsers())
- nitos_user_list = [
- user for user in nitos_all_users if user['user_id'] in user_ids]
- nitos_users = list_to_dict(nitos_user_list, 'user_id')
-
- # fill sfa info
- for record in records:
- if record['pointer'] == -1:
- continue
-
- sfa_info = {}
- type = record['type']
- logger.info(
- "fill_record_sfa_info - incoming record typed %s" % type)
- if (type == "slice"):
- # all slice users are researchers
- record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
- record['researcher'] = []
- for user_id in record.get('user_ids', []):
- hrns = [user.hrn for user in users[user_id]]
- record['researcher'].extend(hrns)
-
- elif (type == "node"):
- sfa_info['dns'] = record.get("hostname", "")
- # xxx TODO: URI, LatLong, IP, DNS
-
- elif (type == "user"):
- logger.info('setting user.email')
- sfa_info['email'] = record.get("email", "")
- sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
- sfa_info['geni_certificate'] = record['gid']
- # xxx TODO: PostalAddress, Phone
- record.update(sfa_info)
-
- ####################
- def update_relation(self, subject_type, target_type, relation_name, subject_id, target_ids):
-
- if subject_type == 'slice' and target_type == 'user' and relation_name == 'researcher':
- subject = self.shell.getSlices({'slice_id': subject_id}, [])[0]
- current_target_ids = subject['user_ids']
- add_target_ids = list(
- set(target_ids).difference(current_target_ids))
- del_target_ids = list(
- set(current_target_ids).difference(target_ids))
- logger.debug("subject_id = %s (type=%s)" %
- (subject_id, type(subject_id)))
- for target_id in add_target_ids:
- self.shell.addUserToSlice(
- {'user_id': target_id, 'slice_id': subject_id})
- logger.debug("add_target_id = %s (type=%s)" %
- (target_id, type(target_id)))
- for target_id in del_target_ids:
- logger.debug("del_target_id = %s (type=%s)" %
- (target_id, type(target_id)))
- self.shell.deleteUserFromSlice(
- {'user_id': target_id, 'slice_id': subject_id})
- else:
- logger.info('unexpected relation %s to maintain, %s -> %s' %
- (relation_name, subject_type, target_type))
-
- ########################################
- # aggregate oriented
- ########################################
-
- def testbed_name(self): return "nitos"
-
- # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
- def aggregate_version(self):
- version_manager = VersionManager()
- ad_rspec_versions = []
- request_rspec_versions = []
- for rspec_version in version_manager.versions:
- if rspec_version.content_type in ['*', 'ad']:
- ad_rspec_versions.append(rspec_version.to_dict())
- if rspec_version.content_type in ['*', 'request']:
- request_rspec_versions.append(rspec_version.to_dict())
- return {
- 'testbed': self.testbed_name(),
- 'geni_request_rspec_versions': request_rspec_versions,
- 'geni_ad_rspec_versions': ad_rspec_versions,
- }
-
- def list_slices(self, creds, options):
- # look in cache first
- if self.cache:
- slices = self.cache.get('slices')
- if slices:
- logger.debug("NitosDriver.list_slices returns from cache")
- return slices
-
- # get data from db
- slices = self.shell.getSlices({}, [])
- testbed_name = self.testbedInfo['name']
- slice_hrns = [slicename_to_hrn(self.hrn, testbed_name, slice[
- 'slice_name']) for slice in slices]
- slice_urns = [hrn_to_urn(slice_hrn, 'slice')
- for slice_hrn in slice_hrns]
-
- # cache the result
- if self.cache:
- logger.debug("NitosDriver.list_slices stores value in cache")
- self.cache.add('slices', slice_urns)
-
- return slice_urns
-
- # first 2 args are None in case of resource discovery
- def list_resources(self, slice_urn, slice_hrn, creds, options):
- cached_requested = options.get('cached', True)
- version_manager = VersionManager()
- # get the rspec's return format from options
- #rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
- # rspec's return format for nitos aggregate is version NITOS 1
- rspec_version = version_manager.get_version('NITOS 1')
- version_string = "rspec_%s" % (rspec_version)
-
- # panos adding the info option to the caching key (can be improved)
- if options.get('info'):
- version_string = version_string + \
- "_" + options.get('info', 'default')
-
- # Adding the list_leases option to the caching key
- if options.get('list_leases'):
- version_string = version_string + "_" + \
- options.get('list_leases', 'default')
-
- # Adding geni_available to caching key
- if options.get('geni_available'):
- version_string = version_string + "_" + \
- str(options.get('geni_available'))
-
- # look in cache first
- if cached_requested and self.cache and not slice_hrn:
- rspec = self.cache.get(version_string)
- if rspec:
- logger.debug(
- "NitosDriver.ListResources: returning cached advertisement")
- return rspec
-
- # panos: passing user-defined options
- # print "manager options = ",options
- aggregate = NitosAggregate(self)
- rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
- options=options)
-
- # cache the result
- if self.cache and not slice_hrn:
- logger.debug(
- "NitosDriver.ListResources: stores advertisement in cache")
- self.cache.add(version_string, rspec)
-
- return rspec
-
- def sliver_status(self, slice_urn, slice_hrn):
- # find out where this slice is currently running
- slicename = hrn_to_nitos_slicename(slice_hrn)
-
- slices = self.shell.getSlices({}, [])
- # filter slicename
- if len(slices) == 0:
- raise SliverDoesNotExist(
- "%s (used %s as slicename internally)" % (slice_hrn, slicename))
-
- for slice in slices:
- if slice['slice_name'] == slicename:
- user_slice = slice
- break
-
- if not user_slice:
- raise SliverDoesNotExist(
- "%s (used %s as slicename internally)" % (slice_hrn, slicename))
-
- # report about the reserved nodes only
- reserved_nodes = self.shell.getReservedNodes({}, [])
- nodes = self.shell.getNodes({}, [])
-
- slice_reserved_nodes = []
- for r_node in reserved_nodes:
- if r_node['slice_id'] == slice['slice_id']:
- for node in nodes:
- if node['node_id'] == r_node['node_id']:
- slice_reserved_nodes.append(node)
-
- if len(slice_reserved_nodes) == 0:
- raise SliverDoesNotExist("You have not allocated any slivers here")
-
-# continue from here
- # get login info
- user = {}
- keys = []
- if slice['user_ids']:
- users = self.shell.getUsers()
- # filter users on slice['user_ids']
- for usr in users:
- if usr['user_id'] in slice['user_ids']:
- keys.extend(usr['keys'])
-
- user.update({'urn': slice_urn,
- 'login': slice['slice_name'],
- 'protocol': ['ssh'],
- 'port': ['22'],
- 'keys': keys})
-
- result = {}
- top_level_status = 'unknown'
- if slice_reserved_nodes:
- top_level_status = 'ready'
- result['geni_urn'] = slice_urn
- result['nitos_gateway_login'] = slice['slice_name']
- #result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
- #result['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
-
- resources = []
- for node in slice_reserved_nodes:
- res = {}
- res['nitos_hostname'] = node['hostname']
- sliver_id = Xrn(slice_urn, type='slice', id=node['node_id']).urn
- res['geni_urn'] = sliver_id
- res['geni_status'] = 'ready'
- res['geni_error'] = ''
- res['users'] = [user]
-
- resources.append(res)
-
- result['geni_status'] = top_level_status
- result['geni_resources'] = resources
-
- return result
-
- def create_sliver(self, slice_urn, slice_hrn, creds, rspec_string, users, options):
-
- aggregate = NitosAggregate(self)
- slices = NitosSlices(self)
- sfa_peer = slices.get_sfa_peer(slice_hrn)
- slice_record = None
- if users:
- slice_record = users[0].get('slice_record', {})
-
- # parse rspec
- rspec = RSpec(rspec_string, version='NITOS 1')
-
- # ensure slice record exists
- slice = slices.verify_slice(
- slice_hrn, slice_record, sfa_peer, options=options)
- # ensure user records exists
- users = slices.verify_users(
- slice_hrn, slice, users, sfa_peer, options=options)
-
- # add/remove leases (nodes and channels)
- # a lease in Nitos RSpec case is a reservation of nodes and channels
- # grouped by (slice,timeslot)
- rspec_requested_leases = rspec.version.get_leases()
- rspec_requested_nodes = []
- rspec_requested_channels = []
- for lease in rspec_requested_leases:
- if lease['type'] == 'node':
- lease.pop('type', None)
- rspec_requested_nodes.append(lease)
- else:
- lease.pop('type', None)
- rspec_requested_channels.append(lease)
-
- nodes = slices.verify_slice_leases_nodes(slice, rspec_requested_nodes)
- channels = slices.verify_slice_leases_channels(
- slice, rspec_requested_channels)
-
- return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
-
- def delete_sliver(self, slice_urn, slice_hrn, creds, options):
- slicename = hrn_to_nitos_slicename(slice_hrn)
- slices = self.filter_nitos_results(
- self.shell.getSlices({}, []), {'slice_name': slicename})
- if not slices:
- return 1
- slice = slices[0]
-
- slice_reserved_nodes = self.filter_nitos_results(
- self.shell.getReservedNodes({}, []), {'slice_id': slice['slice_id']})
- slice_reserved_channels = self.filter_nitos_results(
- self.shell.getReservedChannels(), {'slice_id': slice['slice_id']})
-
- slice_reserved_nodes_ids = [node['reservation_id']
- for node in slice_reserved_nodes]
- slice_reserved_channels_ids = [
- channel['reservation_id'] for channel in slice_reserved_channels]
-
- # release all reserved nodes and channels for that slice
- try:
- released_nodes = self.shell.releaseNodes(
- {'reservation_ids': slice_reserved_nodes_ids})
- released_channels = self.shell.releaseChannels(
- {'reservation_ids': slice_reserved_channels_ids})
- except:
- pass
- return 1
-
- def renew_sliver(self, slice_urn, slice_hrn, creds, expiration_time, options):
- slicename = hrn_to_nitos_slicename(slice_hrn)
- slices = self.shell.GetSlices({'slicename': slicename}, ['slice_id'])
- if not slices:
- raise RecordNotFound(slice_hrn)
- slice = slices[0]
- requested_time = utcparse(expiration_time)
- record = {'expires': int(datetime_to_epoch(requested_time))}
- try:
- self.shell.UpdateSlice(slice['slice_id'], record)
-
- return True
- except:
- return False
-
- # xxx this code is quite old and has not run for ages
- # it is obviously totally broken and needs a rewrite
- def get_ticket(self, slice_urn, slice_hrn, creds, rspec_string, options):
- raise SfaNotImplemented("NitosDriver.get_ticket needs a rewrite")
-# please keep this code for future reference
-# slices = PlSlices(self)
-# peer = slices.get_peer(slice_hrn)
-# sfa_peer = slices.get_sfa_peer(slice_hrn)
-#
-# # get the slice record
-# credential = api.getCredential()
-# interface = api.registries[api.hrn]
-# registry = api.server_proxy(interface, credential)
-# records = registry.Resolve(xrn, credential)
-#
-# # make sure we get a local slice record
-# record = None
-# for tmp_record in records:
-# if tmp_record['type'] == 'slice' and \
-# not tmp_record['peer_authority']:
-# #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
-# slice_record = SliceRecord(dict=tmp_record)
-# if not record:
-# raise RecordNotFound(slice_hrn)
-#
-# # similar to CreateSliver, we must verify that the required records exist
-# # at this aggregate before we can issue a ticket
-# # parse rspec
-# rspec = RSpec(rspec_string)
-# requested_attributes = rspec.version.get_slice_attributes()
-#
-# # ensure site record exists
-# site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
-# # ensure slice record exists
-# slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
-# # ensure person records exists
-# # xxx users is undefined in this context
-# persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
-# # ensure slice attributes exists
-# slices.verify_slice_attributes(slice, requested_attributes)
-#
-# # get sliver info
-# slivers = slices.get_slivers(slice_hrn)
-#
-# if not slivers:
-# raise SliverDoesNotExist(slice_hrn)
-#
-# # get initscripts
-# initscripts = []
-# data = {
-# 'timestamp': int(time.time()),
-# 'initscripts': initscripts,
-# 'slivers': slivers
-# }
-#
-# # create the ticket
-# object_gid = record.get_gid_object()
-# new_ticket = SfaTicket(subject = object_gid.get_subject())
-# new_ticket.set_gid_caller(api.auth.client_gid)
-# new_ticket.set_gid_object(object_gid)
-# new_ticket.set_issuer(key=api.key, subject=self.hrn)
-# new_ticket.set_pubkey(object_gid.get_pubkey())
-# new_ticket.set_attributes(data)
-# new_ticket.set_rspec(rspec)
-# #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
-# new_ticket.encode()
-# new_ticket.sign()
-#
-# return new_ticket.save_to_string(save_parents=True)
+++ /dev/null
-import sys
-import socket
-from urlparse import urlparse
-
-from sfa.util.sfalogging import logger
-from sfa.util.py23 import xmlrpc_client
-
-
-class NitosShell:
- """
- A simple xmlrpc shell to a NITOS Scheduler instance
- This class can receive all NITOS API calls to the underlying testbed
- For safety this is limited to a set of hard-coded calls
- """
-
- direct_calls = ['getNodes', 'getChannels', 'getSlices', 'getUsers', 'getReservedNodes',
- 'getReservedChannels', 'getTestbedInfo',
- 'reserveNodes', 'reserveChannels', 'addSlice', 'addUser', 'addUserToSlice',
- 'addUserKey', 'addNode', 'addChannel',
- 'updateReservedNodes', 'updateReservedChannels', 'updateSlice', 'updateUser',
- 'updateNode', 'updateChannel',
- 'deleteNode', 'deleteChannel', 'deleteSlice', 'deleteUser', 'deleteUserFromSLice',
- 'deleteKey', 'releaseNodes', 'releaseChannels'
- ]
-
- # use the 'capability' auth mechanism for higher performance when the PLC
- # db is local
- def __init__(self, config):
- url = config.SFA_NITOS_URL
- self.proxy = xmlrpc_client.ServerProxy(
- url, verbose=False, allow_none=True)
-
- def __getattr__(self, name):
- def func(*args, **kwds):
- actual_name = None
- if name in NitosShell.direct_calls:
- actual_name = name
- if not actual_name:
- raise Exception(
- "Illegal method call %s for NITOS driver" % (name))
- actual_name = "scheduler.server." + actual_name
- result = getattr(self.proxy, actual_name)(*args, **kwds)
- logger.debug('NitosShell %s (%s) returned ... ' %
- (name, actual_name))
- return result
- return func
+++ /dev/null
-from collections import defaultdict
-
-from sfa.util.sfatime import utcparse, datetime_to_epoch
-from sfa.util.sfalogging import logger
-from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
-
-from sfa.rspecs.rspec import RSpec
-
-from sfa.nitos.nitosxrn import NitosXrn, hrn_to_nitos_slicename, xrn_to_hostname, xrn_to_channel
-
-MAXINT = 2L**31 - 1
-
-
-class NitosSlices:
-
- def __init__(self, driver):
- self.driver = driver
-
- def get_sfa_peer(self, xrn):
- hrn, type = urn_to_hrn(xrn)
-
- # return the authority for this hrn or None if we are the authority
- sfa_peer = None
- slice_authority = get_authority(hrn)
- site_authority = get_authority(slice_authority)
-
- if site_authority != self.driver.hrn:
- sfa_peer = site_authority
-
- return sfa_peer
-
- def verify_slice_leases_nodes(self, slice, rspec_requested_nodes):
- nodes = self.driver.shell.getNodes({}, [])
-
- requested_nodes = []
- for node in rspec_requested_nodes:
- requested_node = {}
- nitos_nodes = []
- nitos_nodes.extend(nodes)
- slice_name = hrn_to_nitos_slicename(node['slice_id'])
- if slice_name != slice['slice_name']:
- continue
- hostname = xrn_to_hostname(node['component_id'])
- nitos_node = self.driver.filter_nitos_results(
- nitos_nodes, {'hostname': hostname})
- if not nitos_node:
- continue
- nitos_node = nitos_node[0]
- # fill the requested node with nitos ids
- requested_node['slice_id'] = slice['slice_id']
- requested_node['node_id'] = nitos_node['node_id']
- requested_node['start_time'] = node['start_time']
- requested_node['end_time'] = str(int(
- node['duration']) * int(self.driver.testbedInfo['grain']) + int(node['start_time']))
- requested_nodes.append(requested_node)
-
- # get actual nodes reservation data for the slice
- reserved_nodes = self.driver.filter_nitos_results(
- self.driver.shell.getReservedNodes({}, []), {'slice_id': slice['slice_id']})
-
- reserved_nodes_by_id = {}
- for node in reserved_nodes:
- reserved_nodes_by_id[node['reservation_id']] = {'slice_id': node['slice_id'],
- 'node_id': node['node_id'], 'start_time': node['start_time'],
- 'end_time': node['end_time']}
-
- added_nodes = []
- kept_nodes_id = []
- deleted_nodes_id = []
- for reservation_id in reserved_nodes_by_id:
- if reserved_nodes_by_id[reservation_id] not in requested_nodes:
- deleted_nodes_id.append(reservation_id)
- else:
- kept_nodes_id.append(reservation_id)
- requested_nodes.remove(reserved_nodes_by_id[reservation_id])
- added_nodes = requested_nodes
-
- try:
- deleted = self.driver.shell.releaseNodes(
- {'reservation_ids': deleted_nodes_id})
- for node in added_nodes:
- added = self.driver.shell.reserveNodes({'slice_id': slice['slice_id'], 'start_time': node[
- 'start_time'], 'end_time': node['end_time'], 'nodes': [node['node_id']]})
-
- except:
- logger.log_exc('Failed to add/remove slice leases nodes')
-
- return added_nodes
-
- def verify_slice_leases_channels(self, slice, rspec_requested_channels):
- channels = self.driver.shell.getChannels({}, [])
-
- requested_channels = []
- for channel in rspec_requested_channels:
- requested_channel = {}
- nitos_channels = []
- nitos_channels.extend(channels)
- slice_name = hrn_to_nitos_slicename(channel['slice_id'])
- if slice_name != slice['slice_name']:
- continue
- channel_num = xrn_to_channel(channel['component_id'])
- nitos_channel = self.driver.filter_nitos_results(
- nitos_channels, {'channel': channel_num})[0]
- # fill the requested channel with nitos ids
- requested_channel['slice_id'] = slice['slice_id']
- requested_channel['channel_id'] = nitos_channel['channel_id']
- requested_channel['start_time'] = channel['start_time']
- requested_channel['end_time'] = str(int(
- channel['duration']) * int(self.driver.testbedInfo['grain']) + int(channel['start_time']))
- requested_channels.append(requested_channel)
-
- # get actual channel reservation data for the slice
- reserved_channels = self.driver.filter_nitos_results(
- self.driver.shell.getReservedChannels(), {'slice_id': slice['slice_id']})
-
- reserved_channels_by_id = {}
- for channel in reserved_channels:
- reserved_channels_by_id[channel['reservation_id']] = {'slice_id': channel['slice_id'],
- 'channel_id': channel['channel_id'], 'start_time': channel['start_time'],
- 'end_time': channel['end_time']}
-
- added_channels = []
- kept_channels_id = []
- deleted_channels_id = []
- for reservation_id in reserved_channels_by_id:
- if reserved_channels_by_id[reservation_id] not in requested_channels:
- deleted_channels_id.append(reservation_id)
- else:
- kept_channels_id.append(reservation_id)
- requested_channels.remove(
- reserved_channels_by_id[reservation_id])
- added_channels = requested_channels
-
- try:
- deleted = self.driver.shell.releaseChannels(
- {'reservation_ids': deleted_channels_id})
- for channel in added_channels:
- added = self.driver.shell.reserveChannels({'slice_id': slice['slice_id'], 'start_time': channel[
- 'start_time'], 'end_time': channel['end_time'], 'channels': [channel['channel_id']]})
-
- except:
- logger.log_exc('Failed to add/remove slice leases channels')
-
- return added_channels
-
- def free_egre_key(self):
- used = set()
- for tag in self.driver.shell.GetSliceTags({'tagname': 'egre_key'}):
- used.add(int(tag['value']))
-
- for i in range(1, 256):
- if i not in used:
- key = i
- break
- else:
- raise KeyError("No more EGRE keys available")
-
- return str(key)
-
- def verify_slice(self, slice_hrn, slice_record, sfa_peer, options=None):
- if options is None:
- options = {}
- slicename = hrn_to_nitos_slicename(slice_hrn)
- slices = self.driver.shell.getSlices({}, [])
- slices = self.driver.filter_nitos_results(
- slices, {'slice_name': slicename})
- if not slices:
- slice = {'slice_name': slicename}
- # add the slice
- slice['slice_id'] = self.driver.shell.addSlice(slice)
- slice['node_ids'] = []
- slice['user_ids'] = []
- else:
- slice = slices[0]
-
- return slice
-
- def verify_users(self, slice_hrn, slice_record, users, sfa_peer, options=None):
- if options is None:
- options = {}
- # get slice info
- slicename = hrn_to_nitos_slicename(slice_hrn)
- slices = self.driver.shell.getSlices({}, [])
- slice = self.driver.filter_nitos_results(
- slices, {'slice_name': slicename})[0]
- added_users = []
- # get users info
- users_info = []
- for user in users:
- user_urn = user['urn']
- user_hrn, type = urn_to_hrn(user_urn)
- username = str(user_hrn).split('.')[-1]
- email = user['email']
- # look for the user according to his username, email...
- nitos_users = self.driver.filter_nitos_results(
- self.driver.shell.getUsers(), {'username': username})
- if not nitos_users:
- nitos_users = self.driver.filter_nitos_results(
- self.driver.shell.getUsers(), {'email': email})
-
- if not nitos_users:
- # create the user
- user_id = self.driver.shell.addUser(
- {'username': email.split('@')[0], 'email': email})
- added_users.append(user_id)
- # add user keys
- for key in user['keys']:
- self.driver.shell.addUserKey(
- {'user_id': user_id, 'key': key, 'slice_id': slice['slice_id']})
- # add the user to the slice
- self.driver.shell.addUserToSlice(
- {'slice_id': slice['slice_id'], 'user_id': user_id})
- else:
- # check if the users are in the slice
- for user in nitos_users:
- if not user['user_id'] in slice['user_ids']:
- self.driver.shell.addUserToSlice(
- {'slice_id': slice['slice_id'], 'user_id': user['user_id']})
-
- return added_users
-
- def verify_keys(self, persons, users, options=None):
- if options is None:
- options = {}
- # existing keys
- key_ids = []
- for person in persons:
- key_ids.extend(person['key_ids'])
- keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key'])
- keydict = {}
- for key in keylist:
- keydict[key['key']] = key['key_id']
- existing_keys = keydict.keys()
- persondict = {}
- for person in persons:
- persondict[person['email']] = person
-
- # add new keys
- requested_keys = []
- updated_persons = []
- for user in users:
- user_keys = user.get('keys', [])
- updated_persons.append(user)
- for key_string in user_keys:
- requested_keys.append(key_string)
- if key_string not in existing_keys:
- key = {'key': key_string, 'key_type': 'ssh'}
- try:
- if peer:
- person = persondict[user['email']]
- self.driver.shell.UnBindObjectFromPeer(
- 'person', person['person_id'], peer['shortname'])
- key['key_id'] = self.driver.shell.AddPersonKey(
- user['email'], key)
- if peer:
- key_index = user_keys.index(key['key'])
- remote_key_id = user['key_ids'][key_index]
- self.driver.shell.BindObjectToPeer('key', key['key_id'], peer[
- 'shortname'], remote_key_id)
-
- finally:
- if peer:
- self.driver.shell.BindObjectToPeer('person', person['person_id'], peer[
- 'shortname'], user['person_id'])
-
- # remove old keys (only if we are not appending)
- append = options.get('append', True)
- if append == False:
- removed_keys = set(existing_keys).difference(requested_keys)
- for existing_key_id in keydict:
- if keydict[existing_key_id] in removed_keys:
- try:
- if peer:
- self.driver.shell.UnBindObjectFromPeer(
- 'key', existing_key_id, peer['shortname'])
- self.driver.shell.DeleteKey(existing_key_id)
- except:
- pass
+++ /dev/null
-from __future__ import print_function
-
-# specialized Xrn class for NITOS
-import re
-from sfa.util.xrn import Xrn
-
-# temporary helper functions to use this module instead of namespace
-
-
-def hostname_to_hrn(auth, login_base, hostname):
- return NitosXrn(auth=auth + '.' + login_base, hostname=hostname).get_hrn()
-
-
-def hostname_to_urn(auth, login_base, hostname):
- return NitosXrn(auth=auth + '.' + login_base, hostname=hostname).get_urn()
-
-
-def slicename_to_hrn(auth_hrn, site_name, slicename):
- return NitosXrn(auth=auth_hrn + '.' + site_name, slicename=slicename).get_hrn()
-# hack to convert nitos user name to hrn
-
-
-def username_to_hrn(auth_hrn, site_name, username):
- return NitosXrn(auth=auth_hrn + '.' + site_name, slicename=username).get_hrn()
-
-
-def email_to_hrn(auth_hrn, email):
- return NitosXrn(auth=auth_hrn, email=email).get_hrn()
-
-
-def hrn_to_nitos_slicename(hrn):
- return NitosXrn(xrn=hrn, type='slice').nitos_slicename()
-# removed-dangerous - was used for non-slice objects
-# def hrn_to_nitos_login_base (hrn):
-# return NitosXrn(xrn=hrn,type='slice').nitos_login_base()
-
-
-def hrn_to_nitos_authname(hrn):
- return NitosXrn(xrn=hrn, type='any').nitos_authname()
-
-
-def xrn_to_hostname(hrn):
- return Xrn.unescape(NitosXrn(xrn=hrn, type='node').get_leaf())
-
-
-def channel_to_hrn(auth, login_base, channel):
- return NitosXrn(auth=auth + '.' + login_base, channel=channel).get_hrn()
-
-
-def channel_to_urn(auth, login_base, channel):
- return NitosXrn(auth=auth + '.' + login_base, channel=channel).get_urn()
-
-
-def xrn_to_channel(hrn):
- return Xrn.unescape(NitosXrn(xrn=hrn, type='channel').get_leaf())
-
-
-class NitosXrn (Xrn):
-
- @staticmethod
- def site_hrn(auth, login_base):
- return '.'.join([auth, login_base])
-
- def __init__(self, auth=None, hostname=None, slicename=None, email=None, interface=None, channel=None, **kwargs):
- # def hostname_to_hrn(auth_hrn, login_base, hostname):
- if hostname is not None:
- self.type = 'node'
- # keep only the first part of the DNS name
- #self.hrn='.'.join( [auth,hostname.split(".")[0] ] )
- # escape the '.' in the hostname
- self.hrn = '.'.join([auth, Xrn.escape(hostname)])
- self.hrn_to_urn()
- # def slicename_to_hrn(auth_hrn, slicename):
- elif slicename is not None:
- self.type = 'slice'
- self.hrn = ".".join([auth] + [slicename.replace(".", "_")])
- self.hrn_to_urn()
- # def email_to_hrn(auth_hrn, email):
- elif email is not None:
- self.type = 'person'
- # keep only the part before '@' and replace special chars into _
- self.hrn = '.'.join(
- [auth, email.split('@')[0].replace(".", "_").replace("+", "_")])
- self.hrn_to_urn()
- elif interface is not None:
- self.type = 'interface'
- self.hrn = auth + '.' + interface
- self.hrn_to_urn()
- elif channel is not None:
- self.type = 'channel'
- self.hrn = ".".join([auth] + [channel])
- self.hrn_to_urn()
- else:
- Xrn.__init__(self, **kwargs)
-
- # def hrn_to_pl_slicename(hrn):
- def nitos_slicename(self):
- self._normalize()
- leaf = self.leaf
- sliver_id_parts = leaf.split(':')
- name = sliver_id_parts[0]
- name = re.sub('[^a-zA-Z0-9_]', '', name)
- # return self.nitos_login_base() + '_' + name
- return name
-
- # def hrn_to_pl_authname(hrn):
- def nitos_authname(self):
- self._normalize()
- return self.authority[-1]
-
- def interface_name(self):
- self._normalize()
- return self.leaf
-
- def nitos_login_base(self):
- self._normalize()
- if self.type and self.type.startswith('authority'):
- base = self.leaf
- else:
- base = self.authority[-1]
-
- # Fix up names of GENI Federates
- base = base.lower()
- base = re.sub('\\\[^a-zA-Z0-9]', '', base)
-
- if len(base) > 20:
- base = base[len(base) - 20:]
-
- return base
-
-
-if __name__ == '__main__':
-
- #nitosxrn = NitosXrn(auth="omf.nitos",slicename="aminesl")
- #slice_hrn = nitosxrn.get_hrn()
- #slice_name = NitosXrn(xrn="omf.nitos.aminesl",type='slice').nitos_slicename()
- slicename = "giorgos_n"
- hrn = slicename_to_hrn("pla", "nitos", slicename)
- print(hrn)
+++ /dev/null
-from sfa.util.sfalogging import logger
-from keystoneclient.v2_0 import client as keystone_client
-from glance import client as glance_client
-from novaclient.v1_1 import client as nova_client
-from sfa.util.config import Config
-
-
-def parse_novarc(filename):
- opts = {}
- f = open(filename, 'r')
- for line in f:
- try:
- line = line.replace('export', '').strip()
- parts = line.split('=')
- if len(parts) > 1:
- value = parts[1].replace("\'", "")
- value = value.replace('\"', '')
- opts[parts[0]] = value
- except:
- pass
- f.close()
- return opts
-
-
-class KeystoneClient:
-
- def __init__(self, username=None, password=None, tenant=None, url=None, config=None):
- if not config:
- config = Config()
- opts = parse_novarc(config.SFA_NOVA_NOVARC)
- if username:
- opts['OS_USERNAME'] = username
- if password:
- opts['OS_PASSWORD'] = password
- if tenant:
- opts['OS_TENANT_NAME'] = tenant
- if url:
- opts['OS_AUTH_URL'] = url
- self.opts = opts
- self.client = keystone_client.Client(username=opts.get('OS_USERNAME'),
- password=opts.get('OS_PASSWORD'),
- tenant_name=opts.get(
- 'OS_TENANT_NAME'),
- auth_url=opts.get('OS_AUTH_URL'))
-
- def connect(self, *args, **kwds):
- self.__init__(*args, **kwds)
-
- def __getattr__(self, name):
- return getattr(self.client, name)
-
-
-class GlanceClient:
-
- def __init__(self, config=None):
- if not config:
- config = Config()
- opts = parse_novarc(config.SFA_NOVA_NOVARC)
- self.client = glance_client.get_client(host='0.0.0.0',
- username=opts.get(
- 'OS_USERNAME'),
- password=opts.get(
- 'OS_PASSWORD'),
- tenant=opts.get(
- 'OS_TENANT_NAME'),
- auth_url=opts.get('OS_AUTH_URL'))
-
- def __getattr__(self, name):
- return getattr(self.client, name)
-
-
-class NovaClient:
-
- def __init__(self, username=None, password=None, tenant=None, url=None, config=None):
- if not config:
- config = Config()
- opts = parse_novarc(config.SFA_NOVA_NOVARC)
- if username:
- opts['OS_USERNAME'] = username
- if password:
- opts['OS_PASSWORD'] = password
- if tenant:
- opts['OS_TENANT_NAME'] = tenant
- if url:
- opts['OS_AUTH_URL'] = url
- self.opts = opts
- self.client = nova_client.Client(username=opts.get('OS_USERNAME'),
- api_key=opts.get('OS_PASSWORD'),
- project_id=opts.get('OS_TENANT_NAME'),
- auth_url=opts.get('OS_AUTH_URL'),
- region_name='',
- extensions=[],
- service_type='compute',
- service_name='',
- )
-
- def connect(self, *args, **kwds):
- self.__init__(*args, **kwds)
-
- def __getattr__(self, name):
- return getattr(self.client, name)
+++ /dev/null
-try:
- import boto
- from boto.ec2.regioninfo import RegionInfo
- from boto.exception import EC2ResponseError
- has_boto = True
-except:
- has_boto = False
-
-from sfa.util.sfalogging import logger
-from sfa.openstack.nova_shell import NovaShell
-from sfa.util.config import Config
-
-
-class EucaShell:
- """
- A xmlrpc connection to the euca api.
- """
-
- def __init__(self, config):
- self.config = config
- self.nova_shell = NovaShell(config)
- self.access_key = None
- self.secret_key = None
-
- def init_context(self, project_name=None):
-
- # use the context of the specified project's project
- # manager.
- if project_name:
- project = self.nova_shell.auth_manager.get_project(project_name)
- self.access_key = "%s:%s" % (
- project.project_manager.name, project_name)
- self.secret_key = project.project_manager.secret
- else:
- # use admin user's context
- admin_user = self.nova_shell.auth_manager.get_user(
- self.config.SFA_NOVA_USER)
- #access_key = admin_user.access
- self.access_key = '%s' % admin_user.name
- self.secret_key = admin_user.secret
-
- def get_euca_connection(self, project_name=None):
- if not has_boto:
- logger.info('Unable to access EC2 API - boto library not found.')
- return None
-
- if not self.access_key or not self.secret_key:
- self.init_context(project_name)
-
- url = self.config.SFA_NOVA_API_URL
- host = None
- port = None
- path = "/"
- use_ssl = False
- # Split the url into parts
- if url.find('https://') >= 0:
- use_ssl = True
- url = url.replace('https://', '')
- elif url.find('http://') >= 0:
- use_ssl = False
- url = url.replace('http://', '')
- parts = url.split(':')
- host = parts[0]
- if len(parts) > 1:
- parts = parts[1].split('/')
- port = int(parts[0])
- parts = parts[1:]
- path = '/' + '/'.join(parts)
- return boto.connect_ec2(aws_access_key_id=self.access_key,
- aws_secret_access_key=self.secret_key,
- is_secure=use_ssl,
- region=RegionInfo(None, 'eucalyptus', host),
- host=host,
- port=port,
- path=path)
-
- def __getattr__(self, name):
- def func(*args, **kwds):
- conn = self.get_euca_connection()
+++ /dev/null
-from nova.exception import ImageNotFound
-from sfa.rspecs.elements.disk_image import DiskImage
-
-
-class Image:
-
- def __init__(self, image=None):
- if image is None:
- image = {}
- self.id = None
- self.container_format = None
- self.kernel_id = None
- self.ramdisk_id = None
- self.properties = None
- self.name = None
- self.description = None
- self.os = None
- self.version = None
-
- if image:
- self.parse_image(image)
-
- def parse_image(self, image):
- if isinstance(image, dict):
- self.id = image['id']
- self.name = image['name']
- self.container_format = image['container_format']
- self.properties = image['properties']
- if 'kernel_id' in self.properties:
- self.kernel_id = self.properties['kernel_id']
- if 'ramdisk_id' in self.properties:
- self.ramdisk_id = self.properties['ramdisk_id']
-
- def to_rspec_object(self):
- img = DiskImage()
- img['name'] = self.name
- img['description'] = self.name
- img['os'] = self.name
- img['version'] = self.name
- return img
-
-
-class ImageManager:
-
- def __init__(self, driver):
- self.driver = driver
-
- @staticmethod
- def disk_image_to_rspec_object(image):
- img = Image(image)
- return img.to_rspec_object()
-
- def get_available_disk_images(self):
- # get image records
- disk_images = []
- for img in self.driver.shell.image_manager.get_images_detailed():
- image = Image(img)
- if image.container_format in ['ami', 'ovf']:
- disk_images.append(image)
- return disk_images
-
- def get_disk_image(self, id=None, name=None):
- """
- Look up a image bundle using the specifeid id or name
- """
- disk_image = None
- try:
- if id:
- image = self.driver.shell.nova_manager.images.find(id=id)
- elif name:
- image = self.driver.shell.nova_manager.images.find(name=name)
- except ImageNotFound:
- pass
- return Image(image)
+++ /dev/null
-import time
-import datetime
-
-from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
- RecordNotFound, SfaNotImplemented, SfaInvalidArgument, UnsupportedOperation
-
-from sfa.util.sfalogging import logger
-from sfa.util.defaultdict import defaultdict
-from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
-from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf
-from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename, hrn_to_os_tenant_name
-from sfa.util.cache import Cache
-from sfa.trust.credential import Credential
-# used to be used in get_ticket
-#from sfa.trust.sfaticket import SfaTicket
-from sfa.rspecs.version_manager import VersionManager
-from sfa.rspecs.rspec import RSpec
-from sfa.storage.model import RegRecord, SliverAllocation
-
-# the driver interface, mostly provides default behaviours
-from sfa.managers.driver import Driver
-from sfa.openstack.shell import Shell
-from sfa.openstack.osaggregate import OSAggregate
-from sfa.planetlab.plslices import PlSlices
-
-
-def list_to_dict(recs, key):
- """
- convert a list of dictionaries into a dictionary keyed on the
- specified dictionary key
- """
- return dict([(rec[key], rec) for rec in recs])
-
-#
-# PlShell is just an xmlrpc serverproxy where methods
-# can be sent as-is; it takes care of authentication
-# from the global config
-#
-
-
-class NovaDriver(Driver):
-
- # the cache instance is a class member so it survives across incoming
- # requests
- cache = None
-
- def __init__(self, api):
- Driver.__init__(self, api)
- config = api.config
- self.shell = Shell(config=config)
- self.cache = None
- if config.SFA_AGGREGATE_CACHING:
- if NovaDriver.cache is None:
- NovaDriver.cache = Cache()
- self.cache = NovaDriver.cache
-
- def sliver_to_slice_xrn(self, xrn):
- sliver_id_parts = Xrn(xrn).get_sliver_id_parts()
- slice = self.shell.auth_manager.tenants.find(id=sliver_id_parts[0])
- if not slice:
- raise Forbidden(
- "Unable to locate slice record for sliver: %s" % xrn)
- slice_xrn = OSXrn(name=slice.name, type='slice')
- return slice_xrn
-
- def check_sliver_credentials(self, creds, urns):
- # build list of cred object hrns
- slice_cred_names = []
- for cred in creds:
- slice_cred_hrn = Credential(cred=cred).get_gid_object().get_hrn()
- slice_cred_names.append(OSXrn(xrn=slice_cred_hrn).get_slicename())
-
- # look up slice name of slivers listed in urns arg
- slice_ids = []
- for urn in urns:
- sliver_id_parts = Xrn(xrn=urn).get_sliver_id_parts()
- slice_ids.append(sliver_id_parts[0])
-
- if not slice_ids:
- raise Forbidden("sliver urn not provided")
-
- sliver_names = []
- for slice_id in slice_ids:
- slice = self.shell.auth_manager.tenants.find(slice_id)
- sliver_names.append(slice['name'])
-
- # make sure we have a credential for every specified sliver ierd
- for sliver_name in sliver_names:
- if sliver_name not in slice_cred_names:
- msg = "Valid credential not found for target: %s" % sliver_name
- raise Forbidden(msg)
-
- ########################################
- # registry oriented
- ########################################
-
- # disabled users
- def is_enabled(self, record):
- # all records are enabled
- return True
-
- def augment_records_with_testbed_info(self, sfa_records):
- return self.fill_record_info(sfa_records)
-
- ##########
- def register(self, sfa_record, hrn, pub_key):
-
- if sfa_record['type'] == 'slice':
- record = self.register_slice(sfa_record, hrn)
- elif sfa_record['type'] == 'user':
- record = self.register_user(sfa_record, hrn, pub_key)
- elif sfa_record['type'].startswith('authority'):
- record = self.register_authority(sfa_record, hrn)
- # We should be returning the records id as a pointer but
- # this is a string and the records table expects this to be an
- # int.
- # return record.id
- return -1
-
- def register_slice(self, sfa_record, hrn):
- # add slice description, name, researchers, PI
- name = hrn_to_os_tenant_name(hrn)
- description = sfa_record.get('description', None)
- self.shell.auth_manager.tenants.create(name, description)
- tenant = self.shell.auth_manager.tenants.find(name=name)
- auth_hrn = OSXrn(xrn=hrn, type='slice').get_authority_hrn()
- parent_tenant_name = OSXrn(
- xrn=auth_hrn, type='slice').get_tenant_name()
- parent_tenant = self.shell.auth_manager.tenants.find(
- name=parent_tenant_name)
- researchers = sfa_record.get('researchers', [])
- for researcher in researchers:
- name = Xrn(researcher).get_leaf()
- user = self.shell.auth_manager.users.find(name=name)
- self.shell.auth_manager.roles.add_user_role(user, 'Member', tenant)
- self.shell.auth_manager.roles.add_user_role(user, 'user', tenant)
-
- pis = sfa_record.get('pis', [])
- for pi in pis:
- name = Xrn(pi).get_leaf()
- user = self.shell.auth_manager.users.find(name=name)
- self.shell.auth_manager.roles.add_user_role(user, 'pi', tenant)
- self.shell.auth_manager.roles.add_user_role(
- user, 'pi', parent_tenant)
-
- return tenant
-
- def register_user(self, sfa_record, hrn, pub_key):
- # add person roles, projects and keys
- email = sfa_record.get('email', None)
- xrn = Xrn(hrn)
- name = xrn.get_leaf()
- auth_hrn = xrn.get_authority_hrn()
- tenant_name = OSXrn(xrn=auth_hrn, type='authority').get_tenant_name()
- tenant = self.shell.auth_manager.tenants.find(name=tenant_name)
- self.shell.auth_manager.users.create(
- name, email=email, tenant_id=tenant.id)
- user = self.shell.auth_manager.users.find(name=name)
- slices = sfa_records.get('slices', [])
- for slice in projects:
- slice_tenant_name = OSXrn(
- xrn=slice, type='slice').get_tenant_name()
- slice_tenant = self.shell.auth_manager.tenants.find(
- name=slice_tenant_name)
- self.shell.auth_manager.roles.add_user_role(
- user, slice_tenant, 'user')
- keys = sfa_records.get('keys', [])
- for key in keys:
- keyname = OSXrn(xrn=hrn, type='user').get_slicename()
- self.shell.nova_client.keypairs.create(keyname, key)
- return user
-
- def register_authority(self, sfa_record, hrn):
- name = OSXrn(xrn=hrn, type='authority').get_tenant_name()
- self.shell.auth_manager.tenants.create(
- name, sfa_record.get('description', ''))
- tenant = self.shell.auth_manager.tenants.find(name=name)
- return tenant
-
- ##########
- # xxx actually old_sfa_record comes filled with plc stuff as well in the
- # original code
- def update(self, old_sfa_record, new_sfa_record, hrn, new_key):
- type = new_sfa_record['type']
-
- # new_key implemented for users only
- if new_key and type not in ['user']:
- raise UnknownSfaType(type)
-
- elif type == "slice":
- # can update project manager and description
- name = hrn_to_os_slicename(hrn)
- researchers = sfa_record.get('researchers', [])
- pis = sfa_record.get('pis', [])
- project_manager = None
- description = sfa_record.get('description', None)
- if pis:
- project_manager = Xrn(pis[0], 'user').get_leaf()
- elif researchers:
- project_manager = Xrn(researchers[0], 'user').get_leaf()
- self.shell.auth_manager.modify_project(
- name, project_manager, description)
-
- elif type == "user":
- # can techinally update access_key and secret_key,
- # but that is not in our scope, so we do nothing.
- pass
- return True
-
- ##########
- def remove(self, sfa_record):
- type = sfa_record['type']
- if type == 'user':
- name = Xrn(sfa_record['hrn']).get_leaf()
- if self.shell.auth_manager.get_user(name):
- self.shell.auth_manager.delete_user(name)
- elif type == 'slice':
- name = hrn_to_os_slicename(sfa_record['hrn'])
- if self.shell.auth_manager.get_project(name):
- self.shell.auth_manager.delete_project(name)
- return True
-
- ####################
- def fill_record_info(self, records):
- """
- Given a (list of) SFA record, fill in the PLC specific
- and SFA specific fields in the record.
- """
- if not isinstance(records, list):
- records = [records]
-
- for record in records:
- if record['type'] == 'user':
- record = self.fill_user_record_info(record)
- elif record['type'] == 'slice':
- record = self.fill_slice_record_info(record)
- elif record['type'].startswith('authority'):
- record = self.fill_auth_record_info(record)
- else:
- continue
- record['geni_urn'] = hrn_to_urn(record['hrn'], record['type'])
- record['geni_certificate'] = record['gid']
- # if os_record.created_at is not None:
- # record['date_created'] = datetime_to_string(utcparse(os_record.created_at))
- # if os_record.updated_at is not None:
- # record['last_updated'] = datetime_to_string(utcparse(os_record.updated_at))
-
- return records
-
- def fill_user_record_info(self, record):
- xrn = Xrn(record['hrn'])
- name = xrn.get_leaf()
- record['name'] = name
- user = self.shell.auth_manager.users.find(name=name)
- record['email'] = user.email
- tenant = self.shell.auth_manager.tenants.find(id=user.tenantId)
- slices = []
- all_tenants = self.shell.auth_manager.tenants.list()
- for tmp_tenant in all_tenants:
- if tmp_tenant.name.startswith(tenant.name + "."):
- for tmp_user in tmp_tenant.list_users():
- if tmp_user.name == user.name:
- slice_hrn = ".".join([self.hrn, tmp_tenant.name])
- slices.append(slice_hrn)
- record['slices'] = slices
- roles = self.shell.auth_manager.roles.roles_for_user(user, tenant)
- record['roles'] = [role.name for role in roles]
- keys = self.shell.nova_manager.keypairs.findall(name=record['hrn'])
- record['keys'] = [key.public_key for key in keys]
- return record
-
- def fill_slice_record_info(self, record):
- tenant_name = hrn_to_os_tenant_name(record['hrn'])
- tenant = self.shell.auth_manager.tenants.find(name=tenant_name)
- parent_tenant_name = OSXrn(xrn=tenant_name).get_authority_hrn()
- parent_tenant = self.shell.auth_manager.tenants.find(
- name=parent_tenant_name)
- researchers = []
- pis = []
-
- # look for users and pis in slice tenant
- for user in tenant.list_users():
- for role in self.shell.auth_manager.roles.roles_for_user(user, tenant):
- if role.name.lower() == 'pi':
- user_tenant = self.shell.auth_manager.tenants.find(
- id=user.tenantId)
- hrn = ".".join([self.hrn, user_tenant.name, user.name])
- pis.append(hrn)
- elif role.name.lower() in ['user', 'member']:
- user_tenant = self.shell.auth_manager.tenants.find(
- id=user.tenantId)
- hrn = ".".join([self.hrn, user_tenant.name, user.name])
- researchers.append(hrn)
-
- # look for pis in the slice's parent (site/organization) tenant
- for user in parent_tenant.list_users():
- for role in self.shell.auth_manager.roles.roles_for_user(user, parent_tenant):
- if role.name.lower() == 'pi':
- user_tenant = self.shell.auth_manager.tenants.find(
- id=user.tenantId)
- hrn = ".".join([self.hrn, user_tenant.name, user.name])
- pis.append(hrn)
- record['name'] = tenant_name
- record['description'] = tenant.description
- record['PI'] = pis
- if pis:
- record['geni_creator'] = pis[0]
- else:
- record['geni_creator'] = None
- record['researcher'] = researchers
- return record
-
- def fill_auth_record_info(self, record):
- tenant_name = hrn_to_os_tenant_name(record['hrn'])
- tenant = self.shell.auth_manager.tenants.find(name=tenant_name)
- researchers = []
- pis = []
-
- # look for users and pis in slice tenant
- for user in tenant.list_users():
- for role in self.shell.auth_manager.roles.roles_for_user(user, tenant):
- hrn = ".".join([self.hrn, tenant.name, user.name])
- if role.name.lower() == 'pi':
- pis.append(hrn)
- elif role.name.lower() in ['user', 'member']:
- researchers.append(hrn)
-
- # look for slices
- slices = []
- all_tenants = self.shell.auth_manager.tenants.list()
- for tmp_tenant in all_tenants:
- if tmp_tenant.name.startswith(tenant.name + "."):
- slices.append(".".join([self.hrn, tmp_tenant.name]))
-
- record['name'] = tenant_name
- record['description'] = tenant.description
- record['PI'] = pis
- record['enabled'] = tenant.enabled
- record['researchers'] = researchers
- record['slices'] = slices
- return record
-
- ####################
- # plcapi works by changes, compute what needs to be added/deleted
- def update_relation(self, subject_type, target_type, subject_id, target_ids):
- # hard-wire the code for slice/user for now, could be smarter if needed
- if subject_type == 'slice' and target_type == 'user':
- subject = self.shell.project_get(subject_id)[0]
- current_target_ids = [user.name for user in subject.members]
- add_target_ids = list(
- set(target_ids).difference(current_target_ids))
- del_target_ids = list(
- set(current_target_ids).difference(target_ids))
- logger.debug("subject_id = %s (type=%s)" %
- (subject_id, type(subject_id)))
- for target_id in add_target_ids:
- self.shell.project_add_member(target_id, subject_id)
- logger.debug("add_target_id = %s (type=%s)" %
- (target_id, type(target_id)))
- for target_id in del_target_ids:
- logger.debug("del_target_id = %s (type=%s)" %
- (target_id, type(target_id)))
- self.shell.project_remove_member(target_id, subject_id)
- else:
- logger.info('unexpected relation to maintain, %s -> %s' %
- (subject_type, target_type))
-
- ########################################
- # aggregate oriented
- ########################################
-
- def testbed_name(self): return "openstack"
-
- def aggregate_version(self):
- return {}
-
- # first 2 args are None in case of resource discovery
- def list_resources(self, version=None, options=None):
- if options is None:
- options = {}
- aggregate = OSAggregate(self)
- rspec = aggregate.list_resources(version=version, options=options)
- return rspec
-
- def describe(self, urns, version=None, options=None):
- if options is None:
- options = {}
- aggregate = OSAggregate(self)
- return aggregate.describe(urns, version=version, options=options)
-
- def status(self, urns, options=None):
- if options is None:
- options = {}
- aggregate = OSAggregate(self)
- desc = aggregate.describe(urns)
- status = {'geni_urn': desc['geni_urn'],
- 'geni_slivers': desc['geni_slivers']}
- return status
-
- def allocate(self, urn, rspec_string, expiration, options=None):
- if options is None:
- options = {}
- xrn = Xrn(urn)
- aggregate = OSAggregate(self)
-
- # assume first user is the caller and use their context
- # for the ec2/euca api connection. Also, use the first users
- # key as the project key.
- key_name = None
- if len(users) > 1:
- key_name = aggregate.create_instance_key(xrn.get_hrn(), users[0])
-
- # collect public keys
- users = options.get('geni_users', [])
- pubkeys = []
- for user in users:
- pubkeys.extend(user['keys'])
-
- rspec = RSpec(rspec_string)
- instance_name = hrn_to_os_slicename(slice_hrn)
- tenant_name = OSXrn(xrn=slice_hrn, type='slice').get_tenant_name()
- slivers = aggregate.run_instances(instance_name, tenant_name,
- rspec_string, key_name, pubkeys)
-
- # update all sliver allocation states setting then to geni_allocated
- sliver_ids = [sliver.id for sliver in slivers]
- dbsession = self.api.dbsession()
- SliverAllocation.set_allocations(
- sliver_ids, 'geni_provisioned', dbsession)
-
- return aggregate.describe(urns=[urn], version=rspec.version)
-
- def provision(self, urns, options=None):
- if options is None:
- options = {}
- # update sliver allocation states and set them to geni_provisioned
- aggregate = OSAggregate(self)
- instances = aggregate.get_instances(urns)
- sliver_ids = []
- for instance in instances:
- sliver_hrn = "%s.%s" % (self.driver.hrn, instance.id)
- sliver_ids.append(Xrn(sliver_hrn, type='sliver').urn)
- dbsession = self.api.dbsession()
- SliverAllocation.set_allocations(
- sliver_ids, 'geni_provisioned', dbsession)
- version_manager = VersionManager()
- rspec_version = version_manager.get_version(
- options['geni_rspec_version'])
- return self.describe(urns, rspec_version, options=options)
-
- def delete(self, urns, options=None):
- if options is None:
- options = {}
- # collect sliver ids so we can update sliver allocation states after
- # we remove the slivers.
- aggregate = OSAggregate(self)
- instances = aggregate.get_instances(urns)
- sliver_ids = []
- for instance in instances:
- sliver_hrn = "%s.%s" % (self.driver.hrn, instance.id)
- sliver_ids.append(Xrn(sliver_hrn, type='sliver').urn)
-
- # delete the instance
- aggregate.delete_instance(instance)
-
- # delete sliver allocation states
- dbsession = self.api.dbsession()
- SliverAllocation.delete_allocations(sliver_ids, dbsession)
-
- # return geni_slivers
- geni_slivers = []
- for sliver_id in sliver_ids:
- geni_slivers.append(
- {'geni_sliver_urn': sliver['sliver_id'],
- 'geni_allocation_status': 'geni_unallocated',
- 'geni_expires': None})
- return geni_slivers
-
- def renew(self, urns, expiration_time, options=None):
- if options is None:
- options = {}
- description = self.describe(urns, None, options)
- return description['geni_slivers']
-
- def perform_operational_action(self, urns, action, options=None):
- if options is None:
- options = {}
- aggregate = OSAggregate(self)
- action = action.lower()
- if action == 'geni_start':
- action_method = aggregate.start_instances
- elif action == 'geni_stop':
- action_method = aggregate.stop_instances
- elif action == 'geni_restart':
- action_method = aggreate.restart_instances
- else:
- raise UnsupportedOperation(action)
-
- # fault if sliver is not full allocated (operational status is
- # geni_pending_allocation)
- description = self.describe(urns, None, options)
- for sliver in description['geni_slivers']:
- if sliver['geni_operational_status'] == 'geni_pending_allocation':
- raise UnsupportedOperation(
- action, "Sliver must be fully allocated (operational status is not geni_pending_allocation)")
- #
- # Perform Operational Action Here
- #
-
- instances = aggregate.get_instances(urns)
- for instance in instances:
- tenant_name = self.driver.shell.auth_manager.client.tenant_name
- action_method(tenant_name, instance.name, instance.id)
- description = self.describe(urns)
- geni_slivers = self.describe(urns, None, options)['geni_slivers']
- return geni_slivers
-
- def shutdown(self, xrn, options=None):
- if options is None:
- options = {}
- xrn = OSXrn(xrn=xrn, type='slice')
- tenant_name = xrn.get_tenant_name()
- name = xrn.get_slicename()
- self.driver.shell.nova_manager.connect(tenant=tenant_name)
- instances = self.driver.shell.nova_manager.servers.findall(name=name)
- for instance in instances:
- self.driver.shell.nova_manager.servers.shutdown(instance)
- return True
+++ /dev/null
-
-import os
-import socket
-import base64
-import string
-import random
-import time
-from collections import defaultdict
-from nova.exception import ImageNotFound
-from nova.api.ec2.cloud import CloudController
-from sfa.util.faults import SliverDoesNotExist
-from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
-from sfa.rspecs.rspec import RSpec
-from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.node import Node
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.login import Login
-from sfa.rspecs.elements.disk_image import DiskImage
-from sfa.rspecs.elements.services import Services
-from sfa.rspecs.elements.interface import Interface
-from sfa.rspecs.elements.fw_rule import FWRule
-from sfa.util.xrn import Xrn
-from sfa.planetlab.plxrn import PlXrn
-from sfa.openstack.osxrn import OSXrn, hrn_to_os_slicename
-from sfa.rspecs.version_manager import VersionManager
-from sfa.openstack.security_group import SecurityGroup
-from sfa.client.multiclient import MultiClient
-from sfa.util.sfalogging import logger
-
-
-def pubkeys_to_user_data(pubkeys):
- user_data = "#!/bin/bash\n\n"
- for pubkey in pubkeys:
- pubkey = pubkey.replace('\n', '')
- user_data += "echo %s >> /root/.ssh/authorized_keys" % pubkey
- user_data += "\n"
- user_data += "echo >> /root/.ssh/authorized_keys"
- user_data += "\n"
- return user_data
-
-
-def image_to_rspec_disk_image(image):
- img = DiskImage()
- img['name'] = image['name']
- img['description'] = image['name']
- img['os'] = image['name']
- img['version'] = image['name']
- return img
-
-
-class OSAggregate:
-
- def __init__(self, driver):
- self.driver = driver
-
- def get_availability_zones(self):
- zones = self.driver.shell.nova_manager.dns_domains.domains()
- if not zones:
- zones = ['cloud']
- else:
- zones = [zone.name for zone in zones]
- return zones
-
- def list_resources(self, version=None, options=None):
- if options is None:
- options = {}
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(
- version.type, version.version, 'ad')
- rspec = RSpec(version=version, user_options=options)
- nodes = self.get_aggregate_nodes()
- rspec.version.add_nodes(nodes)
- return rspec.toxml()
-
- def describe(self, urns, version=None, options=None):
- if options is None:
- options = {}
- # update nova connection
- tenant_name = OSXrn(xrn=urns[0], type='slice').get_tenant_name()
- self.driver.shell.nova_manager.connect(tenant=tenant_name)
- instances = self.get_instances(urns)
- # lookup the sliver allocations
- sliver_ids = [sliver['sliver_id'] for sliver in slivers]
- constraint = SliverAllocation.sliver_id.in_(sliver_ids)
- sliver_allocations = self.driver.api.dbsession().query(
- SliverAllocation).filter(constraint)
- sliver_allocation_dict = {}
- for sliver_allocation in sliver_allocations:
- sliver_allocation_dict[
- sliver_allocation.sliver_id] = sliver_allocation
-
- geni_slivers = []
- rspec_nodes = []
- for instance in instances:
- rspec_nodes.append(self.instance_to_rspec_node(instance))
- geni_sliver = self.instance_to_geni_sliver(
- instance, sliver_sllocation_dict)
- geni_slivers.append(geni_sliver)
- version_manager = VersionManager()
- version = version_manager.get_version(version)
- rspec_version = version_manager._get_version(
- version.type, version.version, 'manifest')
- rspec = RSpec(version=rspec_version, user_options=options)
- rspec.xml.set('expires', datetime_to_string(utcparse(time.time())))
- rspec.version.add_nodes(rspec_nodes)
- result = {'geni_urn': Xrn(urns[0]).get_urn(),
- 'geni_rspec': rspec.toxml(),
- 'geni_slivers': geni_slivers}
-
- return result
-
- def get_instances(self, urns):
- # parse slice names and sliver ids
- names = set()
- ids = set()
- for urn in urns:
- xrn = OSXrn(xrn=urn)
- if xrn.type == 'slice':
- names.add(xrn.get_slice_name())
- elif xrn.type == 'sliver':
- ids.add(xrn.leaf)
-
- # look up instances
- instances = []
- filter = {}
- if names:
- filter['name'] = names
- if ids:
- filter['id'] = ids
- servers = self.driver.shell.nova_manager.servers.findall(**filter)
- instances.extend(servers)
-
- return instances
-
- def instance_to_rspec_node(self, instance):
- # determine node urn
- node_xrn = instance.metadata.get('component_id')
- if not node_xrn:
- node_xrn = OSXrn('cloud', type='node')
- else:
- node_xrn = OSXrn(xrn=node_xrn, type='node')
-
- rspec_node = Node()
- rspec_node['component_id'] = node_xrn.urn
- rspec_node['component_name'] = node_xrn.name
- rspec_node['component_manager_id'] = Xrn(
- self.driver.hrn, 'authority+cm').get_urn()
- rspec_node['sliver_id'] = OSXrn(
- name=instance.name, type='slice', id=instance.id).get_urn()
- if instance.metadata.get('client_id'):
- rspec_node['client_id'] = instance.metadata.get('client_id')
-
- # get sliver details
- flavor = self.driver.shell.nova_manager.flavors.find(
- id=instance.flavor['id'])
- sliver = self.instance_to_sliver(flavor)
- # get firewall rules
- fw_rules = []
- group_name = instance.metadata.get('security_groups')
- if group_name:
- group = self.driver.shell.nova_manager.security_groups.find(
- name=group_name)
- for rule in group.rules:
- port_range = "%s:%s" % (rule['from_port'], rule['to_port'])
- fw_rule = FWRule({'protocol': rule['ip_protocol'],
- 'port_range': port_range,
- 'cidr_ip': rule['ip_range']['cidr']})
- fw_rules.append(fw_rule)
- sliver['fw_rules'] = fw_rules
- rspec_node['slivers'] = [sliver]
-
- # get disk image
- image = self.driver.shell.image_manager.get_images(
- id=instance.image['id'])
- if isinstance(image, list) and len(image) > 0:
- image = image[0]
- disk_image = image_to_rspec_disk_image(image)
- sliver['disk_image'] = [disk_image]
-
- # get interfaces
- rspec_node['services'] = []
- rspec_node['interfaces'] = []
- addresses = instance.addresses
- # HACK: public ips are stored in the list of private, but
- # this seems wrong. Assume pub ip is the last in the list of
- # private ips until openstack bug is fixed.
- if addresses.get('private'):
- login = Login({'authentication': 'ssh-keys',
- 'hostname': addresses.get('private')[-1]['addr'],
- 'port': '22', 'username': 'root'})
- service = Services({'login': login})
- rspec_node['services'].append(service)
-
- for private_ip in addresses.get('private', []):
- if_xrn = PlXrn(auth=self.driver.hrn,
- interface='node%s' % (instance.hostId))
- if_client_id = Xrn(if_xrn.urn, type='interface',
- id="eth%s" % if_index).urn
- if_sliver_id = Xrn(
- rspec_node['sliver_id'], type='slice', id="eth%s" % if_index).urn
- interface = Interface({'component_id': if_xrn.urn,
- 'client_id': if_client_id,
- 'sliver_id': if_sliver_id})
- interface['ips'] = [{'address': private_ip['addr'],
- #'netmask': private_ip['network'],
- 'type': private_ip['version']}]
- rspec_node['interfaces'].append(interface)
-
- # slivers always provide the ssh service
- for public_ip in addresses.get('public', []):
- login = Login({'authentication': 'ssh-keys',
- 'hostname': public_ip['addr'],
- 'port': '22', 'username': 'root'})
- service = Services({'login': login})
- rspec_node['services'].append(service)
- return rspec_node
-
- def instance_to_sliver(self, instance, xrn=None):
- if xrn:
- sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
- sliver_id = Xrn(sliver_hrn, type='sliver').urn
-
- sliver = Sliver({'sliver_id': sliver_id,
- 'name': instance.name,
- 'type': instance.name,
- 'cpus': str(instance.vcpus),
- 'memory': str(instance.ram),
- 'storage': str(instance.disk)})
- return sliver
-
- def instance_to_geni_sliver(self, instance, sliver_allocations=None):
- if sliver_allocations is None:
- sliver_allocations = {}
- sliver_hrn = '%s.%s' % (self.driver.hrn, instance.id)
- sliver_id = Xrn(sliver_hrn, type='sliver').urn
-
- # set sliver allocation and operational status
- sliver_allocation = sliver_allocations[sliver_id]
- if sliver_allocation:
- allocation_status = sliver_allocation.allocation_state
- if allocation_status == 'geni_allocated':
- op_status = 'geni_pending_allocation'
- elif allocation_status == 'geni_provisioned':
- state = instance.state.lower()
- if state == 'active':
- op_status = 'geni_ready'
- elif state == 'building':
- op_status = 'geni_notready'
- elif state == 'failed':
- op_status = ' geni_failed'
- else:
- op_status = 'geni_unknown'
- else:
- allocation_status = 'geni_unallocated'
- # required fields
- geni_sliver = {'geni_sliver_urn': sliver_id,
- 'geni_expires': None,
- 'geni_allocation_status': allocation_status,
- 'geni_operational_status': op_status,
- 'geni_error': None,
- 'plos_created_at': datetime_to_string(utcparse(instance.created)),
- 'plos_sliver_type': self.shell.nova_manager.flavors.find(id=instance.flavor['id']).name,
- }
-
- return geni_sliver
-
- def get_aggregate_nodes(self):
- zones = self.get_availability_zones()
- # available sliver/instance/vm types
- instances = self.driver.shell.nova_manager.flavors.list()
- if isinstance(instances, dict):
- instances = instances.values()
- # available images
- images = self.driver.shell.image_manager.get_images_detailed()
- disk_images = [image_to_rspec_disk_image(img) for img in images if img[
- 'container_format'] in ['ami', 'ovf']]
- rspec_nodes = []
- for zone in zones:
- rspec_node = Node()
- xrn = OSXrn(zone, type='node')
- rspec_node['component_id'] = xrn.urn
- rspec_node['component_name'] = xrn.name
- rspec_node['component_manager_id'] = Xrn(
- self.driver.hrn, 'authority+cm').get_urn()
- rspec_node['exclusive'] = 'false'
- rspec_node['hardware_types'] = [HardwareType({'name': 'plos-pc'}),
- HardwareType({'name': 'pc'})]
- slivers = []
- for instance in instances:
- sliver = self.instance_to_sliver(instance)
- sliver['disk_image'] = disk_images
- slivers.append(sliver)
- rspec_node['available'] = 'true'
- rspec_node['slivers'] = slivers
- rspec_nodes.append(rspec_node)
-
- return rspec_nodes
-
- def create_tenant(self, tenant_name):
- tenants = self.driver.shell.auth_manager.tenants.findall(
- name=tenant_name)
- if not tenants:
- self.driver.shell.auth_manager.tenants.create(
- tenant_name, tenant_name)
- tenant = self.driver.shell.auth_manager.tenants.find(
- name=tenant_name)
- else:
- tenant = tenants[0]
- return tenant
-
- def create_instance_key(self, slice_hrn, user):
- slice_name = Xrn(slice_hrn).leaf
- user_name = Xrn(user['urn']).leaf
- key_name = "%s_%s" % (slice_name, user_name)
- pubkey = user['keys'][0]
- key_found = False
- existing_keys = self.driver.shell.nova_manager.keypairs.findall(
- name=key_name)
- for existing_key in existing_keys:
- if existing_key.public_key != pubkey:
- self.driver.shell.nova_manager.keypairs.delete(existing_key)
- elif existing_key.public_key == pubkey:
- key_found = True
-
- if not key_found:
- self.driver.shell.nova_manager.keypairs.create(key_name, pubkey)
- return key_name
-
- def create_security_group(self, slicename, fw_rules=None):
- if fw_rules is None:
- fw_rules = []
- # use default group by default
- group_name = 'default'
- if isinstance(fw_rules, list) and fw_rules:
- # Each sliver get's its own security group.
- # Keep security group names unique by appending some random
- # characters on end.
- random_name = "".join([random.choice(string.letters + string.digits)
- for i in xrange(6)])
- group_name = slicename + random_name
- security_group = SecurityGroup(self.driver)
- security_group.create_security_group(group_name)
- for rule in fw_rules:
- security_group.add_rule_to_group(group_name,
- protocol=rule.get('protocol'),
- cidr_ip=rule.get('cidr_ip'),
- port_range=rule.get(
- 'port_range'),
- icmp_type_code=rule.get('icmp_type_code'))
- # Open ICMP by default
- security_group.add_rule_to_group(group_name,
- protocol="icmp",
- cidr_ip="0.0.0.0/0",
- icmp_type_code="-1:-1")
- return group_name
-
- def add_rule_to_security_group(self, group_name, **kwds):
- security_group = SecurityGroup(self.driver)
- security_group.add_rule_to_group(group_name=group_name,
- protocol=kwds.get('protocol'),
- cidr_ip=kwds.get('cidr_ip'),
- icmp_type_code=kwds.get('icmp_type_code'))
-
- def run_instances(self, instance_name, tenant_name, rspec, key_name, pubkeys):
- # logger.debug('Reserving an instance: image: %s, flavor: ' \
- # '%s, key: %s, name: %s' % \
- # (image_id, flavor_id, key_name, slicename))
-
- # make sure a tenant exists for this slice
- tenant = self.create_tenant(tenant_name)
-
- # add the sfa admin user to this tenant and update our nova client connection
- # to use these credentials for the rest of this session. This emsures that the instances
- # we create will be assigned to the correct tenant.
- sfa_admin_user = self.driver.shell.auth_manager.users.find(
- name=self.driver.shell.auth_manager.opts['OS_USERNAME'])
- user_role = self.driver.shell.auth_manager.roles.find(name='user')
- admin_role = self.driver.shell.auth_manager.roles.find(name='admin')
- self.driver.shell.auth_manager.roles.add_user_role(
- sfa_admin_user, admin_role, tenant)
- self.driver.shell.auth_manager.roles.add_user_role(
- sfa_admin_user, user_role, tenant)
- self.driver.shell.nova_manager.connect(tenant=tenant.name)
-
- authorized_keys = "\n".join(pubkeys)
- files = {'/root/.ssh/authorized_keys': authorized_keys}
- rspec = RSpec(rspec)
- requested_instances = defaultdict(list)
-
- # iterate over clouds/zones/nodes
- slivers = []
- for node in rspec.version.get_nodes_with_slivers():
- instances = node.get('slivers', [])
- if not instances:
- continue
- for instance in instances:
- try:
- metadata = {}
- flavor_id = self.driver.shell.nova_manager.flavors.find(name=instance[
- 'name'])
- image = instance.get('disk_image')
- if image and isinstance(image, list):
- image = image[0]
- else:
- raise InvalidRSpec(
- "Must specify a disk_image for each VM")
- image_id = self.driver.shell.nova_manager.images.find(name=image[
- 'name'])
- fw_rules = instance.get('fw_rules', [])
- group_name = self.create_security_group(
- instance_name, fw_rules)
- metadata['security_groups'] = group_name
- if node.get('component_id'):
- metadata['component_id'] = node['component_id']
- if node.get('client_id'):
- metadata['client_id'] = node['client_id']
- server = self.driver.shell.nova_manager.servers.create(
- flavor=flavor_id,
- image=image_id,
- key_name=key_name,
- security_groups=[group_name],
- files=files,
- meta=metadata,
- name=instance_name)
- slivers.append(server)
- except Exception as err:
- logger.log_exc(err)
-
- return slivers
-
- def delete_instance(self, instance):
-
- def _delete_security_group(inst):
- security_group = inst.metadata.get('security_groups', '')
- if security_group:
- manager = SecurityGroup(self.driver)
- timeout = 10.0 # wait a maximum of 10 seconds before forcing the security group delete
- start_time = time.time()
- instance_deleted = False
- while instance_deleted == False and (time.time() - start_time) < timeout:
- tmp_inst = self.driver.shell.nova_manager.servers.findall(
- id=inst.id)
- if not tmp_inst:
- instance_deleted = True
- time.sleep(.5)
- manager.delete_security_group(security_group)
-
- multiclient = MultiClient()
- tenant = self.driver.shell.auth_manager.tenants.find(
- id=instance.tenant_id)
- self.driver.shell.nova_manager.connect(tenant=tenant.name)
- args = {'name': instance.name,
- 'id': instance.id}
- instances = self.driver.shell.nova_manager.servers.findall(**args)
- security_group_manager = SecurityGroup(self.driver)
- for instance in instances:
- # destroy instance
- self.driver.shell.nova_manager.servers.delete(instance)
- # deleate this instance's security groups
- multiclient.run(_delete_security_group, instance)
- return 1
-
- def stop_instances(self, instance_name, tenant_name, id=None):
- self.driver.shell.nova_manager.connect(tenant=tenant_name)
- args = {'name': instance_name}
- if id:
- args['id'] = id
- instances = self.driver.shell.nova_manager.servers.findall(**args)
- for instance in instances:
- self.driver.shell.nova_manager.servers.pause(instance)
- return 1
-
- def start_instances(self, instance_name, tenant_name, id=None):
- self.driver.shell.nova_manager.connect(tenant=tenant_name)
- args = {'name': instance_name}
- if id:
- args['id'] = id
- instances = self.driver.shell.nova_manager.servers.findall(**args)
- for instance in instances:
- self.driver.shell.nova_manager.servers.resume(instance)
- return 1
-
- def restart_instances(self, instacne_name, tenant_name, id=None):
- self.stop_instances(instance_name, tenant_name, id)
- self.start_instances(instance_name, tenant_name, id)
- return 1
-
- def update_instances(self, project_name):
- pass
+++ /dev/null
-import re
-from sfa.util.xrn import Xrn
-from sfa.util.config import Config
-
-
-def hrn_to_os_slicename(hrn):
- return OSXrn(xrn=hrn, type='slice').get_slicename()
-
-
-def hrn_to_os_tenant_name(hrn):
- return OSXrn(xrn=hrn, type='slice').get_tenant_name()
-
-
-def cleanup_name(name):
- return name.replace(".", "_").replace("+", "_")
-
-
-class OSXrn(Xrn):
-
- def __init__(self, name=None, auth=None, **kwds):
-
- config = Config()
- self.id = id
- if name is not None:
- Xrn.__init__(self, **kwds)
- if 'type' in kwds:
- self.type = kwds['type']
- if auth is not None:
- self.hrn = '.'.join([auth, cleanup_name(name)])
- else:
- self.hrn = name.replace('_', '.')
- self.hrn_to_urn()
- else:
- Xrn.__init__(self, **kwds)
-
- self.name = self.get_name()
-
- def get_name(self):
- self._normalize()
- leaf = self.leaf
- sliver_id_parts = leaf.split(':')
- name = sliver_id_parts[0]
- name = re.sub('[^a-zA-Z0-9_]', '', name)
- return name
-
- def get_slicename(self):
- self._normalize()
- slicename = self.hrn
- slicename = slicename.split(':')[0]
- slicename = re.sub('[\.]', '_', slicename)
- return slicename
-
- def get_tenant_name(self):
- self._normalize()
- tenant_name = self.hrn.replace('\.', '')
- return tenant_name
+++ /dev/null
-from sfa.util.sfalogging import logger
-
-
-class SecurityGroup:
-
- def __init__(self, driver):
- self.client = driver.shell.nova_manager
-
- def create_security_group(self, name):
- try:
- self.client.security_groups.create(name=name, description=name)
- except Exception as ex:
- logger.log_exc("Failed to add security group")
- raise
-
- def delete_security_group(self, name):
- try:
- security_group = self.client.security_groups.find(name=name)
- self.client.security_groups.delete(security_group.id)
- except Exception as ex:
- logger.log_exc("Failed to delete security group")
-
- def _validate_port_range(self, port_range):
- from_port = to_port = None
- if isinstance(port_range, str):
- ports = port_range.split(':')
- if len(ports) > 1:
- from_port = int(ports[0])
- to_port = int(ports[1])
- else:
- from_port = to_port = int(ports[0])
- return (from_port, to_port)
-
- def _validate_icmp_type_code(self, icmp_type_code):
- from_port = to_port = None
- if isinstance(icmp_type_code, str):
- code_parts = icmp_type_code.split(':')
- if len(code_parts) > 1:
- try:
- from_port = int(code_parts[0])
- to_port = int(code_parts[1])
- except ValueError:
- logger.error('port must be an integer.')
- return (from_port, to_port)
-
- def add_rule_to_group(self, group_name=None, protocol='tcp', cidr_ip='0.0.0.0/0',
- port_range=None, icmp_type_code=None,
- source_group_name=None, source_group_owner_id=None):
-
- try:
- from_port, to_port = self._validate_port_range(port_range)
- icmp_type = self._validate_icmp_type_code(icmp_type_code)
- if icmp_type and icmp_type[0] and icmp_type[1]:
- from_port, to_port = icmp_type[0], icmp_type[1]
-
- group = self.client.security_groups.find(name=group_name)
- self.client.security_group_rules.create(group.id,
- protocol, from_port, to_port, cidr_ip)
- except Exception as ex:
- logger.log_exc("Failed to add rule to group %s" % group_name)
-
- def remove_rule_from_group(self, group_name=None, protocol='tcp', cidr_ip='0.0.0.0/0',
- port_range=None, icmp_type_code=None,
- source_group_name=None, source_group_owner_id=None):
- try:
- from_port, to_port = self._validate_port_range(port_range)
- icmp_type = self._validate_icmp_type_code(icmp_type_code)
- if icmp_type:
- from_port, to_port = icmp_type[0], icmp_type[1]
- group = self.client.security_groups.find(name=group_name)
- filter = {
- 'id': group.id,
- 'from_port': from_port,
- 'to_port': to_port,
- 'cidr_ip': ip,
- 'ip_protocol': protocol,
- }
- rule = self.client.security_group_rules.find(**filter)
- if rule:
- self.client.security_group_rules.delete(rule)
- except Exception as ex:
- logger.log_exc("Failed to remove rule from group %s" % group_name)
+++ /dev/null
-import sys
-import socket
-import gettext
-from urlparse import urlparse
-from sfa.util.sfalogging import logger
-from sfa.util.config import Config
-
-try:
- from sfa.openstack.client import GlanceClient, NovaClient, KeystoneClient
- has_nova = True
-except:
- has_nova = False
-
-
-class Shell:
- """
- A simple native shell to a nova backend.
- This class can receive all nova calls to the underlying testbed
- """
-
- # dont care about limiting calls yet
- direct_calls = []
- alias_calls = {}
-
- # use the 'capability' auth mechanism for higher performance when the PLC
- # db is local
- def __init__(self, config=None):
- if not config:
- config = Config()
- if has_nova:
- # instantiate managers
- self.auth_manager = KeystoneClient(config=config)
- self.image_manager = GlanceClient(config=config)
- self.nova_manager = NovaClient(config=config)
- else:
- logger.debug('nova access - REST')
- raise SfaNotImplemented('nova access - Rest')
+++ /dev/null
-import os
-import tempfile
-
-from sfa.client.sfaserverproxy import SfaServerProxy
-from sfa.planetlab.nodemanager import NodeManager
-
-from sfa.trust.credential import Credential
-from sfa.trust.certificate import Certificate, Keypair
-from sfa.trust.gid import GID
-
-####################
-
-
-class PlComponentDriver:
- """
- This class is the type for the toplevel 'api' object
- when running the component manager inside a planetlab node.
- As such it runs an SFA-compliant interface and thus inherits SfaApi
- However the fact that we run inside a planetlab nodes requires
- some tweaks as compared with a service running in the infrastructure.
- """
-
- def __init__(self, config):
- self.nodemanager = NodeManager(config)
-
- def sliver_exists(self):
- sliver_dict = self.nodemanager.GetXIDs()
- # xxx slicename is undefined
- if slicename in sliver_dict.keys():
- return True
- else:
- return False
-
- def get_registry(self):
- addr, port = self.config.SFA_REGISTRY_HOST, self.config.SFA_REGISTRY_PORT
- url = "http://%(addr)s:%(port)s" % locals()
- # xxx this would require access to the api...
- server = SfaServerProxy(url, self.key_file, self.cert_file)
- return server
-
- def get_node_key(self):
- # this call requires no authentication,
- # so we can generate a random keypair here
- subject = "component"
- (kfd, keyfile) = tempfile.mkstemp()
- (cfd, certfile) = tempfile.mkstemp()
- key = Keypair(create=True)
- key.save_to_file(keyfile)
- cert = Certificate(subject=subject)
- cert.set_issuer(key=key, subject=subject)
- cert.set_pubkey(key)
- cert.sign()
- cert.save_to_file(certfile)
- registry = self.get_registry()
- # the registry will scp the key onto the node
- registry.get_key_from_incoming_ip()
-
- # override the method in SfaApi
- def getCredential(self):
- """
- Get our credential from a remote registry
- """
- path = self.config.SFA_DATA_DIR
- config_dir = self.config.config_path
- cred_filename = path + os.sep + 'node.cred'
- try:
- credential = Credential(filename=cred_filename)
- return credential.save_to_string(save_parents=True)
- except IOError:
- node_pkey_file = config_dir + os.sep + "node.key"
- node_gid_file = config_dir + os.sep + "node.gid"
- cert_filename = path + os.sep + 'server.cert'
- if not os.path.exists(node_pkey_file) or \
- not os.path.exists(node_gid_file):
- self.get_node_key()
-
- # get node's hrn
- gid = GID(filename=node_gid_file)
- hrn = gid.get_hrn()
- # get credential from registry
- cert_str = Certificate(
- filename=cert_filename).save_to_string(save_parents=True)
- registry = self.get_registry()
- cred = registry.GetSelfCredential(cert_str, hrn, 'node')
- # xxx credfile is undefined
- Credential(string=cred).save_to_file(credfile, save_parents=True)
-
- return cred
-
- def clean_key_cred(self):
- """
- remove the existing keypair and cred and generate new ones
- """
- files = ["server.key", "server.cert", "node.cred"]
- for f in files:
- # xxx KEYDIR is undefined, could be meant to be "/var/lib/sfa/"
- # from sfa_component_setup.py
- filepath = KEYDIR + os.sep + f
- if os.path.isfile(filepath):
- os.unlink(f)
-
- # install the new key pair
- # GetCredential will take care of generating the new keypair
- # and credential
- self.get_node_key()
- self.getCredential()
For safety this is limited to a set of hard-coded calls
"""
- direct_calls = ['AddNode', 'AddPerson', 'AddPersonKey', 'AddPersonToSite',
- 'AddPersonToSlice', 'AddRoleToPerson', 'AddSite', 'AddSiteTag', 'AddSlice',
- 'AddSliceTag', 'AddSliceToNodes', 'BindObjectToPeer', 'DeleteKey',
- 'DeleteNode', 'DeletePerson', 'DeletePersonFromSlice', 'DeleteSite',
- 'DeleteSlice', 'DeleteSliceFromNodes', 'DeleteSliceTag', 'GetInitScripts',
- 'GetInterfaces', 'GetKeys', 'GetNodeTags', 'GetPeers',
- 'GetPersons', 'GetSlices', 'GetSliceTags', 'GetTagTypes',
- 'UnBindObjectFromPeer', 'UpdateNode', 'UpdatePerson', 'UpdateSite',
- 'UpdateSlice', 'UpdateSliceTag',
- # also used as-is in importer
- 'GetSites', 'GetNodes', 'GetSiteTags',
- # Lease management methods
- 'GetLeases', 'GetLeaseGranularity', 'DeleteLeases', 'UpdateLeases',
- 'AddLeases',
- # HRN management methods
- 'SetPersonHrn', 'GetPersonHrn', 'SetSliceHrn', 'GetSliceHrn',
- 'SetNodeHrn', 'GetNodeHrn', 'GetSiteHrn', 'SetSiteHrn',
- # Tag slice/person/site created by SFA
- 'SetPersonSfaCreated', 'GetPersonSfaCreated', 'SetSliceSfaCreated',
- 'GetSliceSfaCreated', 'SetNodeSfaCreated', 'GetNodeSfaCreated',
- 'GetSiteSfaCreated', 'SetSiteSfaCreated',
- ]
+ direct_calls = [
+ 'AddNode', 'AddPerson', 'AddPersonKey', 'AddPersonToSite',
+ 'AddPersonToSlice', 'AddRoleToPerson', 'AddSite', 'AddSiteTag', 'AddSlice',
+ 'AddSliceTag', 'AddSliceToNodes', 'BindObjectToPeer', 'DeleteKey',
+ 'DeleteNode', 'DeletePerson', 'DeletePersonFromSlice', 'DeleteSite',
+ 'DeleteSlice', 'DeleteSliceFromNodes', 'DeleteSliceTag', 'GetInitScripts',
+ 'GetInterfaces', 'GetKeys', 'GetNodeTags', 'GetPeers',
+ 'GetPersons', 'GetSlices', 'GetSliceTags', 'GetTagTypes',
+ 'UnBindObjectFromPeer', 'UpdateNode', 'UpdatePerson', 'UpdateSite',
+ 'UpdateSlice', 'UpdateSliceTag',
+ # also used as-is in importer
+ 'GetSites', 'GetNodes', 'GetSiteTags',
+ # Lease management methods
+ 'GetLeases', 'GetLeaseGranularity', 'DeleteLeases', 'UpdateLeases',
+ 'AddLeases',
+ # HRN management methods
+ 'SetPersonHrn', 'GetPersonHrn', 'SetSliceHrn', 'GetSliceHrn',
+ 'SetNodeHrn', 'GetNodeHrn', 'GetSiteHrn', 'SetSiteHrn',
+ # Tag slice/person/site created by SFA
+ 'SetPersonSfaCreated', 'GetPersonSfaCreated', 'SetSliceSfaCreated',
+ 'GetSliceSfaCreated', 'SetNodeSfaCreated', 'GetNodeSfaCreated',
+ 'GetSiteSfaCreated', 'SetSiteSfaCreated',
+ ]
# support for other names - this is experimental
- alias_calls = {'get_authorities': 'GetSites',
- 'get_nodes': 'GetNodes',
- }
+ alias_calls = {
+ 'get_authorities': 'GetSites',
+ 'get_nodes': 'GetNodes',
+ }
# use the 'capability' auth mechanism for higher performance when the PLC
# db is local
except:
plc_direct_access = False
if is_local and plc_direct_access:
- logger.info('plshell access - capability')
- self.plauth = {'AuthMethod': 'capability',
- 'Username': str(config.SFA_PLC_USER),
- 'AuthString': str(config.SFA_PLC_PASSWORD),
- }
+ logger.debug('plshell access - capability')
+ self.plauth = {
+ 'AuthMethod': 'capability',
+ 'Username': str(config.SFA_PLC_USER),
+ 'AuthString': str(config.SFA_PLC_PASSWORD),
+ }
self.proxy = PLC.Shell.Shell()
else:
- logger.info('plshell access - xmlrpc')
- self.plauth = {'AuthMethod': 'password',
- 'Username': str(config.SFA_PLC_USER),
- 'AuthString': str(config.SFA_PLC_PASSWORD),
- }
+ logger.debug('plshell access - xmlrpc')
+ self.plauth = {
+ 'AuthMethod': 'password',
+ 'Username': str(config.SFA_PLC_USER),
+ 'AuthString': str(config.SFA_PLC_PASSWORD),
+ }
self.proxy = xmlrpclib.Server(url, verbose=False, allow_none=True)
def __getattr__(self, name):
from sfa.rspecs.rspec import RSpec
from sfa.planetlab.vlink import VLink
from sfa.planetlab.topology import Topology
-from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename, xrn_to_hostname, top_auth, hash_loginbase
+from sfa.planetlab.plxrn import (PlXrn, hrn_to_pl_slicename, xrn_to_hostname,
+ top_auth, hash_loginbase)
from sfa.storage.model import SliverAllocation
MAXINT = 2L**31 - 1
person_ids = list(person_ids)
all_slice_tag_ids = list(all_slice_tag_ids)
# Get user information
- all_persons_list = self.driver.shell.GetPersons({'person_id': person_ids, 'enabled': True},
- ['person_id', 'enabled', 'key_ids'])
+ all_persons_list = self.driver.shell.GetPersons(
+ {'person_id': person_ids, 'enabled': True},
+ ['person_id', 'enabled', 'key_ids'])
all_persons = {}
for person in all_persons_list:
all_persons[person['person_id']] = person
sliver_attributes = []
if node is not None:
- for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+ for sliver_attribute in filter(
+ lambda a: a['node_id'] == node['node_id'],
+ slice_tags):
sliver_attributes.append(sliver_attribute['tagname'])
attributes.append({'tagname': sliver_attribute['tagname'],
'value': sliver_attribute['value']})
# set nodegroup slice attributes
- for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+ for slice_tag in filter(
+ lambda a: a['nodegroup_id'] in node['nodegroup_ids'],
+ slice_tags):
# Do not set any nodegroup slice attributes for
# which there is at least one sliver attribute
# already set.
attributes.append({'tagname': slice_tag['tagname'],
'value': slice_tag['value']})
- for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+ for slice_tag in filter(
+ lambda a: a['node_id'] is None,
+ slice_tags):
# Do not set any global slice attributes for
# which there is at least one sliver attribute
# already set.
def verify_slice_leases(self, slice, rspec_requested_leases):
- leases = self.driver.shell.GetLeases({'name': slice['name'], 'clip': int(time.time())},
- ['lease_id', 'name', 'hostname', 't_from', 't_until'])
+ leases = self.driver.shell.GetLeases(
+ {'name': slice['name'], 'clip': int(time.time())},
+ ['lease_id', 'name', 'hostname', 't_from', 't_until'])
grain = self.driver.shell.GetLeaseGranularity()
requested_leases = []
if slice_name != slice['name']:
continue
- elif Xrn(lease['component_id']).get_authority_urn().split(':')[0] != self.driver.hrn:
+ elif (Xrn(lease['component_id']).get_authority_urn().split(':')[0]
+ != self.driver.hrn):
continue
hostname = xrn_to_hostname(lease['component_id'])
# prepare actual slice leases by lease_id
leases_by_id = {}
for lease in leases:
- leases_by_id[lease['lease_id']] = {'name': lease['name'], 'hostname': lease['hostname'],
- 't_from': lease['t_from'], 't_until': lease['t_until']}
+ leases_by_id[lease['lease_id']] = {
+ 'name': lease['name'], 'hostname': lease['hostname'],
+ 't_from': lease['t_from'], 't_until': lease['t_until']}
added_leases = []
kept_leases_id = []
try:
self.driver.shell.DeleteLeases(deleted_leases_id)
for lease in added_leases:
- self.driver.shell.AddLeases(lease['hostname'], slice['name'], lease[
- 't_from'], lease['t_until'])
+ self.driver.shell.AddLeases(
+ lease['hostname'], slice['name'],
+ lease['t_from'], lease['t_until'])
- except:
+ except Exception:
logger.log_exc('Failed to add/remove slice leases')
return leases
self.driver.shell.DeleteSliceFromNodes(
slice['name'], deleted_nodes)
- except:
+ except Exception:
logger.log_exc('Failed to add/remove slice from nodes')
slices = self.driver.shell.GetSlices(slice['name'], ['node_ids'])
self.verify_slice_tags(slice, slice_tags, {
'pltags': 'append'}, admin=True)
- def verify_site(self, slice_xrn, slice_record=None, sfa_peer=None, options=None):
+ def verify_site(self, slice_xrn,
+ slice_record=None, sfa_peer=None, options=None):
if slice_record is None:
slice_record = {}
if options is None:
login_base = hash_loginbase(site_hrn)
# filter sites by hrn
- sites = self.driver.shell.GetSites({'peer_id': None, 'hrn': site_hrn},
- ['site_id', 'name', 'abbreviated_name', 'login_base', 'hrn'])
+ sites = self.driver.shell.GetSites(
+ {'peer_id': None, 'hrn': site_hrn},
+ ['site_id', 'name', 'abbreviated_name', 'login_base', 'hrn'])
# alredy exists
if sites:
return site
- def verify_slice(self, slice_hrn, slice_record, sfa_peer, expiration, options=None):
+ def verify_slice(self, slice_hrn, slice_record,
+ sfa_peer, expiration, options=None):
if options is None:
options = {}
top_auth_hrn = top_auth(slice_hrn)
expires = int(datetime_to_epoch(utcparse(expiration)))
# Filter slices by HRN
- slices = self.driver.shell.GetSlices({'peer_id': None, 'hrn': slice_hrn},
- ['slice_id', 'name', 'hrn', 'expires'])
+ slices = self.driver.shell.GetSlices(
+ {'peer_id': None, 'hrn': slice_hrn},
+ ['slice_id', 'name', 'hrn', 'expires'])
if slices:
slice = slices[0]
return person_id
- def verify_persons(self, slice_hrn, slice_record, users, sfa_peer, options=None):
+ def verify_persons(self, slice_hrn, slice_record,
+ users, sfa_peer, options=None):
if options is None:
options = {}
site_id = site['site_id']
# locate the slice object
- slice = self.driver.shell.GetSlices({'peer_id': None, 'hrn': slice_hrn}, [
- 'slice_id', 'hrn', 'person_ids'])[0]
+ slice = self.driver.shell.GetSlices(
+ {'peer_id': None, 'hrn': slice_hrn},
+ ['slice_id', 'hrn', 'person_ids'])[0]
slice_id = slice['slice_id']
slice_person_ids = slice['person_ids']
# and for this we need all the Person objects; we already have the target_existing ones
# also we avoid issuing a call if possible
target_created_persons = [] if not target_created_person_ids \
- else self.driver.shell.GetPersons \
- ({'peer_id': None, 'person_id': target_created_person_ids}, person_fields)
- persons_by_person_id = {person['person_id']: person
- for person in target_existing_persons + target_created_persons}
+ else self.driver.shell.GetPersons(
+ {'peer_id': None, 'person_id': target_created_person_ids},
+ person_fields)
+ persons_by_person_id = {
+ person['person_id']: person
+ for person in target_existing_persons + target_created_persons}
def user_by_person_id(person_id):
person = persons_by_person_id[person_id]
# return hrns of the newly added persons
- return [persons_by_person_id[person_id]['hrn'] for person_id in add_person_ids]
+ return [persons_by_person_id[person_id]['hrn']
+ for person_id in add_person_ids]
def verify_keys(self, persons_to_verify_keys, options=None):
if options is None:
key = {'key': key_string, 'key_type': 'ssh'}
self.driver.shell.AddPersonKey(int(person_id), key)
- def verify_slice_tags(self, slice, requested_slice_attributes, options=None, admin=False):
+ def verify_slice_tags(self, slice, requested_slice_attributes,
+ options=None, admin=False):
"""
This function deals with slice tags, and supports 3 modes described
in the 'pltags' option that can be either
(*) 'ignore' (default) - do nothing
(*) 'append' - only add incoming tags, that do not match an existing tag
- (*) 'sync' - tries to do the plain wholesale thing,
+ (*) 'sync' - tries to do the plain wholesale thing,
i.e. to leave the db in sync with incoming tags
"""
if options is None:
# be removed
tag_found = False
for requested_attribute in requested_slice_attributes:
- if requested_attribute['name'] == slice_tag['tagname'] and \
- requested_attribute['value'] == slice_tag['value']:
+ if (requested_attribute['name'] == slice_tag['tagname'] and
+ requested_attribute['value'] == slice_tag['value']):
tag_found = True
break
# remove tags only if not in append mode
if requested_attribute['name'] in valid_tag_names:
tag_found = False
for existing_attribute in existing_slice_tags:
- if requested_attribute['name'] == existing_attribute['tagname'] and \
- requested_attribute['value'] == existing_attribute['value']:
+ if (requested_attribute['name'] == existing_attribute['tagname'] and \
+ requested_attribute['value'] == existing_attribute['value']):
tag_found = True
break
if not tag_found:
name = tag_or_att[
'tagname'] if 'tagname' in tag_or_att else tag_or_att['name']
return "SliceTag slice={}, tagname={} value={}, node_id={}"\
- .format(slice['name'], tag_or_att['name'], tag_or_att['value'], tag_or_att.get('node_id'))
+ .format(slice['name'], tag_or_att['name'],
+ tag_or_att['value'], tag_or_att.get('node_id'))
# remove stale tags
for tag in slice_tags_to_remove:
friendly_message(tag)))
self.driver.shell.DeleteSliceTag(tag['slice_tag_id'])
except Exception as e:
- logger.warn("Failed to remove slice tag {}\nCause:{}"
- .format(friendly_message(tag), e))
+ logger.warning("Failed to remove slice tag {}\nCause:{}"
+ .format(friendly_message(tag), e))
# add requested_tags
for attribute in slice_attributes_to_add:
try:
logger.info("Adding Slice Tag {}".format(
friendly_message(attribute)))
- self.driver.shell.AddSliceTag(slice['name'], attribute['name'],
- attribute['value'], attribute.get('node_id', None))
+ self.driver.shell.AddSliceTag(
+ slice['name'], attribute['name'],
+ attribute['value'], attribute.get('node_id', None))
except Exception as e:
- logger.warn("Failed to add slice tag {}\nCause:{}"
- .format(friendly_message(attribute), e))
+ logger.warning("Failed to add slice tag {}\nCause:{}"
+ .format(friendly_message(attribute), e))
+++ /dev/null
-from sfa.util.sfalogging import logger
-from sfa.util.xml import XpathFilter
-from sfa.util.xrn import Xrn
-
-from sfa.rspecs.elements.element import Element
-from sfa.rspecs.elements.node import NodeElement
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.location import Location
-from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.disk_image import DiskImage
-from sfa.rspecs.elements.interface import Interface
-from sfa.rspecs.elements.bwlimit import BWlimit
-from sfa.rspecs.elements.pltag import PLTag
-from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
-from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
-from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
-from sfa.rspecs.elements.lease import Lease
-from sfa.rspecs.elements.spectrum import Spectrum
-from sfa.rspecs.elements.channel import Channel
-
-
-class NITOSv1Channel:
-
- @staticmethod
- def add_channels(xml, channels):
-
- network_elems = xml.xpath('//network')
- if len(network_elems) > 0:
- network_elem = network_elems[0]
- elif len(channels) > 0:
- # dirty hack that handles no resource manifest rspec
- network_urn = "omf"
- network_elem = xml.add_element('network', name=network_urn)
- else:
- network_elem = xml
-
-# spectrum_elems = xml.xpath('//spectrum')
-# spectrum_elem = xml.add_element('spectrum')
-
-# if len(spectrum_elems) > 0:
-# spectrum_elem = spectrum_elems[0]
-# elif len(channels) > 0:
-# spectrum_elem = xml.add_element('spectrum')
-# else:
-# spectrum_elem = xml
-
- spectrum_elem = network_elem.add_instance('spectrum', [])
-
- channel_elems = []
- for channel in channels:
- channel_fields = ['channel_num',
- 'frequency', 'standard', 'component_id']
- channel_elem = spectrum_elem.add_instance(
- 'channel', channel, channel_fields)
- channel_elems.append(channel_elem)
-
- @staticmethod
- def get_channels(xml, filter=None):
- if filter is None:
- filter = {}
- xpath = '//channel%s | //default:channel%s' % (
- XpathFilter.xpath(filter), XpathFilter.xpath(filter))
- channel_elems = xml.xpath(xpath)
- return NITOSv1Channel.get_channel_objs(channel_elems)
-
- @staticmethod
- def get_channel_objs(channel_elems):
- channels = []
- for channel_elem in channel_elems:
- channel = Channel(channel_elem.attrib, channel_elem)
- channel['channel_num'] = channel_elem.attrib['channel_num']
- channel['frequency'] = channel_elem.attrib['frequency']
- channel['standard'] = channel_elem.attrib['standard']
- channel['component_id'] = channel_elem.attrib['component_id']
-
- channels.append(channel)
- return channels
+++ /dev/null
-from sfa.util.sfalogging import logger
-from sfa.util.xml import XpathFilter
-from sfa.util.xrn import Xrn
-from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
-
-from sfa.rspecs.elements.element import Element
-from sfa.rspecs.elements.node import NodeElement
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.location import Location
-from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.disk_image import DiskImage
-from sfa.rspecs.elements.interface import Interface
-from sfa.rspecs.elements.bwlimit import BWlimit
-from sfa.rspecs.elements.pltag import PLTag
-from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
-from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
-from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
-from sfa.rspecs.elements.lease import Lease
-from sfa.rspecs.elements.channel import Channel
-
-
-class NITOSv1Lease:
-
- @staticmethod
- def add_leases(xml, leases, channels):
-
- network_elems = xml.xpath('//network')
- if len(network_elems) > 0:
- network_elem = network_elems[0]
- elif len(leases) > 0:
- network_urn = Xrn(leases[0]['component_id']
- ).get_authority_urn().split(':')[0]
- network_elem = xml.add_element('network', name=network_urn)
- else:
- network_elem = xml
-
- # group the leases by slice and timeslots
- grouped_leases = []
-
- while leases:
- slice_id = leases[0]['slice_id']
- start_time = leases[0]['start_time']
- duration = leases[0]['duration']
- group = []
-
- for lease in leases:
- if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
- group.append(lease)
-
- grouped_leases.append(group)
-
- for lease1 in group:
- leases.remove(lease1)
-
- lease_elems = []
- for lease in grouped_leases:
- #lease[0]['start_time'] = datetime_to_string(utcparse(lease[0]['start_time']))
-
- lease_fields = ['slice_id', 'start_time', 'duration']
- lease_elem = network_elem.add_instance(
- 'lease', lease[0], lease_fields)
- lease_elems.append(lease_elem)
-
- # add nodes of this lease
- for node in lease:
- lease_elem.add_instance('node', node, ['component_id'])
-
- # add reserved channels of this lease
- #channels = [{'channel_id': 1}, {'channel_id': 2}]
- for channel in channels:
- #channel['start_time'] = datetime_to_string(utcparse(channel['start_time']))
- if channel['slice_id'] == lease[0]['slice_id'] and channel['start_time'] == lease[0]['start_time'] and channel['duration'] == lease[0]['duration']:
- lease_elem.add_instance(
- 'channel', channel, ['component_id'])
-
- @staticmethod
- def get_leases(xml, filter=None):
- if filter is None:
- filter = {}
- xpath = '//lease%s | //default:lease%s' % (
- XpathFilter.xpath(filter), XpathFilter.xpath(filter))
- lease_elems = xml.xpath(xpath)
- return NITOSv1Lease.get_lease_objs(lease_elems)
-
- @staticmethod
- def get_lease_objs(lease_elems):
- leases = []
- channels = []
- for lease_elem in lease_elems:
- # get nodes
- node_elems = lease_elem.xpath('./default:node | ./node')
- for node_elem in node_elems:
- lease = Lease(lease_elem.attrib, lease_elem)
- lease['slice_id'] = lease_elem.attrib['slice_id']
- #lease['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
- lease['start_time'] = lease_elem.attrib['start_time']
- lease['duration'] = lease_elem.attrib['duration']
- lease['component_id'] = node_elem.attrib['component_id']
- lease['type'] = 'node'
- leases.append(lease)
- # get channels
- channel_elems = lease_elem.xpath('./default:channel | ./channel')
- for channel_elem in channel_elems:
- channel = Channel(channel_elem.attrib, channel_elem)
- channel['slice_id'] = lease_elem.attrib['slice_id']
- #channel['start_time'] = datetime_to_epoch(utcparse(lease_elem.attrib['start_time']))
- channel['start_time'] = lease_elem.attrib['start_time']
- channel['duration'] = lease_elem.attrib['duration']
- channel['component_id'] = channel_elem.attrib['component_id']
- channel['type'] = 'channel'
- channels.append(channel)
-
- return leases + channels
+++ /dev/null
-from sfa.util.sfalogging import logger
-from sfa.util.xml import XpathFilter
-from sfa.util.xrn import Xrn
-
-from sfa.rspecs.elements.element import Element
-from sfa.rspecs.elements.node import NodeElement
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.location import Location
-from sfa.rspecs.elements.position_3d import Position3D
-from sfa.rspecs.elements.hardware_type import HardwareType
-from sfa.rspecs.elements.disk_image import DiskImage
-from sfa.rspecs.elements.interface import Interface
-from sfa.rspecs.elements.bwlimit import BWlimit
-from sfa.rspecs.elements.pltag import PLTag
-from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
-from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
-from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
-
-
-class NITOSv1Node:
-
- @staticmethod
- def add_nodes(xml, nodes, rspec_content_type=None):
- network_elems = xml.xpath('//network')
- if len(network_elems) > 0:
- network_elem = network_elems[0]
- elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
- network_urn = nodes[0]['component_manager_id']
- network_elem = xml.add_element(
- 'network', name=Xrn(network_urn).get_hrn())
- else:
- network_elem = xml
-
- # needs to be improuved to retreive the gateway addr dynamically.
- gateway_addr = 'nitlab.inf.uth.gr'
-
- node_elems = []
- for node in nodes:
- node_fields = ['component_manager_id',
- 'component_id', 'boot_state']
- node_elem = network_elem.add_instance('node', node, node_fields)
- node_elems.append(node_elem)
-
- # determine network hrn
- network_hrn = None
- if 'component_manager_id' in node and node['component_manager_id']:
- network_hrn = Xrn(node['component_manager_id']).get_hrn()
-
- # set component_name attribute and hostname element
- if 'component_id' in node and node['component_id']:
- component_name = Xrn(xrn=node['component_id']).get_leaf()
- node_elem.set('component_name', component_name)
- hostname_elem = node_elem.add_element('hostname')
- hostname_elem.set_text(component_name)
-
- # set site id
- if 'authority_id' in node and node['authority_id']:
- node_elem.set('site_id', node['authority_id'])
-
- # add locaiton
- location = node.get('location')
- if location:
- node_elem.add_instance('location', location, Location.fields)
-
- # add 3D Position of the node
- position_3d = node.get('position_3d')
- if position_3d:
- node_elem.add_instance(
- 'position_3d', position_3d, Position3D.fields)
-
- # all nitos nodes are exculsive
- exclusive_elem = node_elem.add_element('exclusive')
- exclusive_elem.set_text('TRUE')
-
- # In order to access nitos nodes, one need to pass through the nitos gateway
- # here we advertise Nitos access gateway address
- gateway_elem = node_elem.add_element('gateway')
- gateway_elem.set_text(gateway_addr)
-
- # add granularity of the reservation system
- granularity = node.get('granularity')['grain']
- if granularity:
- #node_elem.add_instance('granularity', granularity, granularity.fields)
- granularity_elem = node_elem.add_element('granularity')
- granularity_elem.set_text(str(granularity))
- # add hardware type
- #hardware_type = node.get('hardware_type')
- # if hardware_type:
- # node_elem.add_instance('hardware_type', hardware_type)
- hardware_type_elem = node_elem.add_element('hardware_type')
- hardware_type_elem.set_text(node.get('hardware_type'))
-
- if isinstance(node.get('interfaces'), list):
- for interface in node.get('interfaces', []):
- node_elem.add_instance('interface', interface, [
- 'component_id', 'client_id', 'ipv4'])
-
- # if 'bw_unallocated' in node and node['bw_unallocated']:
- # bw_unallocated = etree.SubElement(node_elem, 'bw_unallocated', units='kbps').text = str(int(node['bw_unallocated'])/1000)
-
- PGv2Services.add_services(node_elem, node.get('services', []))
- tags = node.get('tags', [])
- if tags:
- for tag in tags:
- tag_elem = node_elem.add_element(tag['tagname'])
- tag_elem.set_text(tag['value'])
- NITOSv1Sliver.add_slivers(node_elem, node.get('slivers', []))
-
- # add sliver tag in Request Rspec
- if rspec_content_type == "request":
- node_elem.add_instance('sliver', '', [])
-
- @staticmethod
- def add_slivers(xml, slivers):
- component_ids = []
- for sliver in slivers:
- filter = {}
- if isinstance(sliver, str):
- filter['component_id'] = '*%s*' % sliver
- sliver = {}
- elif 'component_id' in sliver and sliver['component_id']:
- filter['component_id'] = '*%s*' % sliver['component_id']
- if not filter:
- continue
- nodes = NITOSv1Node.get_nodes(xml, filter)
- if not nodes:
- continue
- node = nodes[0]
- NITOSv1Sliver.add_slivers(node, sliver)
-
- @staticmethod
- def remove_slivers(xml, hostnames):
- for hostname in hostnames:
- nodes = NITOSv1Node.get_nodes(
- xml, {'component_id': '*%s*' % hostname})
- for node in nodes:
- slivers = NITOSv1Sliver.get_slivers(node.element)
- for sliver in slivers:
- node.element.remove(sliver.element)
-
- @staticmethod
- def get_nodes(xml, filter=None):
- if filter is None:
- filter = {}
- xpath = '//node%s | //default:node%s' % (
- XpathFilter.xpath(filter), XpathFilter.xpath(filter))
- node_elems = xml.xpath(xpath)
- return NITOSv1Node.get_node_objs(node_elems)
-
- @staticmethod
- def get_nodes_with_slivers(xml):
- xpath = '//node[count(sliver)>0] | //default:node[count(default:sliver)>0]'
- node_elems = xml.xpath(xpath)
- return NITOSv1Node.get_node_objs(node_elems)
-
- @staticmethod
- def get_node_objs(node_elems):
- nodes = []
- for node_elem in node_elems:
- node = NodeElement(node_elem.attrib, node_elem)
- if 'site_id' in node_elem.attrib:
- node['authority_id'] = node_elem.attrib['site_id']
- # get location
- location_elems = node_elem.xpath('./default:location | ./location')
- locations = [loc_elem.get_instance(
- Location) for loc_elem in location_elems]
- if len(locations) > 0:
- node['location'] = locations[0]
- # get bwlimit
- bwlimit_elems = node_elem.xpath('./default:bw_limit | ./bw_limit')
- bwlimits = [bwlimit_elem.get_instance(
- BWlimit) for bwlimit_elem in bwlimit_elems]
- if len(bwlimits) > 0:
- node['bwlimit'] = bwlimits[0]
- # get interfaces
- iface_elems = node_elem.xpath('./default:interface | ./interface')
- ifaces = [iface_elem.get_instance(
- Interface) for iface_elem in iface_elems]
- node['interfaces'] = ifaces
- # get services
- node['services'] = PGv2Services.get_services(node_elem)
- # get slivers
- node['slivers'] = NITOSv1Sliver.get_slivers(node_elem)
- # get tags
- node['tags'] = NITOSv1PLTag.get_pl_tags(
- node_elem, ignore=NodeElement.fields + ["hardware_type"])
- # get hardware types
- hardware_type_elems = node_elem.xpath(
- './default:hardware_type | ./hardware_type')
- node['hardware_types'] = [hw_type.get_instance(
- HardwareType) for hw_type in hardware_type_elems]
-
- # temporary... play nice with old slice manager rspec
- if not node['component_name']:
- hostname_elem = node_elem.find("hostname")
- if hostname_elem != None:
- node['component_name'] = hostname_elem.text
-
- nodes.append(node)
- return nodes
+++ /dev/null
-from sfa.rspecs.elements.element import Element
-from sfa.rspecs.elements.pltag import PLTag
-
-
-class NITOSv1PLTag:
-
- @staticmethod
- def add_pl_tag(xml, name, value):
- for pl_tag in pl_tags:
- pl_tag_elem = xml.add_element(name)
- pl_tag_elem.set_text(value)
-
- @staticmethod
- def get_pl_tags(xml, ignore=None):
- if ignore is None:
- ignore = []
- pl_tags = []
- for elem in xml.iterchildren():
- if elem.tag not in ignore:
- pl_tag = PLTag({'tagname': elem.tag, 'value': elem.text})
- pl_tags.append(pl_tag)
- return pl_tags
+++ /dev/null
-from sfa.util.xrn import Xrn
-from sfa.util.xml import XmlElement
-
-from sfa.rspecs.elements.element import Element
-from sfa.rspecs.elements.sliver import Sliver
-from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
-
-#from sfa.planetlab.plxrn import PlXrn
-
-
-class NITOSv1Sliver:
-
- @staticmethod
- def add_slivers(xml, slivers):
- if not slivers:
- return
- if not isinstance(slivers, list):
- slivers = [slivers]
- for sliver in slivers:
- sliver_elem = xml.add_instance('sliver', sliver, ['name'])
- tags = sliver.get('tags', [])
- if tags:
- for tag in tags:
- NITOSv1Sliver.add_sliver_attribute(
- sliver_elem, tag['tagname'], tag['value'])
- if sliver.get('sliver_id'):
- name = Xrn(xrn=sliver.get('sliver_id')
- ).get_hrn().split('.')[-1]
- sliver_elem.set('name', name)
-
- @staticmethod
- def add_sliver_attribute(xml, name, value):
- elem = xml.add_element(name)
- elem.set_text(value)
-
- @staticmethod
- def get_sliver_attributes(xml):
- attribs = []
- for elem in xml.iterchildren():
- if elem.tag not in Sliver.fields:
- xml_element = XmlElement(elem, xml.namespaces)
- instance = Element(fields=xml_element, element=elem)
- instance['name'] = elem.tag
- instance['value'] = elem.text
- attribs.append(instance)
- return attribs
-
- @staticmethod
- def get_slivers(xml, filter=None):
- if filter is None:
- filter = {}
- xpath = './default:sliver | ./sliver'
- sliver_elems = xml.xpath(xpath)
- slivers = []
- for sliver_elem in sliver_elems:
- sliver = Sliver(sliver_elem.attrib, sliver_elem)
- if 'component_id' in xml.attrib:
- sliver['component_id'] = xml.attrib['component_id']
- sliver['tags'] = NITOSv1Sliver.get_sliver_attributes(sliver_elem)
- slivers.append(sliver)
- return slivers
+++ /dev/null
-from sfa.rspecs.versions.pgv2 import PGv2
-
-
-class FedericaAd (PGv2):
- enabled = True
- type = 'Fedrica'
- content_type = 'ad'
- schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/ad.xsd'
- namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
-
-
-class FedericaRequest (PGv2):
- enabled = True
- type = 'Fedrica'
- content_type = 'request'
- schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/request.xsd'
- namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
-
-
-class FedericaManifest (PGv2):
- enabled = True
- type = 'Fedrica'
- content_type = 'manifest'
- schema = 'http://sorch.netmode.ntua.gr/ws/RSpec/manifest.xsd'
- namespace = 'http://sorch.netmode.ntua.gr/ws/RSpec'
+++ /dev/null
-from __future__ import print_function
-
-from copy import deepcopy
-from lxml import etree
-
-from sfa.util.sfalogging import logger
-from sfa.util.xrn import hrn_to_urn, urn_to_hrn
-from sfa.rspecs.version import RSpecVersion
-from sfa.rspecs.elements.element import Element
-from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
-from sfa.rspecs.elements.versions.nitosv1Node import NITOSv1Node
-from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
-from sfa.rspecs.elements.versions.nitosv1Lease import NITOSv1Lease
-from sfa.rspecs.elements.versions.nitosv1Channel import NITOSv1Channel
-
-
-class NITOSv1(RSpecVersion):
- enabled = True
- type = 'NITOS'
- content_type = '*'
- version = '1'
- schema = None
- namespace = None
- extensions = {}
- namespaces = None
- template = '<RSpec type="%s"></RSpec>' % type
-
- # Network
- def get_networks(self):
- network_elems = self.xml.xpath('//network')
- networks = [network_elem.get_instance(fields=['name', 'slice']) for
- network_elem in network_elems]
- return networks
-
- def add_network(self, network):
- network_tags = self.xml.xpath('//network[@name="%s"]' % network)
- if not network_tags:
- network_tag = self.xml.add_element('network', name=network)
- else:
- network_tag = network_tags[0]
- return network_tag
-
- # Nodes
-
- def get_nodes(self, filter=None):
- return NITOSv1Node.get_nodes(self.xml, filter)
-
- def get_nodes_with_slivers(self):
- return NITOSv1Node.get_nodes_with_slivers(self.xml)
-
- def add_nodes(self, nodes, network=None, no_dupes=False, rspec_content_type=None):
- NITOSv1Node.add_nodes(self.xml, nodes, rspec_content_type)
-
- def merge_node(self, source_node_tag, network, no_dupes=False):
- if no_dupes and self.get_node_element(node['hostname']):
- # node already exists
- return
-
- network_tag = self.add_network(network)
- network_tag.append(deepcopy(source_node_tag))
-
- # Slivers
-
- def add_slivers(self, hostnames, attributes=None, sliver_urn=None, append=False):
- if attributes is None:
- attributes = []
- # add slice name to network tag
- network_tags = self.xml.xpath('//network')
- if network_tags:
- network_tag = network_tags[0]
- network_tag.set('slice', urn_to_hrn(sliver_urn)[0])
-
- # add slivers
- sliver = {'name': sliver_urn,
- 'pl_tags': attributes}
- for hostname in hostnames:
- if sliver_urn:
- sliver['name'] = sliver_urn
- node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
- if not node_elems:
- continue
- node_elem = node_elems[0]
- NITOSv1Sliver.add_slivers(node_elem.element, sliver)
-
- # remove all nodes without slivers
- if not append:
- for node_elem in self.get_nodes():
- if not node_elem['slivers']:
- parent = node_elem.element.getparent()
- parent.remove(node_elem.element)
-
- def remove_slivers(self, slivers, network=None, no_dupes=False):
- NITOSv1Node.remove_slivers(self.xml, slivers)
-
- def get_slice_attributes(self, network=None):
- attributes = []
- nodes_with_slivers = self.get_nodes_with_slivers()
- for default_attribute in self.get_default_sliver_attributes(network):
- attribute = default_attribute.copy()
- attribute['node_id'] = None
- attributes.append(attribute)
- for node in nodes_with_slivers:
- nodename = node['component_name']
- sliver_attributes = self.get_sliver_attributes(nodename, network)
- for sliver_attribute in sliver_attributes:
- sliver_attribute['node_id'] = nodename
- attributes.append(sliver_attribute)
- return attributes
-
- def add_sliver_attribute(self, component_id, name, value, network=None):
- nodes = self.get_nodes({'component_id': '*%s*' % component_id})
- if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
- node = nodes[0]
- slivers = NITOSv1Sliver.get_slivers(node)
- if slivers:
- sliver = slivers[0]
- NITOSv1Sliver.add_sliver_attribute(sliver, name, value)
- else:
- # should this be an assert / raise an exception?
- logger.error("WARNING: failed to find component_id %s" %
- component_id)
-
- def get_sliver_attributes(self, component_id, network=None):
- nodes = self.get_nodes({'component_id': '*%s*' % component_id})
- attribs = []
- if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
- node = nodes[0]
- slivers = NITOSv1Sliver.get_slivers(node.element)
- if slivers is not None and isinstance(slivers, list) and len(slivers) > 0:
- sliver = slivers[0]
- attribs = NITOSv1Sliver.get_sliver_attributes(sliver.element)
- return attribs
-
- def remove_sliver_attribute(self, component_id, name, value, network=None):
- attribs = self.get_sliver_attributes(component_id)
- for attrib in attribs:
- if attrib['name'] == name and attrib['value'] == value:
- # attrib.element.delete()
- parent = attrib.element.getparent()
- parent.remove(attrib.element)
-
- def add_default_sliver_attribute(self, name, value, network=None):
- if network:
- defaults = self.xml.xpath(
- "//network[@name='%s']/sliver_defaults" % network)
- else:
- defaults = self.xml.xpath("//sliver_defaults")
- if not defaults:
- if network:
- network_tag = self.xml.xpath("//network[@name='%s']" % network)
- else:
- network_tag = self.xml.xpath("//network")
- if isinstance(network_tag, list):
- network_tag = network_tag[0]
- defaults = network_tag.add_element('sliver_defaults')
- elif isinstance(defaults, list):
- defaults = defaults[0]
- NITOSv1Sliver.add_sliver_attribute(defaults, name, value)
-
- def get_default_sliver_attributes(self, network=None):
- if network:
- defaults = self.xml.xpath(
- "//network[@name='%s']/sliver_defaults" % network)
- else:
- defaults = self.xml.xpath("//sliver_defaults")
- if not defaults:
- return []
- return NITOSv1Sliver.get_sliver_attributes(defaults[0])
-
- def remove_default_sliver_attribute(self, name, value, network=None):
- attribs = self.get_default_sliver_attributes(network)
- for attrib in attribs:
- if attrib['name'] == name and attrib['value'] == value:
- # attrib.element.delete()
- parent = attrib.element.getparent()
- parent.remove(attrib.element)
-
- # Links
-
- def get_links(self, network=None):
- return []
-
- def get_link_requests(self):
- return []
-
- def add_links(self, links):
- pass
-
- def add_link_requests(self, links):
- pass
-
- # utility
-
- def merge(self, in_rspec):
- """
- Merge contents for specified rspec with current rspec
- """
-
- if not in_rspec:
- return
-
- from sfa.rspecs.rspec import RSpec
- if isinstance(in_rspec, RSpec):
- rspec = in_rspec
- else:
- rspec = RSpec(in_rspec)
- if rspec.version.type.lower() == 'protogeni':
- from sfa.rspecs.rspec_converter import RSpecConverter
- in_rspec = RSpecConverter.to_sfa_rspec(rspec.toxml())
- rspec = RSpec(in_rspec)
-
- # just copy over all networks
- current_networks = self.get_networks()
- networks = rspec.version.get_networks()
- for network in networks:
- current_network = network.get('name')
- if current_network and current_network not in current_networks:
- self.xml.append(network.element)
- current_networks.append(current_network)
-
- # Leases
-
- def get_leases(self, filter=None):
- return NITOSv1Lease.get_leases(self.xml, filter)
-
- def add_leases(self, leases_channels, network=None, no_dupes=False):
- leases, channels = leases_channels
- NITOSv1Lease.add_leases(self.xml, leases, channels)
-
- # Spectrum
-
- def get_channels(self, filter=None):
- return NITOSv1Channel.get_channels(self.xml, filter)
-
- def add_channels(self, channels, network=None, no_dupes=False):
- NITOSv1Channel.add_channels(self.xml, channels)
-
-
-if __name__ == '__main__':
- from sfa.rspecs.rspec import RSpec
- from sfa.rspecs.rspec_elements import *
- r = RSpec('/tmp/resources.rspec')
- r.load_rspec_elements(SFAv1.elements)
- print(r.get(RSpecElements.NODE))
+++ /dev/null
-#
-# Component is a SfaServer that implements the Component interface
-#
-import tempfile
-import os
-import time
-import sys
-
-from sfa.server.sfaserver import SfaServer
-
-# GeniLight client support is optional
-try:
- from egeni.geniLight_client import *
-except ImportError:
- GeniClientLight = None
-
-##
-# Component is a SfaServer that serves component operations.
-
-
-class Component(SfaServer):
- ##
- # Create a new registry object.
- #
- # @param ip the ip address to listen on
- # @param port the port to listen on
- # @param key_file private key filename of registry
- # @param cert_file certificate filename containing public key (could be a GID file)
-
- def __init__(self, ip, port, key_file, cert_file):
- SfaServer.__init__(self, ip, port, key_file,
- cert_file, interface='component')
# TODO: Can all three servers use the same "registry" certificate?
##
-# xxx todo not in the config yet
-component_port = 12346
import os
import os.path
import traceback
import sys
from optparse import OptionParser
-from sfa.util.sfalogging import logger
+from sfa.util.sfalogging import init_logger, logger
from sfa.util.xrn import get_authority, hrn_to_urn
from sfa.util.config import Config
+
from sfa.trust.gid import GID
from sfa.trust.trustedroots import TrustedRoots
from sfa.trust.certificate import Keypair, Certificate
from sfa.trust.hierarchy import Hierarchy
from sfa.trust.gid import GID
+
from sfa.server.sfaapi import SfaApi
from sfa.server.registry import Registries
from sfa.server.aggregate import Aggregates
+
from sfa.client.return_value import ReturnValue
# when installed in standalone we might not have httpd installed
if not os.path.isdir(logdir):
os.mkdir('/var/log/httpd')
- crashlog = os.open('%s/sfa_access_log' % logdir, os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
+ crashlog = os.open('%s/sfa_access_log' % logdir,
+ os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
os.dup2(crashlog, 1)
os.dup2(crashlog, 2)
def install_peer_certs(server_key_file, server_cert_file):
"""
- Attempt to install missing trusted gids and db records for
+ Attempt to install missing trusted gids and db records for
our federated interfaces
"""
# Attempt to get any missing peer gids
gid.save_to_file(gid_filename, save_parents=True)
message = "installed trusted cert for %s" % new_hrn
# log the message
- api.logger.info(message)
- except:
+ logger.info(message)
+ except Exception:
message = "interface: %s\tunable to install trusted gid for %s" % \
(api.interface, new_hrn)
- api.logger.log_exc(message)
+ logger.log_exc(message)
# doesnt matter witch one
update_cert_records(peer_gids)
def update_cert_records(gids):
"""
- Make sure there is a record in the registry for the specified gids.
+ Make sure there is a record in the registry for the specified gids.
Removes old records from the db.
"""
# import db stuff here here so this module can be loaded by PlcComponentApi
record = dbsession.query(RegRecord).filter_by(
hrn=hrn, type=type, pointer=-1).first()
if not record:
- record = RegRecord(dict={'type': type,
- 'hrn': hrn,
- 'authority': get_authority(hrn),
- 'gid': gid.save_to_string(save_parents=True),
- })
+ record = RegRecord(
+ dict={'type': type,
+ 'hrn': hrn,
+ 'authority': get_authority(hrn),
+ 'gid': gid.save_to_string(save_parents=True),
+ })
dbsession.add(record)
dbsession.commit()
parser = OptionParser(usage="sfa-start.py [options]")
parser.add_option("-r", "--registry", dest="registry", action="store_true",
help="run registry server", default=False)
- parser.add_option("-s", "--slicemgr", dest="sm", action="store_true",
- help="run slice manager", default=False)
parser.add_option("-a", "--aggregate", dest="am", action="store_true",
help="run aggregate manager", default=False)
- parser.add_option("-c", "--component", dest="cm", action="store_true",
- help="run component server", default=False)
- parser.add_option("-t", "--trusted-certs", dest="trusted_certs", action="store_true",
+ parser.add_option("-t", "--trusted-certs",
+ dest="trusted_certs", action="store_true",
help="refresh trusted certs", default=False)
parser.add_option("-d", "--daemon", dest="daemon", action="store_true",
help="Run as daemon.", default=False)
(options, args) = parser.parse_args()
config = Config()
+ init_logger('server')
logger.setLevelFromOptVerbose(config.SFA_API_LOGLEVEL)
# ge the server's key and cert
server_key_file, server_cert_file)
a.start()
- # start slice manager
- if (options.sm):
- from sfa.server.slicemgr import SliceMgr
- s = SliceMgr("", config.SFA_SM_PORT, server_key_file, server_cert_file)
- s.start()
-
- if (options.cm):
- from sfa.server.component import Component
- c = Component("", config.component_port,
- server_key_file, server_cert_file)
-# c = Component("", config.SFA_COMPONENT_PORT, server_key_file, server_cert_file)
- c.start()
-
if __name__ == "__main__":
try:
main()
- except:
- logger.log_exc_critical("SFA server is exiting")
+ except Exception:
+ logger.log_exc("SFA server is exiting")
+ exit(1)
+++ /dev/null
-#!/usr/bin/python
-from __future__ import print_function
-
-import sys
-import os
-import tempfile
-from optparse import OptionParser
-
-from sfa.util.faults import ConnectionKeyGIDMismatch
-from sfa.util.config import Config
-
-from sfa.trust.certificate import Keypair, Certificate
-from sfa.trust.credential import Credential
-from sfa.trust.gid import GID
-from sfa.trust.hierarchy import Hierarchy
-
-from sfa.client.sfaserverproxy import SfaServerProxy
-
-from sfa.planetlab.plxrn import hrn_to_pl_slicename, slicename_to_hrn
-
-KEYDIR = "/var/lib/sfa/"
-CONFDIR = "/etc/sfa/"
-
-
-def handle_gid_mismatch_exception(f):
- def wrapper(*args, **kwds):
- try:
- return f(*args, **kwds)
- except ConnectionKeyGIDMismatch:
- # clean regen server keypair and try again
- print("cleaning keys and trying again")
- clean_key_cred()
- return f(args, kwds)
-
- return wrapper
-
-
-def server_proxy(url=None, port=None, keyfile=None, certfile=None, verbose=False):
- """
- returns an xmlrpc connection to the service a the specified
- address
- """
- if url:
- url_parts = url.split(":")
- if len(url_parts) > 1:
- pass
- else:
- url = "http://%(url)s:%(port)s" % locals()
- else:
- # connect to registry by default
- config = Config()
- addr, port = config.SFA_REGISTRY_HOST, config.SFA_REGISTRY_PORT
- url = "http://%(addr)s:%(port)s" % locals()
-
- if verbose:
- print("Contacting registry at: %(url)s" % locals())
-
- server = SfaServerProxy(url, keyfile, certfile)
- return server
-
-
-def create_default_dirs():
- config = Config()
- hierarchy = Hierarchy()
- config_dir = config.config_path
- trusted_certs_dir = config.get_trustedroots_dir()
- authorities_dir = hierarchy.basedir
- all_dirs = [config_dir, trusted_certs_dir, authorities_dir]
- for dir in all_dirs:
- if not os.path.exists(dir):
- os.makedirs(dir)
-
-
-def has_node_key():
- key_file = KEYDIR + os.sep + 'server.key'
- return os.path.exists(key_file)
-
-
-def clean_key_cred():
- """
- remove the existing keypair and cred and generate new ones
- """
- files = ["server.key", "server.cert", "node.cred"]
- for f in files:
- filepath = KEYDIR + os.sep + f
- if os.path.isfile(filepath):
- os.unlink(f)
-
- # install the new key pair
- # GetCredential will take care of generating the new keypair
- # and credential
- GetCredential()
-
-
-def get_node_key(registry=None, verbose=False):
- # this call requires no authentication,
- # so we can generate a random keypair here
- subject = "component"
- (kfd, keyfile) = tempfile.mkstemp()
- (cfd, certfile) = tempfile.mkstemp()
- key = Keypair(create=True)
- key.save_to_file(keyfile)
- cert = Certificate(subject=subject)
- cert.set_issuer(key=key, subject=subject)
- cert.set_pubkey(key)
- cert.sign()
- cert.save_to_file(certfile)
-
- registry = server_proxy(url=registry, keyfile=keyfile, certfile=certfile)
- registry.get_key_from_incoming_ip()
-
-
-def create_server_keypair(keyfile=None, certfile=None, hrn="component", verbose=False):
- """
- create the server key/cert pair in the right place
- """
- key = Keypair(filename=keyfile)
- key.save_to_file(keyfile)
- cert = Certificate(subject=hrn)
- cert.set_issuer(key=key, subject=hrn)
- cert.set_pubkey(key)
- cert.sign()
- cert.save_to_file(certfile, save_parents=True)
-
-
-@handle_gid_mismatch_exception
-def GetCredential(registry=None, force=False, verbose=False):
- config = Config()
- hierarchy = Hierarchy()
- key_dir = hierarchy.basedir
- data_dir = config.data_path
- config_dir = config.config_path
- credfile = data_dir + os.sep + 'node.cred'
- # check for existing credential
- if not force and os.path.exists(credfile):
- if verbose:
- print("Loading Credential from %(credfile)s " % locals())
- cred = Credential(filename=credfile).save_to_string(save_parents=True)
- else:
- if verbose:
- print("Getting credential from registry")
- # make sure node private key exists
- node_pkey_file = config_dir + os.sep + "node.key"
- node_gid_file = config_dir + os.sep + "node.gid"
- if not os.path.exists(node_pkey_file) or \
- not os.path.exists(node_gid_file):
- get_node_key(registry=registry, verbose=verbose)
-
- gid = GID(filename=node_gid_file)
- hrn = gid.get_hrn()
- # create server key and certificate
- keyfile = data_dir + os.sep + "server.key"
- certfile = data_dir + os.sep + "server.cert"
- key = Keypair(filename=node_pkey_file)
- key.save_to_file(keyfile)
- create_server_keypair(keyfile, certfile, hrn, verbose)
-
- # get credential from registry
- registry = server_proxy(
- url=registry, keyfile=keyfile, certfile=certfile)
- cert = Certificate(filename=certfile)
- cert_str = cert.save_to_string(save_parents=True)
- cred = registry.GetSelfCredential(cert_str, 'node', hrn)
- Credential(string=cred).save_to_file(credfile, save_parents=True)
-
- return cred
-
-
-@handle_gid_mismatch_exception
-def get_trusted_certs(registry=None, verbose=False):
- """
- refresh our list of trusted certs.
- """
- # define useful variables
- config = Config()
- data_dir = config.SFA_DATA_DIR
- config_dir = config.SFA_CONFIG_DIR
- trusted_certs_dir = config.get_trustedroots_dir()
- keyfile = data_dir + os.sep + "server.key"
- certfile = data_dir + os.sep + "server.cert"
- node_gid_file = config_dir + os.sep + "node.gid"
- node_gid = GID(filename=node_gid_file)
- hrn = node_gid.get_hrn()
- # get credential
- cred = GetCredential(registry=registry, verbose=verbose)
- # make sure server key cert pair exists
- create_server_keypair(
- keyfile=keyfile, certfile=certfile, hrn=hrn, verbose=verbose)
- registry = server_proxy(url=registry, keyfile=keyfile, certfile=certfile)
- # get the trusted certs and save them in the right place
- if verbose:
- print("Getting trusted certs from registry")
- trusted_certs = registry.get_trusted_certs(cred)
- trusted_gid_names = []
- for gid_str in trusted_certs:
- gid = GID(string=gid_str)
- gid.decode()
- relative_filename = gid.get_hrn() + ".gid"
- trusted_gid_names.append(relative_filename)
- gid_filename = trusted_certs_dir + os.sep + relative_filename
- if verbose:
- print("Writing GID for %s as %s" % (gid.get_hrn(), gid_filename))
- gid.save_to_file(gid_filename, save_parents=True)
-
- # remove old certs
- all_gids_names = os.listdir(trusted_certs_dir)
- for gid_name in all_gids_names:
- if gid_name not in trusted_gid_names:
- if verbose:
- print("Removing old gid ", gid_name)
- os.unlink(trusted_certs_dir + os.sep + gid_name)
-
-
-@handle_gid_mismatch_exception
-def get_gids(registry=None, verbose=False):
- """
- Get the gid for all instantiated slices on this node and store it
- in /etc/sfa/slice.gid in the slice's filesystem
- """
- # define useful variables
- config = Config()
- data_dir = config.data_path
- config_dir = config.SFA_CONFIG_DIR
- trusted_certs_dir = config.get_trustedroots_dir()
- keyfile = data_dir + os.sep + "server.key"
- certfile = data_dir + os.sep + "server.cert"
- node_gid_file = config_dir + os.sep + "node.gid"
- node_gid = GID(filename=node_gid_file)
- hrn = node_gid.get_hrn()
- interface_hrn = config.SFA_INTERFACE_HRN
- # get credential
- cred = GetCredential(registry=registry, verbose=verbose)
- # make sure server key cert pair exists
- create_server_keypair(
- keyfile=keyfile, certfile=certfile, hrn=hrn, verbose=verbose)
- registry = server_proxy(url=registry, keyfile=keyfile, certfile=certfile)
-
- if verbose:
- print("Getting current slices on this node")
- # get a list of slices on this node
- from sfa.generic import Generic
- generic = Generic.the_flavour()
- api = generic.make_api(interface='component')
- xids_tuple = api.driver.nodemanager.GetXIDs()
- slices = eval(xids_tuple[1])
- slicenames = slices.keys()
-
- # generate a list of slices that dont have gids installed
- slices_without_gids = []
- for slicename in slicenames:
- if not os.path.isfile("/vservers/%s/etc/slice.gid" % slicename) \
- or not os.path.isfile("/vservers/%s/etc/node.gid" % slicename):
- slices_without_gids.append(slicename)
-
- # convert slicenames to hrns
- hrns = [slicename_to_hrn(interface_hrn, slicename)
- for slicename in slices_without_gids]
-
- # exit if there are no gids to install
- if not hrns:
- return
-
- if verbose:
- print("Getting gids for slices on this node from registry")
- # get the gids
- # and save them in the right palce
- records = registry.GetGids(hrns, cred)
- for record in records:
- # if this isnt a slice record skip it
- if not record['type'] == 'slice':
- continue
- slicename = hrn_to_pl_slicename(record['hrn'])
- # if this slice isnt really instatiated skip it
- if not os.path.exists("/vservers/%(slicename)s" % locals()):
- continue
-
- # save the slice gid in /etc/sfa/ in the vservers filesystem
- vserver_path = "/vservers/%(slicename)s" % locals()
- gid = record['gid']
- slice_gid_filename = os.sep.join([vserver_path, "etc", "slice.gid"])
- if verbose:
- print("Saving GID for %(slicename)s as %(slice_gid_filename)s" % locals())
- GID(string=gid).save_to_file(slice_gid_filename, save_parents=True)
- # save the node gid in /etc/sfa
- node_gid_filename = os.sep.join([vserver_path, "etc", "node.gid"])
- if verbose:
- print("Saving node GID for %(slicename)s as %(node_gid_filename)s" % locals())
- node_gid.save_to_file(node_gid_filename, save_parents=True)
-
-
-def dispatch(options, args):
-
- create_default_dirs()
- if options.key:
- if options.verbose:
- print("Getting the component's pkey")
- get_node_key(registry=options.registry, verbose=options.verbose)
- if options.certs:
- if options.verbose:
- print("Getting the component's trusted certs")
- get_trusted_certs(verbose=options.verbose)
- if options.gids:
- if options.verbose:
- print("Geting the component's GIDs")
- get_gids(verbose=options.verbose)
-
-
-def main():
- args = sys.argv
- prog_name = args[0]
- parser = OptionParser(usage="%(prog_name)s [options]" % locals())
- parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
- default=False, help="Be verbose")
- parser.add_option("-r", "--registry", dest="registry", default=None,
- help="Url of registry to contact")
- parser.add_option("-k", "--key", dest="key", action="store_true",
- default=False,
- help="Get the node's pkey from the registry")
- parser.add_option("-c", "--certs", dest="certs", action="store_true",
- default=False,
- help="Get the trusted certs from the registry")
- parser.add_option("-g", "--gids", dest="gids", action="store_true",
- default=False,
- help="Get gids for all the slices on the component")
-
- (options, args) = parser.parse_args()
-
- dispatch(options, args)
-
-if __name__ == '__main__':
- main()
+# pylint: disable=c0111
+
import os
import os.path
import datetime
-from sfa.util.faults import SfaFault, SfaAPIError, RecordNotFound
+from sfa.util.faults import SfaFault, RecordNotFound
from sfa.util.genicode import GENICODE
from sfa.util.config import Config
from sfa.util.cache import Cache
####################
-class SfaApi (XmlrpcApi):
+class SfaApi(XmlrpcApi):
"""
An SfaApi instance is a basic xmlrpc service
augmented with the local cryptographic material and hrn
It also has the notion of its own interface (a string describing
- whether we run a registry, aggregate or slicemgr) and has
- the notion of neighbour sfa services as defined
+ whether we run a registry, or aggregate) and has
+ the notion of neighbour sfa services as defined
in /etc/sfa/{aggregates,registries}.xml
Finally it contains a cache instance
- It gets augmented by the generic layer with
+ It gets augmented by the generic layer with
(*) an instance of manager (actually a manager module for now)
beware that this is shared among all instances of api
(*) an instance of a testbed driver
def server_proxy(self, interface, cred, timeout=30):
"""
Returns a connection to the specified interface. Use the specified
- credential to determine the caller and look for the caller's key/cert
- in the registry hierarchy cache.
+ credential to determine the caller and look for the caller's key/cert
+ in the registry hierarchy cache.
"""
from sfa.trust.hierarchy import Hierarchy
if not isinstance(cred, Credential):
return delegated_cred
def _getCredential(self):
- """
- Get our credential from a remote registry
+ """
+ Get our credential from a remote registry
"""
from sfa.server.registry import Registries
registries = Registries()
def prepare_response(self, result, method=""):
"""
- Converts the specified result into a standard GENI compliant
- response
+ Converts the specified result into a standard GENI compliant
+ response
"""
# as of dec 13 2011 we only support API v2
- if self.interface.lower() in ['aggregate', 'slicemgr']:
+ if self.interface.lower() in ['aggregate']:
result = self.prepare_response_am(result)
return XmlrpcApi.prepare_response(self, result, method)
-##
-# This module implements a general-purpose server layer for sfa.
-# The same basic server should be usable on the registry, component, or
-# other interfaces.
-#
-# TODO: investigate ways to combine this with existing PLC server?
-##
+"""
+This module implements a general-purpose server layer for sfa.
+The same basic server should be usable on the registry or
+other interfaces.
+"""
import threading
+++ /dev/null
-import os
-import sys
-import datetime
-import time
-from sfa.server.sfaserver import SfaServer
-
-
-class SliceMgr(SfaServer):
-
- ##
- # Create a new slice manager object.
- #
- # @param ip the ip address to listen on
- # @param port the port to listen on
- # @param key_file private key filename of registry
- # @param cert_file certificate filename containing public key (could be a GID file)
-
- def __init__(self, ip, port, key_file, cert_file, config="/etc/sfa/sfa_config"):
- SfaServer.__init__(self, ip, port, key_file, cert_file, 'slicemgr')
SOAPpy = None
####################
-#from sfa.util.faults import SfaNotImplemented, SfaAPIError, SfaInvalidAPIMethod, SfaFault
from sfa.util.faults import SfaInvalidAPIMethod, SfaAPIError, SfaFault
from sfa.util.sfalogging import logger
from sfa.util.py23 import xmlrpc_client
class XmlrpcApi:
"""
- The XmlrpcApi class implements a basic xmlrpc (or soap) service
+ The XmlrpcApi class implements a basic xmlrpc (or soap) service
"""
protocol = None
methods, fromlist=[methods])
self.methods = methods_module.all
- self.logger = logger
-
def callable(self, method):
"""
Return a new instance of the specified method.
callablemethod = getattr(module, classname)(self)
return getattr(module, classname)(self)
except (ImportError, AttributeError):
- self.logger.log_exc("Error importing method: %s" % method)
+ logger.log_exc("Error importing method: %s" % method)
raise SfaInvalidAPIMethod(method)
def call(self, source, method, *args):
result = self.call(source, method, *args)
except SfaFault as fault:
result = fault
- self.logger.log_exc("XmlrpcApi.handle has caught Exception")
+ logger.log_exc("XmlrpcApi.handle has caught Exception")
except Exception as fault:
- self.logger.log_exc("XmlrpcApi.handle has caught Exception")
+ logger.log_exc("XmlrpcApi.handle has caught Exception")
result = SfaAPIError(fault)
# Return result
from sfa.util.sfalogging import logger
# this tends to generate quite some logs for little or no value
-debug_verify_chain = False
+debug_verify_chain = True
glo_passphrase_callback = None
# certs)
if string is None or string.strip() == "":
- logger.warn("Empty string in load_from_string")
+ logger.warning("Empty string in load_from_string")
return
string = string.strip()
OpenSSL.crypto.FILETYPE_PEM, parts[0])
if self.x509 is None:
- logger.warn(
+ logger.warning(
"Loaded from string but cert is None: {}".format(string))
# if there are more certs, then create a parent and let the parent load
def save_to_string(self, save_parents=True):
if self.x509 is None:
- logger.warn("None cert in certificate.save_to_string")
+ logger.warning("None cert in certificate.save_to_string")
return ""
string = OpenSSL.crypto.dump_certificate(
OpenSSL.crypto.FILETYPE_PEM, self.x509)
# pyOpenSSL does not have a way to get extensions
m2x509 = M2Crypto.X509.load_cert_string(certstr)
if m2x509 is None:
- logger.warn("No cert loaded in get_extension")
+ logger.warning("No cert loaded in get_extension")
return None
if m2x509.get_ext(name) is None:
return None
if field in self.data:
raise Exception("Cannot set {} more than once".format(field))
self.data[field] = string
- self.add_extension(field, 0, string)
+ # call str() because we've seen unicode there
+ # and the underlying C code doesn't like it
+ self.add_extension(field, 0, str(string))
##
# Return the data string that was previously set with set_data
# @param cert certificate object
def is_signed_by_cert(self, cert):
- logger.debug("Certificate.is_signed_by_cert -> invoking verify")
k = cert.get_pubkey()
+ logger.debug("Certificate.is_signed_by_cert -> verify on {}\n"
+ "with pubkey {}"
+ .format(self, k))
result = self.verify(k)
return result
trusted_cert.pretty_name()))
else:
logger.debug("verify_chain: not a direct"
- " descendant of a trusted root")
+ " descendant of trusted root #{}".format(i))
# if there is no parent, then no way to verify the chain
if not self.parent:
# extension and hope there are no other basicConstraints
if not self.parent.isCA and not (
self.parent.get_extension('basicConstraints') == 'CA:TRUE'):
- logger.warn("verify_chain: cert {}'s parent {} is not a CA"
- .format(self.pretty_name(), self.parent.pretty_name()))
+ logger.warning("verify_chain: cert {}'s parent {} is not a CA"
+ .format(self.pretty_name(), self.parent.pretty_name()))
raise CertNotSignedByParent("{}: Parent {} not a CA"
.format(self.pretty_name(),
self.parent.pretty_name()))
self.gidCaller.get_urn(),
self.gidObject.get_urn(),
oldAttr.name, oldAttr.value, attr.value)
- logger.warn(msg)
+ logger.warning(msg)
# raise CredentialNotVerifiable(
# "Can't encode new valid delegated credential: {}"
# .format(msg))
def sign(self):
if not self.issuer_privkey:
- logger.warn("Cannot sign credential (no private key)")
+ logger.warning("Cannot sign credential (no private key)")
return
if not self.issuer_gid:
- logger.warn("Cannot sign credential (no issuer gid)")
+ logger.warning("Cannot sign credential (no issuer gid)")
return
doc = parseString(self.get_xml())
sigs = doc.getElementsByTagName("signatures")[0]
#!/usr/bin/python
-#----------------------------------------------------------------------
-# Copyright (c) 2008 Board of Trustees, Princeton University
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and/or hardware specification (the "Work") to
-# deal in the Work without restriction, including without limitation the
-# rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Work, and to permit persons to whom the Work
-# is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Work.
-#
-# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
-# IN THE WORK.
-#----------------------------------------------------------------------
+"""
+A reroutable logger that can handle deep tracebacks
+
+Requirements:
+
+* for legacy, we want all our code to just do:
+
+ from sfa.util.sfalogging import logger
+ ...
+ logger.info('blabla')
+
+* depending on whether the code runs (a) inside the server,
+ (b) as part of sfa-import, or (c) as part of the sfi CLI,
+ we want these messages to be directed in different places
+
+* also because troubleshooting is very painful, we need a better way
+ to report stacks when an exception occurs.
+
+Implementation:
+
+* we use a single unique logger name 'sfa' (wrt getLogger()),
+ and provide an auxiliary function `init_logger()` that
+ accepts for its `context` parameter one of :
+ `server`, `import` `sfi` or `console`
+ It will then reconfigure the 'sfa' logger to do the right thing
+
+* also we create our own subclass of loggers, and install it
+ with logging.setLoggerClass(), so we can add our own customized
+ `log_exc()` method
+
+"""
+
+# pylint: disable=c0111, c0103, w1201
from __future__ import print_function
import os
+import os.path
import sys
import traceback
import logging
import logging.handlers
+import logging.config
-CRITICAL = logging.CRITICAL
-ERROR = logging.ERROR
-WARNING = logging.WARNING
-INFO = logging.INFO
-DEBUG = logging.DEBUG
-
-# a logger that can handle tracebacks
-
-
-class _SfaLogger:
-
- def __init__(self, logfile=None, loggername=None, level=logging.INFO):
- # default is to locate loggername from the logfile if avail.
- if not logfile:
- # loggername='console'
- # handler=logging.StreamHandler()
- #handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
- logfile = "/var/log/sfa.log"
-
- if not loggername:
- loggername = os.path.basename(logfile)
- try:
- handler = logging.handlers.RotatingFileHandler(
- logfile, maxBytes=1000000, backupCount=5)
- except IOError:
- # This is usually a permissions error because the file is
- # owned by root, but httpd is trying to access it.
- tmplogfile = os.path.join(os.getenv("TMPDIR",
- os.getenv("TMP", os.path.normpath("/tmp"))),
- os.path.basename(logfile))
- tmplogfile = os.path.normpath(tmplogfile)
-
- tmpdir = os.path.dirname(tmplogfile)
- if tmpdir and tmpdir != "" and not os.path.exists(tmpdir):
- os.makedirs(tmpdir)
-
- # In strange uses, 2 users on same machine might use same code,
- # meaning they would clobber each others files
- # We could (a) rename the tmplogfile, or (b)
- # just log to the console in that case.
- # Here we default to the console.
- if os.path.exists(tmplogfile) and not os.access(tmplogfile, os.W_OK):
- loggername = loggername + "-console"
- handler = logging.StreamHandler()
- else:
- handler = logging.handlers.RotatingFileHandler(
- tmplogfile, maxBytes=1000000, backupCount=5)
- handler.setFormatter(logging.Formatter(
- "%(asctime)s - %(levelname)s - %(message)s"))
- self.logger = logging.getLogger(loggername)
- self.logger.setLevel(level)
- # check if logger already has the handler we're about to add
- handler_exists = False
- for l_handler in self.logger.handlers:
- if l_handler.baseFilename == handler.baseFilename and \
- l_handler.level == handler.level:
- handler_exists = True
-
- if not handler_exists:
- self.logger.addHandler(handler)
-
- self.loggername = loggername
-
- def setLevel(self, level):
- self.logger.setLevel(level)
+# so that users of this module don't need to import logging
+from logging import (CRITICAL, ERROR, WARNING, INFO, DEBUG)
+
+
+class SfaLogger(logging.getLoggerClass()):
+ """
+ a rewrite of old _SfaLogger class that was way too cumbersome
+ keep this as much as possible though
+ """
# shorthand to avoid having to import logging all over the place
def setLevelDebug(self):
- self.logger.setLevel(logging.DEBUG)
+ self.setLevel(DEBUG)
def debugEnabled(self):
- return self.logger.getEffectiveLevel() == logging.DEBUG
+ return self.getEffectiveLevel() == logging.DEBUG
# define a verbose option with s/t like
- # parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0)
+ # parser.add_option("-v", "--verbose", action="count",
+ # dest="verbose", default=0)
# and pass the coresponding options.verbose to this method to adjust level
def setLevelFromOptVerbose(self, verbose):
if verbose == 0:
- self.logger.setLevel(logging.WARNING)
+ self.setLevel(logging.WARNING)
elif verbose == 1:
- self.logger.setLevel(logging.INFO)
+ self.setLevel(logging.INFO)
elif verbose >= 2:
- self.logger.setLevel(logging.DEBUG)
- # in case some other code needs a boolean
+ self.setLevel(logging.DEBUG)
- def getBoolVerboseFromOpt(self, verbose):
+ # in case some other code needs a boolean
+ @staticmethod
+ def getBoolVerboseFromOpt(verbose):
return verbose >= 1
- def getBoolDebugFromOpt(self, verbose):
+ @staticmethod
+ def getBoolDebugFromOpt(verbose):
return verbose >= 2
- ####################
- def info(self, msg):
- self.logger.info(msg)
-
- def debug(self, msg):
- self.logger.debug(msg)
-
- def warn(self, msg):
- self.logger.warn(msg)
-
- # some code is using logger.warn(), some is using logger.warning()
- def warning(self, msg):
- self.logger.warning(msg)
+ def log_exc(self, message, limit=100):
+ """
+ standard logger has an exception() method but this will
+ dump the stack only between the frames
+ (1) that does `raise` and (2) the one that does `except`
- def error(self, msg):
- self.logger.error(msg)
+ log_exc() has a limit argument that allows to see deeper than that
- def critical(self, msg):
- self.logger.critical(msg)
-
- # logs an exception - use in an except statement
- def log_exc(self, message):
+ use limit=None to get the same behaviour as exception()
+ """
self.error("%s BEG TRACEBACK" % message + "\n" +
- traceback.format_exc().strip("\n"))
+ traceback.format_exc(limit=limit).strip("\n"))
self.error("%s END TRACEBACK" % message)
- def log_exc_critical(self, message):
- self.critical("%s BEG TRACEBACK" % message + "\n" +
- traceback.format_exc().strip("\n"))
- self.critical("%s END TRACEBACK" % message)
-
# for investigation purposes, can be placed anywhere
- def log_stack(self, message):
- to_log = "".join(traceback.format_stack())
+ def log_stack(self, message, limit=100):
+ to_log = "".join(traceback.format_stack(limit=limit))
self.info("%s BEG STACK" % message + "\n" + to_log)
self.info("%s END STACK" % message)
- def enable_console(self, stream=sys.stdout):
+ def enable_console(self):
formatter = logging.Formatter("%(message)s")
- handler = logging.StreamHandler(stream)
+ handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
- self.logger.addHandler(handler)
-
-
-logger = _SfaLogger(loggername='info', level=logging.INFO)
-
-sfi_logger = _SfaLogger(logfile=os.path.expanduser("~/.sfi/") + 'sfi.log',
- loggername='sfilog', level=logging.DEBUG)
-########################################
-import time
-
-
-def profile(logger):
- """
- Prints the runtime of the specified callable. Use as a decorator, e.g.,
-
- @profile(logger)
- def foo(...):
- ...
- """
- def logger_profile(callable):
- def wrapper(*args, **kwds):
- start = time.time()
- result = callable(*args, **kwds)
- end = time.time()
- args = map(str, args)
- args += ["%s = %s" % (name, str(value))
- for (name, value) in kwds.iteritems()]
- # should probably use debug, but then debug is not always enabled
- logger.info("PROFILED %s (%s): %.02f s" %
- (callable.__name__, ", ".join(args), end - start))
- return result
- return wrapper
- return logger_profile
-
-
-if __name__ == '__main__':
- print('testing sfalogging into logger.log')
- logger1 = _SfaLogger('logger.log', loggername='std(info)')
- logger2 = _SfaLogger('logger.log', loggername='error', level=logging.ERROR)
- logger3 = _SfaLogger('logger.log', loggername='debug', level=logging.DEBUG)
-
- for logger, msg in ((logger1, "std(info)"), (logger2, "error"), (logger3, "debug")):
-
- print("====================", msg, logger.logger.handlers)
-
- logger.enable_console()
- logger.critical("logger.critical")
- logger.error("logger.error")
- logger.warn("logger.warning")
- logger.info("logger.info")
- logger.debug("logger.debug")
- logger.setLevel(logging.DEBUG)
- logger.debug("logger.debug again")
-
- @profile(logger)
- def sleep(seconds=1):
- time.sleep(seconds)
-
- logger.info('console.info')
- sleep(0.5)
- logger.setLevel(logging.DEBUG)
- sleep(0.25)
+ self.addHandler(handler)
+
+
+# install our class as the default
+logging.setLoggerClass(SfaLogger)
+
+
+# configure
+# this is *NOT* passed to dictConfig as-is
+# instead we filter 'handlers' and 'loggers'
+# to contain just one entry
+# so make sure that 'handlers' and 'loggers'
+# have the same set of keys
+def logging_config(context):
+ if context == 'server':
+ handlername = 'file'
+ filename = '/var/log/sfa.log'
+ level = 'INFO'
+ elif context == 'import':
+ handlername = 'file'
+ filename = '/var/log/sfa-import.log'
+ level = 'INFO'
+ elif context == 'cli':
+ handlername = 'file'
+ filename = os.path.expanduser("~/.sfi.log")
+ level = 'DEBUG'
+ elif context == 'console':
+ handlername = 'stdout'
+ filename = 'ignored'
+ level = 'INFO'
+ else:
+ print("Cannot configure logging - exiting")
+ exit(1)
+
+ config = {
+ 'version': 1,
+ # IMPORTANT: we may be imported by something else, so:
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'standard': {
+ 'datefmt': '%m-%d %H:%M:%S',
+ 'format': ('%(asctime)s %(levelname)s '
+ '%(filename)s:%(lineno)d %(message)s'),
+ },
+ },
+ # fill in later with just the one needed
+ # otherwise a dummy 'ignored' file gets created
+ 'handlers': {
+ },
+ 'loggers': {
+ 'sfa': {
+ 'handlers': [handlername],
+ 'level': level,
+ 'propagate': False,
+ },
+ },
+ }
+ if handlername == 'stdout':
+ config['handlers']['stdout'] = {
+ 'level': level,
+ 'formatter': 'standard',
+ 'class': 'logging.StreamHandler',
+ }
+ else:
+ config['handlers']['file'] = {
+ 'filename': filename,
+ 'level': level,
+ 'formatter': 'standard',
+ 'class': 'logging.handlers.TimedRotatingFileHandler',
+            # rotate every Monday; keep 12 weekly backups (~3 months)
+ 'when': 'w0',
+ 'interval': 1,
+ 'backupCount': 12,
+ }
+ return config
+
+
+logger = logging.getLogger('sfa')
+
+
+def init_logger(context):
+ logging.config.dictConfig(logging_config(context))
+
+
+# default setup: if the importing process never calls init_logger()
+# — as with the miscellaneous testers and other certificate
+# probing/dumping utilities — fall back to console logging
+init_logger('console')
--- /dev/null
+[Unit]
+Description=SFA Aggregate Manager (AM)
+Requires=sfa-db.service
+After=sfa-db.service
+
+[Service]
+ExecStart=/bin/bash -c "/usr/bin/sfa-start.py -a"
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+[Unit]
+Description=SFA Database service
+Requires=postgresql.service
+After=postgresql.service
+
+[Service]
+Type=oneshot
+ExecStart=/bin/bash -c "/usr/bin/sfa-setup.sh start-db"
+RemainAfterExit=true
+# systemd insists that we define this one
+# that does not seem useful in our case
+ExecStop=/bin/true
+StandardOutput=journal
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+# this is meant to be installed under /etc/systemd/system
+[Unit]
+Description=SFA Registry Service
+Requires=sfa-db.service
+After=sfa-db.service
+
+[Service]
+ExecStart=/bin/bash -c "/usr/bin/sfa-start.py -r"
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+#!/bin/bash
+
+set -e
+
+####################
+PGDATA=/var/lib/pgsql/data/
+PGLOG=/var/log/pgsql
+
+postgresql_conf=$PGDATA/postgresql.conf
+pg_hba_conf=$PGDATA/pg_hba.conf
+
+# SFA consolidated (merged) config file
+sfa_whole_config=/etc/sfa/sfa_config
+# SFA default config (read-only template)
+sfa_default_config=/etc/sfa/default_config.xml
+# SFA local (site-dependent) file
+sfa_local_config=/etc/sfa/configs/site_config
+sfa_local_config_xml=/etc/sfa/configs/site_config.xml
+sfa_local_config_sh=/etc/sfa/sfa_config.sh
+
+# Regenerate configuration files - almost verbatim from plc.init
+function reload () {
+
+ # Regenerate the main configuration file from default values
+ # overlaid with site-specific and current values.
+ files=( $sfa_default_config $sfa_local_config )
+ tmp=$(mktemp /tmp/sfa_config.XXXXXX)
+ sfa-config --python "${files[@]}" > $tmp
+ if [ $? -eq 0 ] ; then
+ mv $tmp $sfa_whole_config
+ chmod 444 $sfa_whole_config
+ else
+ echo "SFA: Warning: Invalid configuration file(s) detected"
+ rm -f $tmp
+ exit 1
+ fi
+
+ # Convert configuration to various formats
+ if [ -f $sfa_local_config_xml ] ; then
+ sfa-config --python $sfa_local_config_xml > $sfa_local_config
+ rm $sfa_local_config_xml
+ fi
+ if [ -n "$force" -o $sfa_local_config -nt $sfa_whole_config ] ; then
+ sfa-config --python $sfa_default_config $sfa_local_config > $sfa_whole_config
+ fi
+ if [ -n "$force" -o $sfa_whole_config -nt /etc/sfa/sfa_config.sh ] ; then
+ sfa-config --shell $sfa_default_config $sfa_local_config > /etc/sfa/sfa_config.sh
+ fi
+
+ # reload the shell version
+ source $sfa_local_config_sh
+
+}
+
+function postgresql_setting() {
+ param="$1"; shift
+ value="$1"; shift
+
+    # is that setting already present in the file?
+ if grep --extended-regexp -q "#?${param} *=.*" $postgresql_conf; then
+ sed --regexp-extended --in-place \
+ --expression="s|#?${param} = .*|${param} = ${value}|" \
+ $postgresql_conf
+ else
+ echo "${param} = ${value}" >> $postgresql_conf
+ fi
+}
+
+function start-db () {
+
+ # source shell config if present
+ # but it might not be present the very first time
+ [ ! -f $sfa_local_config_sh ] && reload
+
+ source $sfa_local_config_sh
+
+ # Export so that we do not have to specify -p to psql invocations
+ export PGPORT=$SFA_DB_PORT
+
+ # only if enabled
+ # this is because the DB can run on a separate box as well
+ [ "$SFA_DB_ENABLED" == 1 -o "$SFA_DB_ENABLED" == True ] || return
+
+ postgresql_setting port "'$SFA_DB_PORT'"
+ mkdir -p $PGLOG
+ chown postgres:postgres $PGLOG
+ postgresql_setting log_directory "'$PGLOG'"
+
+ ######## /var/lib/pgsql/data
+ # Fix ownership (rpm installation may have changed it)
+ chown -R -H postgres:postgres $(dirname $PGDATA)
+
+ # PostgreSQL must be started at least once to bootstrap
+ # /var/lib/pgsql/data
+ if [ ! -f $postgresql_conf ] ; then
+ /usr/bin/postgresql-setup --initdb --unit postgresql
+ fi
+
+ ######## /var/lib/pgsql/data/postgresql.conf
+ registry_ip=""
+ foo=$(python -c "import socket; print socket.gethostbyname('$SFA_REGISTRY_HOST')") && registry_ip="$foo"
+ db_ip=""
+ foo=$(python -c "import socket; print socket.gethostbyname('$SFA_DB_HOST')") && db_ip="$foo"
+ # Enable DB server. drop Postgresql<=7.x
+ # PostgreSQL >=8.0 defines listen_addresses
+ # listen on a specific IP + localhost, more robust when run within a vserver
+ sed -i -e '/^listen_addresses/d' $postgresql_conf
+ case "$db_ip" in
+ ""|127.0.0.1|localhost*)
+ postgresql_setting listen_addresses "'localhost'" ;;
+ *)
+ postgresql_setting listen_addresses "'${db_ip},localhost'" ;;
+ esac
+ postgresql_setting timezone "'UTC'"
+ postgresql_setting log_timezone "'UTC'"
+
+ ######## /var/lib/pgsql/data/pg_hba.conf
+ # remove/recreate passwordless localhost entry
+ sed -i -e "/^local/d" $pg_hba_conf
+ echo "local all all trust" >> $pg_hba_conf
+
+ # Disable access to our DB from all hosts
+ sed -i -e "/^host ${SFA_DB_NAME}/d" $pg_hba_conf
+ # grant access
+ {
+ echo "host $SFA_DB_NAME $SFA_DB_USER 127.0.0.1/32 password"
+ [ -n "$registry_ip" ] && echo "host $SFA_DB_NAME $SFA_DB_USER ${registry_ip}/32 password"
+ } >> $pg_hba_conf
+
+ # Fix ownership (sed -i changes it)
+ chown postgres:postgres $postgresql_conf $pg_hba_conf
+
+ ######## compute a password if needed
+ if [ -z "$SFA_DB_PASSWORD" ] ; then
+ SFA_DB_PASSWORD=$(uuidgen)
+ sfa-config --category=sfa_db --variable=password --value="$SFA_DB_PASSWORD" --save=$sfa_local_config $sfa_local_config >& /dev/null
+ reload
+ fi
+
+ # tell postgresql that settings have changed
+ # note that changes to listen_addresses do require a full restart
+ # but it's too intrusive to do this each time, as it would in turn
+ # require a restart of the plc service
+ su - postgres bash -c "pg_ctl reload"
+
+ ######## make sure we have the user and db created
+ # user
+ if ! psql -U $SFA_DB_USER -c "" template1 >/dev/null 2>&1 ; then
+ psql -U postgres -c "CREATE USER $SFA_DB_USER PASSWORD '$SFA_DB_PASSWORD'" template1 >& /dev/null
+ else
+ psql -U postgres -c "ALTER USER $SFA_DB_USER WITH PASSWORD '$SFA_DB_PASSWORD'" template1 >& /dev/null
+ fi
+
+ # db
+ if ! psql -U $SFA_DB_USER -c "" $SFA_DB_NAME >/dev/null 2>&1 ; then
+ createdb -U postgres --template=template0 --encoding=UNICODE --owner=$SFA_DB_USER $SFA_DB_NAME
+ fi
+
+ # create schema; sfaadmin.py is safer than just sfaadmin
+ sfaadmin.py reg sync_db
+
+}
+
+usage="$0 start-db|reload
+ start-db: configure postgresql database and restart postgresql
+ reload: recompute miscell configuration files after changes are made in master config
+"
+
+func="$1"; shift
+
+case "$func" in
+ start-db|reload) $func;;
+ *) echo "$usage"; exit 1;;
+esac
key = None
cert = None
credential = None
- type = None
+ type = None
def __init__(self, options):
try: self.config = config = Config(options.config_file)
except:
self.cert.set_pubkey(self.key)
self.cert.set_issuer(self.key, self.config.SFI_USER)
self.cert.sign()
- self.cert.save_to_file(cert_file)
+ self.cert.save_to_file(cert_file)
SFI_AGGREGATE = config.SFI_SM.replace('12347', '12346')
SFI_CM = 'http://' + options.cm_host + ':12346'
self.registry = SfaServerProxy(config.SFI_REGISTRY, key_file, cert_file)
# test from components persepctive
self.type = 'user'
self.credential = self.GetCredential(self.hrn)
-
+
def GetCredential(self, hrn = None, type = 'user'):
- if not hrn: hrn = self.hrn
+ if not hrn: hrn = self.hrn
if hrn == self.hrn:
cert = self.cert.save_to_string(save_parents=True)
request_hash = self.key.compute_hash([cert, 'user', hrn])
else:
if not self.credential:
self.credential = self.GetCredential(self.hrn, 'user')
- return self.registry.GetCredential(self.credential, type, hrn)
+ return self.registry.GetCredential(self.credential, type, hrn)
class BasicTestCase(unittest.TestCase):
def __init__(self, testname, client, test_slice=None):
unittest.TestCase.__init__(self, testname)
self.client = client
self.slice = test_slice
-
+
def setUp(self):
self.registry = self.client.registry
self.aggregate = self.client.aggregate
self.cm = self.client.cm
self.credential = self.client.credential
self.hrn = self.client.hrn
- self.type = self.client.type
-
+ self.type = self.client.type
+
# Registry tests
class RegistryTest(BasicTestCase):
try: self.registry.Remove(auth_cred, record['type'], record['hrn'])
except: pass
-
+
def testRegisterPeerObject(self):
assert True
-
+
def testUpdate(self):
authority = get_authority(self.hrn)
auth_cred = self.client.GetCredential(authority, 'authority')
records = self.registry.Resolve(self.credential, self.hrn)
if not records: assert False
record = records[0]
- self.registry.update(auth_cred, record)
+ self.registry.update(auth_cred, record)
def testResolve(self):
authority = get_authority(self.hrn)
self.registry.Resolve(self.credential, self.hrn)
-
+
def testRemove(self):
authority = get_authority(self.hrn)
auth_cred = self.client.GetCredential(authority, 'authority')
try:
self.registry.Resolve(self.credential, record['hrn'])
assert False
- except:
+ except:
assert True
-
+
def testRemovePeerObject(self):
assert True
def testList(self):
authority = get_authority(self.client.hrn)
self.registry.List(self.credential, authority)
-
+
def testGetRegistries(self):
self.registry.get_registries(self.credential)
-
+
def testGetAggregates(self):
self.registry.get_aggregates(self.credential)
def testGetTrustedCerts(self):
# this should fail unless we are a node
callable = self.registry.get_trusted_certs
- server_exception = False
+ server_exception = False
try:
callable(self.credential)
except ServerException:
finally:
if self.type in ['user'] and not server_exception:
assert False
-
+
class AggregateTest(BasicTestCase):
def setUp(self):
BasicTestCase.setUp(self)
-
+
def testGetSlices(self):
self.aggregate.ListSlices(self.credential)
RSpec(xml=slice_rspec)
def testCreateSlice(self):
- # get availabel resources
+        # get available resources
rspec = self.aggregate.get_resources(self.credential)
slice_credential = self.client.GetCredential(self.slice['hrn'], 'slice')
self.aggregate.CreateSliver(slice_credential, self.slice['hrn'], rspec)
rspec = self.aggregate.get_resources(self.credential)
ticket = self.aggregate.GetTicket(slice_credential, self.slice['hrn'], rspec)
# will raise an exception if the ticket inst valid
- SfaTicket(string=ticket)
-
-class SlicemgrTest(AggregateTest):
- def setUp(self):
- AggregateTest.setUp(self)
-
- # force calls to go through slice manager
- self.aggregate = self.sm
-
- # get the slice credential
-
-
-class ComponentTest(BasicTestCase):
- def setUp(self):
- BasicTestCase.setUp(self)
- self.slice_cred = self.client.GetCredential(self.slice['hrn'], 'slice')
-
- def testStartSlice(self):
- self.cm.start_slice(self.slice_cred, self.slice['hrn'])
-
- def testStopSlice(self):
- self.cm.stop_slice(self.slice_cred, self.slice['hrn'])
-
- def testDeleteSlice(self):
- self.cm.DeleteSliver(self.slice_cred, self.slice['hrn'],"call-id-delete-slice-cm")
-
- def testRestartSlice(self):
- self.cm.restart_slice(self.slice_cred, self.slice['hrn'])
-
- def testGetSlices(self):
- self.cm.ListSlices(self.slice_cred, self.slice['hrn'])
-
- def testRedeemTicket(self):
- rspec = self.aggregate.get_resources(self.credential)
- ticket = self.aggregate.GetTicket(slice_cred, self.slice['hrn'], rspec)
- self.cm.redeem_ticket(slice_cred, ticket)
-
+ SfaTicket(string=ticket)
def test_names(testcase):
return [name for name in dir(testcase) if name.startswith('test')]
'type': 'slice', 'researcher': [client.hrn]}
client.registry.Register(auth_cred, slice_record)
return slice_record
-
+
def DeleteSliver(client, slice):
authority = get_authority(client.hrn)
auth_cred = client.GetCredential(authority, 'authority')
if slice:
client.registry.Remove(auth_cred, 'slice', slice['hrn'])
-
+
if __name__ == '__main__':
args = sys.argv
default=False, help='run registry tests')
parser.add_option('-a', '--aggregate', dest='aggregate', action='store_true',
default=False, help='run aggregate tests')
- parser.add_option('-s', '--slicemgr', dest='slicemgr', action='store_true',
- default=False, help='run slicemgr tests')
parser.add_option('-c', '--component', dest='component', action='store_true',
default=False, help='run component tests')
- parser.add_option('-d', '--cm_host', dest='cm_host', default=default_cm,
+ parser.add_option('-d', '--cm_host', dest='cm_host', default=default_cm,
help='dns name of component to test. default is %s' % default_cm)
parser.add_option('-A', '--all', dest='all', action='store_true',
default=False, help='run component tests')
-
+
options, args = parser.parse_args()
suite = unittest.TestSuite()
client = Client(options)
test_slice = {}
-
+
# create the test slice if necessary
- if options.all or options.slicemgr or options.aggregate \
- or options.component:
+ if options.all or options.aggregate or options.component:
test_slice = CreateSliver(client)
if options.registry or options.all:
for name in test_names(RegistryTest):
suite.addTest(RegistryTest(name, client))
- if options.aggregate or options.all:
+ if options.aggregate or options.all:
for name in test_names(AggregateTest):
suite.addTest(AggregateTest(name, client, test_slice))
- if options.slicemgr or options.all:
- for name in test_names(SlicemgrTest):
- suite.addTest(SlicemgrTest(name, client, test_slice))
-
- if options.component or options.all:
+ if options.component or options.all:
for name in test_names(ComponentTest):
suite.addTest(ComponentTest(name, client, test_slice))
-
- # run tests
+
+ # run tests
unittest.TextTestRunner(verbosity=2).run(suite)
# remove teset slice
# attempt to update at build-time
-INTERFACES=sfa registry slice-manager aggregate
+INTERFACES=sfa registry aggregate
WSDLS = $(foreach interface,$(INTERFACES),$(interface).wsdl)
HTMLS = $(foreach interface,$(INTERFACES),$(interface).html)
#TEMPORARY_OFF = yes
ifdef TEMPORARY_OFF
-registry.wsdl slice-manager.wsdl aggregate.wsdl sfa.wsdl:
+registry.wsdl aggregate.wsdl sfa.wsdl:
touch $@
else
registry.wsdl: sfa2wsdl.py
PYTHONPATH=../ ./sfa2wsdl.py --registry > $@
-slice-manager.wsdl: sfa2wsdl.py
- PYTHONPATH=../ ./sfa2wsdl.py --slice-manager > $@
-
aggregate.wsdl: sfa2wsdl.py
PYTHONPATH=../ ./sfa2wsdl.py --aggregate > $@
sfa.wsdl: sfa2wsdl.py
- PYTHONPATH=../ ./sfa2wsdl.py --registry --slice-manager --aggregate > $@
+ PYTHONPATH=../ ./sfa2wsdl.py --registry --aggregate > $@
endif
#################### install
self.interface_options = interface_options
def interface_name (self):
- if self.interface_options.aggregate and \
- self.interface_options.slicemgr and \
- self.interface_options.registry:
+ if (self.interface_options.aggregate and
+ self.interface_options.registry):
return "complete"
if self.interface_options.aggregate: return "aggregate"
- elif self.interface_options.slicemgr: return "slicemgr"
elif self.interface_options.registry: return "registry"
- elif self.interface_options.component: return "component"
else: return "unknown"
- def filter_argname(self,argname):
+ def filter_argname(self, argname):
if (not self.interface_options.lite or (argname!="cred")):
if (argname.find('(') != -1):
# The name has documentation in it :-/
min_args = 0
else:
min_args = 1
-
+
self.num_types += 1
type_name = "Type%d"%self.num_types
complex_type = types_section.appendChild(self.types.createElement("xsd:complexType"))
elif (isinstance(arg, Parameter)):
return (self.name_simple_type(arg.type))
elif type(arg) in ( ListType , TupleType ):
- inner_type = self.name_complex_type(arg[0])
+ inner_type = self.name_complex_type(arg[0])
self.num_types=self.num_types+1
type_name = "Type%d"%self.num_types
complex_type = types_section.appendChild(self.types.createElement("xsd:complexType"))
type_name = self.filter_argname(type_name)
complex_type.setAttribute("name", type_name)
complex_content = complex_type.appendChild(self.types.createElement("xsd:sequence"))
-
+
for k in arg.fields:
- inner_type = self.name_complex_type(arg.fields[k])
+ inner_type = self.name_complex_type(arg.fields[k])
element=complex_content.appendChild(self.types.createElement("xsd:element"))
element.setAttribute("name",k)
element.setAttribute("type",inner_type)
- return "xsdl:%s"%type_name
+ return "xsdl:%s"%type_name
else:
return (self.name_simple_type(arg))
#print "\n".join(lines)
#print
-
+
in_el = self.wsdl.lastChild.appendChild(self.wsdl.createElement("message"))
in_el.setAttribute("name", method + "_in")
arg_part = in_el.appendChild(self.wsdl.createElement("part"))
arg_part.setAttribute("name", argname)
arg_part.setAttribute("type", self.param_type(argtype))
-
- # Return type
+
+ # Return type
return_type = function.returns
out_el = self.wsdl.lastChild.appendChild(self.wsdl.createElement("message"))
out_el.setAttribute("name", method + "_out")
port_el = self.wsdl.lastChild.appendChild(self.wsdl.createElement("portType"))
port_el.setAttribute("name", method + "_port")
-
+
op_el = port_el.appendChild(self.wsdl.createElement("operation"))
op_el.setAttribute("name", method)
inp_el=self.wsdl.createElement("input")
bind_el = self.wsdl.lastChild.appendChild(self.wsdl.createElement("binding"))
bind_el.setAttribute("name", method + "_binding")
bind_el.setAttribute("type", "tns:" + method + "_port")
-
+
soap_bind = bind_el.appendChild(self.wsdl.createElement("soap:binding"))
soap_bind.setAttribute("style", "rpc")
soap_bind.setAttribute("transport","http://schemas.xmlsoap.org/soap/http")
-
+
wsdl_op = bind_el.appendChild(self.wsdl.createElement("operation"))
wsdl_op.setAttribute("name", method)
wsdl_op.appendChild(self.wsdl.createElement("soap:operation")).setAttribute("soapAction",
"urn:" + method)
-
+
wsdl_input = wsdl_op.appendChild(self.wsdl.createElement("input"))
input_soap_body = wsdl_input.appendChild(self.wsdl.createElement("soap:body"))
input_soap_body.setAttribute("use", "encoded")
input_soap_body.setAttribute("namespace", "urn:" + method)
input_soap_body.setAttribute("encodingStyle","http://schemas.xmlsoap.org/soap/encoding/")
-
+
wsdl_output = wsdl_op.appendChild(self.wsdl.createElement("output"))
output_soap_body = wsdl_output.appendChild(self.wsdl.createElement("soap:body"))
output_soap_body.setAttribute("use", "encoded")
output_soap_body.setAttribute("namespace", "urn:" + method)
output_soap_body.setAttribute("encodingStyle","http://schemas.xmlsoap.org/soap/encoding/")
-
+
def add_wsdl_services(self):
for service in self.services.keys():
xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"/>
""" % (self.interface_name(),plc_ns,plc_ns,plc_ns)
-
+
self.wsdl = xml.dom.minidom.parseString(wsdl_text_header)
-
+
def compute_wsdl_definitions_and_types(self):
wsdl_text_header = """
</types>
</wsdl:definitions> """ % (self.interface_name(),plc_ns, plc_ns, plc_ns, plc_ns)
self.types = xml.dom.minidom.parseString(wsdl_text_header)
-
+
def add_wsdl_types(self):
wsdl_types = self.wsdl.importNode(self.types.getElementsByTagName("types")[0], True)
def main():
parser = OptionParser()
- parser.add_option("-r", "--registry", dest="registry", action="store_true",
+ parser.add_option("-r", "--registry", dest="registry", action="store_true",
help="Generate registry.wsdl", metavar="FILE")
- parser.add_option("-s", "--slice-manager",
- action="store_true", dest="slicemgr",
- help="Generate sm.wsdl")
parser.add_option("-a", "--aggregate", action="store_true", dest="aggregate",
help="Generate am.wsdl")
- parser.add_option("-c", "--component", action="store_true", dest="component",
- help="Generate cm.wsdl")
parser.add_option("-g", "--geni-aggregate", action="store_true", dest="geni_am",
help="Generate gm.wsdl")
parser.add_option("-l", "--lite", action="store_true", dest="lite",
gen = WSDLGen(interface_options)
gen.generate_wsdl()
gen.pretty_print()
-
+
if __name__ == "__main__":
main()