RSYNC_COND_DRY_RUN := $(if $(findstring n,$(MAKEFLAGS)),--dry-run,)
RSYNC := rsync -a -v $(RSYNC_COND_DRY_RUN) --no-owner $(RSYNC_EXCLUDES)
-CLIENTS = $(shell ls sfa/clientbin/*.py)
+CLIENTS = $(shell ls clientbin/*.py)
BINS = ./config/sfa-config-tty ./config/gen-sfa-cm-config.py \
./sfa/server/sfa-start.py \
- ./sfa/clientbin/sfaadmin.py \
+ ./clientbin/sfaadmin.py \
$(CLIENTS)
synclib: synccheck
API documentation is generated by using the pytondoc tool.
See http://effbot.org/zone/pythondoc.htm to obtain the tool.
+
+====
+WARNING: as of June 2012, most of the documents here are likely outdated.
import shutil
from distutils.core import setup
-scripts = glob("sfa/clientbin/*.py") + \
+scripts = glob("clientbin/*.py") + \
[
'config/sfa-config-tty',
'config/gen-sfa-cm-config.py',
packages = [
'sfa',
- 'sfa/openstack',
'sfa/trust',
'sfa/storage',
'sfa/util',
- 'sfa/client',
'sfa/server',
'sfa/methods',
'sfa/generic',
'sfa/managers',
'sfa/importer',
- 'sfa/planetlab',
'sfa/rspecs',
'sfa/rspecs/elements',
'sfa/rspecs/elements/versions',
'sfa/rspecs/versions',
+ 'sfa/client',
+ 'sfa/planetlab',
+ 'sfa/openstack',
+ 'sfa/federica',
'sfatables',
'sfatables/commands',
'sfatables/processors',
%define name sfa
%define version 2.1
-%define taglevel 7
+%define taglevel 11
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
#Requires: python-uuid
#%endif
-%package plc
-Summary: the SFA layer around MyPLC
+%package flashpolicy
+Summary: SFA support for flash clients
Group: Applications/System
Requires: sfa
-Requires: python-psycopg2
%package client
Summary: the SFA experimenter-side CLI
Requires: sfa
Requires: pyOpenSSL >= 0.7
-%package sfatables
-Summary: sfatables policy tool for SFA
+%package plc
+Summary: the SFA layer around MyPLC
Group: Applications/System
Requires: sfa
Requires: sfa
Requires: pyOpenSSL >= 0.6
-%package flashpolicy
-Summary: SFA support for flash clients
+%package federica
+Summary: the SFA layer around Federica
+Group: Applications/System
+Requires: sfa
+
+%package sfatables
+Summary: sfatables policy tool for SFA
Group: Applications/System
Requires: sfa
%description
This package provides the python libraries for the PlanetLab implementation of SFA
-%description plc
-This package implements the SFA interface which serves as a layer
-between the existing PlanetLab interfaces and the SFA API.
+%description flashpolicy
+This package provides support for adobe flash client applications.
%description client
This package provides the client side of the SFA API, in particular
sfi.py, together with other utilities.
-%description sfatables
-sfatables is a tool for defining access and admission control policies
-in an SFA network, in much the same way as iptables is for ip
-networks. This is the command line interface to manage sfatables
+%description plc
+This package implements the SFA interface which serves as a layer
+between the existing PlanetLab interfaces and the SFA API.
%description cm
This package implements the SFA interface which serves as a layer
between the existing PlanetLab NodeManager interfaces and the SFA API.
-%description flashpolicy
-This package provides support for adobe flash client applications.
+%description federica
+The SFA driver for FEDERICA.
+
+%description sfatables
+sfatables is a tool for defining access and admission control policies
+in an SFA network, in much the same way as iptables is for ip
+networks. This is the command line interface to manage sfatables
%description xmlbuilder
This package contains the xmlbuilder python library, packaged for
rm -rf $RPM_BUILD_ROOT
%files
-# sfa and sfatables depend on each other.
-%{python_sitelib}/sfa
+%{python_sitelib}/sfa/__init__.py*
+%{python_sitelib}/sfa/trust
+%{python_sitelib}/sfa/storage
+%{python_sitelib}/sfa/util
+%{python_sitelib}/sfa/server
+%{python_sitelib}/sfa/methods
+%{python_sitelib}/sfa/generic
+%{python_sitelib}/sfa/managers
+%{python_sitelib}/sfa/importer
+%{python_sitelib}/sfa/rspecs
+%{python_sitelib}/sfa/client
/etc/init.d/sfa
%{_bindir}/sfa-start.py*
%{_bindir}/sfaadmin.py*
/usr/share/sfa/examples
/var/www/html/wsdl/*.wsdl
-%files plc
-%defattr(-,root,root)
-/etc/sfa/pl.rng
-/etc/sfa/credential.xsd
-/etc/sfa/top.xsd
-/etc/sfa/sig.xsd
-/etc/sfa/xml.xsd
-/etc/sfa/protogeni-rspec-common.xsd
-/etc/sfa/topology
-%{_bindir}/gen-sfa-cm-config.py*
+%files flashpolicy
+%{_bindir}/sfa_flashpolicy.py*
+/etc/sfa/sfa_flashpolicy_config.xml
%files client
%config (noreplace) /etc/sfa/sfi_config
%{_bindir}/sfascan
%{_bindir}/sfadump.py*
-%files sfatables
-/etc/sfatables/*
-%{_bindir}/sfatables
-%{python_sitelib}/sfatables
+%files plc
+%defattr(-,root,root)
+%{python_sitelib}/sfa/planetlab
+%{python_sitelib}/sfa/openstack
+/etc/sfa/pl.rng
+/etc/sfa/credential.xsd
+/etc/sfa/top.xsd
+/etc/sfa/sig.xsd
+/etc/sfa/xml.xsd
+/etc/sfa/protogeni-rspec-common.xsd
+/etc/sfa/topology
+%{_bindir}/gen-sfa-cm-config.py*
%files cm
/etc/init.d/sfa-cm
%{_bindir}/sfa_component_setup.py*
# cron jobs here
-%files flashpolicy
-%{_bindir}/sfa_flashpolicy.py*
-/etc/sfa/sfa_flashpolicy_config.xml
+%files federica
+%{python_sitelib}/sfa/federica
+
+%files sfatables
+/etc/sfatables/*
+%{_bindir}/sfatables
+%{python_sitelib}/sfatables
%files xmlbuilder
%{python_sitelib}/xmlbuilder
[ "$1" -ge "1" ] && service sfa-cm restart || :
%changelog
+* Thu Jun 07 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-11
+- review packaging - site-packages/planetlab now come with sfa-plc
+- new package sfa-federica
+- clientbin moved one step upwards
+
+* Wed Jun 6 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.1-10
+- fix bug in sfi update()
+
+* Sun Jun 03 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-9
+- fix broken sfa.util.xrn class for lowercase
+
+* Sat Jun 02 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-8
+- new 'void' generic_flavour for running in registry-only mode
+- first shot at refactoring importers - probably needs more work
+- openstack: various enhancements
+- sfi interface to registry not based on xml files anymore
+- sfi show sorts result on record key
+- bugfix in sfa update on users with a pl-backed registry
+
* Mon May 14 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-7
- renamed sfa/plc into sfa/planetlab
- plxrn moved in sfa/planetlab as well
#
# sfi.py - basic SFA command-line client
-# the actual binary in sfa/clientbin essentially runs main()
-# this module is used in sfascan
+# this module is also used in sfascan
#
import sys
import os, os.path
import socket
+import re
import datetime
import codecs
import pickle
from sfa.trust.credential import Credential
from sfa.trust.sfaticket import SfaTicket
+from sfa.util.faults import SfaInvalidArgument
from sfa.util.sfalogging import sfi_logger
-from sfa.util.xrn import get_leaf, get_authority, hrn_to_urn
+from sfa.util.xrn import get_leaf, get_authority, hrn_to_urn, Xrn
from sfa.util.config import Config
from sfa.util.version import version_core
from sfa.util.cache import Cache
CM_PORT=12346
# utility methods here
+def optparse_listvalue_callback(option, option_string, value, parser):
+ setattr(parser.values, option.dest, value.split(','))
+
+# a code fragment that could be helpful for argparse, which unfortunately is
+# only available in Python 2.7, so this feels like too strong a requirement for the client side
+#class ExtraArgAction (argparse.Action):
+# def __call__ (self, parser, namespace, values, option_string=None):
+# would need a try/except of course
+# (k,v)=values.split('=')
+# d=getattr(namespace,self.dest)
+# d[k]=v
+#####
+#parser.add_argument ("-X","--extra",dest='extras', default={}, action=ExtraArgAction,
+# help="set extra flags, testbed dependent, e.g. --extra enabled=true")
+
+def optparse_dictvalue_callback (option, option_string, value, parser):
+ try:
+ (k,v)=value.split('=',1)
+ d=getattr(parser.values, option.dest)
+ d[k]=v
+ except:
+ parser.print_help()
+ sys.exit(1)
+
# display methods
def display_rspec(rspec, format='rspec'):
if format in ['dns']:
def display_record(record, dump=False):
if dump:
- record.dump()
+ record.dump(sort=True)
else:
info = record.getdict()
print "%s (%s)" % (info['hrn'], info['type'])
f.close()
return
+# minimally check a key argument
+def check_ssh_key (key):
+ good_ssh_key = r'^.*(?:ssh-dss|ssh-rsa)[ ]+[A-Za-z0-9+/=]+(?: .*)?$'
+ return re.match(good_ssh_key, key, re.IGNORECASE)
# load methods
+def load_record_from_opts(options):
+ record_dict = {}
+ if hasattr(options, 'xrn') and options.xrn:
+ if hasattr(options, 'type') and options.type:
+ xrn = Xrn(options.xrn, options.type)
+ else:
+ xrn = Xrn(options.xrn)
+ record_dict['urn'] = xrn.get_urn()
+ record_dict['hrn'] = xrn.get_hrn()
+ record_dict['type'] = xrn.get_type()
+ if hasattr(options, 'key') and options.key:
+ try:
+ pubkey = open(options.key, 'r').read()
+ except IOError:
+ pubkey = options.key
+ if not check_ssh_key (pubkey):
+ raise SfaInvalidArgument(name='key',msg="Could not find file, or wrong key format")
+ record_dict['keys'] = [pubkey]
+ if hasattr(options, 'slices') and options.slices:
+ record_dict['slices'] = options.slices
+ if hasattr(options, 'researchers') and options.researchers:
+ record_dict['researcher'] = options.researchers
+ if hasattr(options, 'email') and options.email:
+ record_dict['email'] = options.email
+ if hasattr(options, 'pis') and options.pis:
+ record_dict['pi'] = options.pis
+
+ # handle extra settings
+ record_dict.update(options.extras)
+
+ return Record(dict=record_dict)
+
def load_record_from_file(filename):
f=codecs.open(filename, encoding="utf-8", mode="r")
xml_string = f.read()
parser = OptionParser(usage="sfi [sfi_options] %s [cmd_options] %s" \
% (command, self.available_dict[command]))
+ if command in ("add", "update"):
+ parser.add_option('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)')
+ parser.add_option('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
+ parser.add_option('-e', '--email', dest='email', default="", help="email (mandatory for users)")
+# use --extra instead
+# parser.add_option('-u', '--url', dest='url', metavar='<url>', default=None, help="URL, useful for slices")
+# parser.add_option('-d', '--description', dest='description', metavar='<description>',
+# help='Description, useful for slices', default=None)
+ parser.add_option('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
+ default=None)
+ parser.add_option('-s', '--slices', dest='slices', metavar='<slices>', help='slice xrns',
+ default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ parser.add_option('-r', '--researchers', dest='researchers', metavar='<researchers>',
+ help='slice researchers', default='', type="str", action='callback',
+ callback=optparse_listvalue_callback)
+ parser.add_option('-p', '--pis', dest='pis', metavar='<PIs>', help='Principal Investigators/Project Managers',
+ default='', type="str", action='callback', callback=optparse_listvalue_callback)
+# use --extra instead
+# parser.add_option('-f', '--firstname', dest='firstname', metavar='<firstname>', help='user first name')
+# parser.add_option('-l', '--lastname', dest='lastname', metavar='<lastname>', help='user last name')
+ parser.add_option ('-X','--extra',dest='extras',default={},type='str',metavar="<EXTRA_ASSIGNS>",
+ action="callback", callback=optparse_dictvalue_callback, nargs=1,
+ help="set extra/testbed-dependent flags, e.g. --extra enabled=true")
+
# user specifies remote aggregate/sm/component
if command in ("resources", "slices", "create", "delete", "start", "stop",
"restart", "shutdown", "get_ticket", "renew", "status"):
help="display format ([xml]|dns|ip)", default="xml",
choices=("xml", "dns", "ip"))
#panos: a new option to define the type of information about resources a user is interested in
- parser.add_option("-i", "--info", dest="info",
+ parser.add_option("-i", "--info", dest="info",
help="optional component information", default=None)
+ # a new option to retrieve or not reservation-oriented RSpecs (leases)
+ parser.add_option("-l", "--list_leases", dest="list_leases", type="choice",
+ help="Retrieve or not reservation-oriented RSpecs ([resources]|leases|all )",
+ choices=("all", "resources", "leases"), default="resources")
# 'create' does return the new rspec, makes sense to save that too
self.logger.error("No record of type %s"% options.type)
records = [ Record(dict=record_dict) for record_dict in record_dicts ]
for record in records:
- if (options.format == "text"): record.dump()
+ if (options.format == "text"): record.dump(sort=True)
else: print record.save_as_xml()
if options.file:
save_records_to_file(options.file, record_dicts, options.fileformat)
def add(self, options, args):
"add record into registry from xml file (Register)"
auth_cred = self.my_authority_credential_string()
- if len(args)!=1:
+ record_dict = {}
+ if len(args) > 0:
+ record_filepath = args[0]
+ rec_file = self.get_record_file(record_filepath)
+ record_dict.update(load_record_from_file(rec_file).todict())
+ if options:
+ record_dict.update(load_record_from_opts(options).todict())
+ # we should have a type by now
+ if 'type' not in record_dict :
self.print_help()
sys.exit(1)
- record_filepath = args[0]
- rec_file = self.get_record_file(record_filepath)
- record = load_record_from_file(rec_file).todict()
- return self.registry().Register(record, auth_cred)
+ # this is still planetlab dependent.. as plc will whine without that
+ # also, it's only for adding
+ if record_dict['type'] == 'user':
+ if not 'first_name' in record_dict:
+ record_dict['first_name'] = record_dict['hrn']
+ if 'last_name' not in record_dict:
+ record_dict['last_name'] = record_dict['hrn']
+ return self.registry().Register(record_dict, auth_cred)
def update(self, options, args):
"update record into registry from xml file (Update)"
- if len(args)!=1:
+ record_dict = {}
+ if len(args) > 0:
+ record_filepath = args[0]
+ rec_file = self.get_record_file(record_filepath)
+ record_dict.update(load_record_from_file(rec_file).todict())
+ if options:
+ record_dict.update(load_record_from_opts(options).todict())
+ # at the very least we need 'type' here
+ if 'type' not in record_dict:
self.print_help()
sys.exit(1)
- rec_file = self.get_record_file(args[0])
- record = load_record_from_file(rec_file)
- if record.type == "user":
- if record.hrn == self.user:
+
+ # don't translate into an object, as this would possibly distort
+ # user-provided data; e.g. add an 'email' field to Users
+ if record_dict['type'] == "user":
+ if record_dict['hrn'] == self.user:
cred = self.my_credential_string
else:
cred = self.my_authority_credential_string()
- elif record.type in ["slice"]:
+ elif record_dict['type'] in ["slice"]:
try:
- cred = self.slice_credential_string(record.hrn)
+ cred = self.slice_credential_string(record_dict['hrn'])
except ServerException, e:
# XXX smbaker -- once we have better error return codes, update this
# to do something better than a string compare
cred = self.my_authority_credential_string()
else:
raise
- elif record.type in ["authority"]:
+ elif record_dict['type'] in ["authority"]:
cred = self.my_authority_credential_string()
- elif record.type == 'node':
+ elif record_dict['type'] == 'node':
cred = self.my_authority_credential_string()
else:
- raise "unknown record type" + record.type
- record_dict = record.todict()
+ raise "unknown record type" + record_dict['type']
return self.registry().Update(record_dict, cred)
def remove(self, options, args):
api_options['geni_slice_urn'] = hrn_to_urn(hrn, 'slice')
if options.info:
api_options['info'] = options.info
+ if options.list_leases:
+ api_options['list_leases'] = options.list_leases
if options.current:
if options.current == True:
api_options['cached'] = False
def locate_by_type_pointer (self, type, pointer):
return self.records_by_type_pointer.get ( (type, pointer), None)
- # convenience : try to locate first based on type+pointer
- # if so, the record was created already even if e.g. its hrn has changed meanwhile
- # otherwise we try by type+hrn (is this truly useful ?)
- def locate (self, type, hrn=None, pointer=-1):
- if pointer!=-1:
- attempt = self.locate_by_type_pointer (type, pointer)
- if attempt : return attempt
- if hrn is not None:
- attempt = self.locate_by_type_hrn (type, hrn,)
- if attempt : return attempt
- return None
+ # a convenience/helper function to see if a record is already known
+ # a former, broken, attempt (in 2.1-9) had been made
+ # to try and use 'pointer' as a first, most significant attempt
+ # the idea being to preserve stuff as much as possible, and thus
+ # to avoid creating a new gid in the case of a simple hrn rename
+ # however this of course doesn't work as the gid depends on the hrn...
+ #def locate (self, type, hrn=None, pointer=-1):
+ # if pointer!=-1:
+ # attempt = self.locate_by_type_pointer (type, pointer)
+ # if attempt : return attempt
+ # if hrn is not None:
+ # attempt = self.locate_by_type_hrn (type, hrn,)
+ # if attempt : return attempt
+ # return None
# this makes the run method a bit abtruse - out of the way
def create_special_vini_record (self, interface_hrn):
site_hrn = _get_site_hrn(interface_hrn, site)
# import if hrn is not in list of existing hrns or if the hrn exists
# but its not a site record
- site_record=self.locate ('authority', site_hrn, site['site_id'])
+ site_record=self.locate_by_type_hrn ('authority', site_hrn)
if not site_record:
try:
urn = hrn_to_urn(site_hrn, 'authority')
continue
site_auth = get_authority(site_hrn)
site_name = site['login_base']
- hrn = hostname_to_hrn(site_auth, site_name, node['hostname'])
+ node_hrn = hostname_to_hrn(site_auth, site_name, node['hostname'])
# xxx this sounds suspicious
- if len(hrn) > 64: hrn = hrn[:64]
- node_record = self.locate ( 'node', hrn , node['node_id'] )
+ if len(node_hrn) > 64: node_hrn = node_hrn[:64]
+ node_record = self.locate_by_type_hrn ( 'node', node_hrn )
if not node_record:
try:
pkey = Keypair(create=True)
- urn = hrn_to_urn(hrn, 'node')
+ urn = hrn_to_urn(node_hrn, 'node')
node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
- node_record = RegNode (hrn=hrn, gid=node_gid,
+ node_record = RegNode (hrn=node_hrn, gid=node_gid,
pointer =node['node_id'],
- authority=get_authority(hrn))
+ authority=get_authority(node_hrn))
node_record.just_created()
dbsession.add(node_record)
dbsession.commit()
if len(person_hrn) > 64: person_hrn = person_hrn[:64]
person_urn = hrn_to_urn(person_hrn, 'user')
- user_record = self.locate ( 'user', person_hrn, person['person_id'])
+ user_record = self.locate_by_type_hrn ( 'user', person_hrn)
# return a tuple pubkey (a plc key object) and pkey (a Keypair object)
def init_person_key (person, plc_keys):
except:
self.logger.warning ("PlImporter: cannot locate slice_id %s - ignored"%slice_id)
slice_hrn = slicename_to_hrn(interface_hrn, slice['name'])
- slice_record = self.locate ('slice', slice_hrn, slice['slice_id'])
+ slice_record = self.locate_by_type_hrn ('slice', slice_hrn)
if not slice_record:
try:
pkey = Keypair(create=True)
dbsession.commit()
# update the PLC information that was specified with the record
- # xxx oddly enough, without this statement, record.__dict__ as received by
- # the driver seems to be off
+ # xxx oddly enough, without this useless statement,
+ # record.__dict__ as received by the driver seems to be off
# anyway the driver should receive an object
# (and then extract __dict__ itself if needed)
- print "before driver.update, record=%s"%record
+ print "DO NOT REMOVE ME before driver.update, record=%s"%record
if not self.driver.update (record.__dict__, new_record.__dict__, hrn, new_key):
logger.warning("driver.update failed")
from sfa.util.defaultdict import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
+from sfa.util.plxrn import PlXrn, hrn_to_pl_slicename
from sfa.util.cache import Cache
from sfa.trust.credential import Credential
# used to be used in get_ticket
if type == 'slice':
# add slice description, name, researchers, PI
- name = Xrn(hrn).get_leaf()
+ name = hrn_to_pl_slicename(hrn)
researchers = sfa_record.get('researchers', [])
pis = sfa_record.get('pis', [])
project_manager = None
elif type == "slice":
# can update project manager and description
- name = Xrn(hrn).get_leaf()
+ name = hrn_to_pl_slicename(hrn)
researchers = sfa_record.get('researchers', [])
pis = sfa_record.get('pis', [])
project_manager = None
##########
def remove (self, sfa_record):
type=sfa_record['type']
- name = Xrn(sfa_record['hrn']).get_leaf()
if type == 'user':
+ name = Xrn(sfa_record['hrn']).get_leaf()
if self.shell.auth_manager.get_user(name):
self.shell.auth_manager.delete_user(name)
elif type == 'slice':
+ name = hrn_to_pl_slicename(sfa_record['hrn'])
if self.shell.auth_manager.get_project(name):
self.shell.auth_manager.delete_project(name)
return True
records = [records]
for record in records:
- name = Xrn(record['hrn']).get_leaf()
os_record = None
if record['type'] == 'user':
+ name = Xrn(record['hrn']).get_leaf()
os_record = self.shell.auth_manager.get_user(name)
projects = self.shell.db.project_get_by_user(name)
record['slices'] = [self.hrn + "." + proj.name for \
record['roles'] = self.shell.db.user_get_roles(name)
keys = self.shell.db.key_pair_get_all_by_user(name)
record['keys'] = [key.public_key for key in keys]
- elif record['type'] == 'slice':
+ elif record['type'] == 'slice':
+ name = hrn_to_pl_slicename(record['hrn'])
os_record = self.shell.auth_manager.get_project(name)
record['description'] = os_record.description
record['PI'] = [self.hrn + "." + os_record.project_manager.name]
def sliver_status (self, slice_urn, slice_hrn):
# find out where this slice is currently running
- project_name = Xrn(slice_urn).get_leaf()
+ project_name = hrn_to_pl_slicename(slice_hrn)
project = self.shell.auth_manager.get_project(project_name)
instances = self.shell.db.instance_get_all_by_project(project_name)
if len(instances) == 0:
def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
- project_name = get_leaf(slice_hrn)
+ project_name = hrn_to_pl_slicename(slice_hrn)
aggregate = OSAggregate(self)
# parse rspec
rspec = RSpec(rspec_string)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
# we need to do this using the context of one of the slice users
- project_name = Xrn(slice_urn).get_leaf()
+ project_name = hrn_to_pl_slicename(slice_hrn)
self.euca_shell.init_context(project_name)
- name = OSXrn(xrn=slice_urn).name
aggregate = OSAggregate(self)
- return aggregate.delete_instances(name)
+ return aggregate.delete_instances(project_name)
def update_sliver(self, slice_urn, slice_hrn, rspec, creds, options):
- name = OSXrn(xrn=slice_urn).name
+ name = hrn_to_pl_slicename(slice_hrn)
aggregate = OSAggregate(self)
return aggregate.update_instances(name)
+
import os
+import socket
import base64
import string
import random
from sfa.rspecs.elements.login import Login
from sfa.rspecs.elements.disk_image import DiskImage
from sfa.rspecs.elements.services import Services
+from sfa.rspecs.elements.interface import Interface
from sfa.util.xrn import Xrn
+from sfa.util.plxrn import PlXrn, hrn_to_pl_slicename
from sfa.util.osxrn import OSXrn
from sfa.rspecs.version_manager import VersionManager
from sfa.openstack.image import ImageManager
rspec.version.add_nodes(nodes)
return rspec.toxml()
+ def get_availability_zones(self):
+ zones = self.driver.shell.db.zone_get_all()
+ if not zones:
+ zones = ['cloud']
+ else:
+ zones = [zone.name for zone in zones]
+
def get_slice_nodes(self, slice_xrn):
image_manager = ImageManager(self.driver)
- name = OSXrn(xrn = slice_xrn).name
+
+ zones = self.get_availability_zones()
+ name = hrn_to_pl_slicename(slice_xrn)
instances = self.driver.shell.db.instance_get_all_by_project(name)
rspec_nodes = []
for instance in instances:
rspec_node = Node()
- xrn = OSXrn(instance.hostname, 'node')
- rspec_node['component_id'] = xrn.urn
- rspec_node['component_name'] = xrn.name
+ interfaces = []
+ for fixed_ip in instance.fixed_ips:
+ if_xrn = PlXrn(auth=self.driver.hrn,
+ interface='node%s:eth0' % (instance.hostname))
+ interface = Interface({'component_id': if_xrn.urn})
+ interface['ips'] = [{'address': fixed_ip['address'],
+ 'netmask': fixed_ip['network'].netmask,
+ 'type': 'ipv4'}]
+ interfaces.append(interface)
+ if instance.availability_zone:
+ node_xrn = OSXrn(instance.availability_zone, 'node')
+ else:
+ node_xrn = OSXrn('cloud', 'node')
+
+ rspec_node['component_id'] = node_xrn.urn
+ rspec_node['component_name'] = node_xrn.name
rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
sliver = instance_to_sliver(instance)
disk_image = image_manager.get_disk_image(instance.image_ref)
- sliver['disk_images'] = [disk_image.to_rspec_object()]
+ sliver['disk_image'] = [disk_image.to_rspec_object()]
rspec_node['slivers'] = [sliver]
+ rspec_node['interfaces'] = interfaces
+ # slivers always provide the ssh service
+ hostname = None
+ for interface in interfaces:
+ if 'ips' in interface and interface['ips'] and \
+ isinstance(interface['ips'], list):
+ if interface['ips'][0].get('address'):
+ hostname = interface['ips'][0].get('address')
+ break
+ login = Login({'authentication': 'ssh-keys',
+ 'hostname': hostname,
+ 'port':'22', 'username': 'root'})
+ service = Services({'login': login})
+ rspec_node['services'] = [service]
rspec_nodes.append(rspec_node)
return rspec_nodes
def get_aggregate_nodes(self):
-
- zones = self.driver.shell.db.zone_get_all()
- if not zones:
- zones = ['cloud']
- else:
- zones = [zone.name for zone in zones]
-
+ zones = self.get_availability_zones()
# available sliver/instance/vm types
instances = self.driver.shell.db.instance_type_get_all().values()
# available images
slivers = []
for instance in instances:
sliver = instance_to_sliver(instance)
- sliver['disk_images'] = disk_image_objects
+ sliver['disk_image'] = disk_image_objects
slivers.append(sliver)
rspec_node['slivers'] = slivers
key = {}
key['user_id'] = username
key['name'] = username
- key['public'] = public_key
+ key['public_key'] = public_key
self.driver.shell.db.key_pair_create(key)
# remove old keys
self.driver.shell.db.key_pair_destroy(username, key.name)
- def create_security_group(self, group_name, fw_rules=[]):
- security_group = SecurityGroup(self.driver)
- security_group.create_security_group(group_name)
- for rule in fw_rules:
- security_group.add_rule_to_group(group_name,
+ def create_security_group(self, slicename, fw_rules=[]):
+ # use default group by default
+ group_name = 'default'
+ if isinstance(fw_rules, list) and fw_rules:
+ # Each sliver gets its own security group.
+ # Keep security group names unique by appending some random
+ # characters on end.
+ random_name = "".join([random.choice(string.letters+string.digits)
+ for i in xrange(6)])
+ group_name = slicename + random_name
+ security_group = SecurityGroup(self.driver)
+ security_group.create_security_group(group_name)
+ for rule in fw_rules:
+ security_group.add_rule_to_group(group_name,
protocol = rule.get('protocol'),
cidr_ip = rule.get('cidr_ip'),
port_range = rule.get('port_range'),
icmp_type_code = rule.get('icmp_type_code'))
+ return group_name
def add_rule_to_security_group(self, group_name, **kwds):
security_group = SecurityGroup(self.driver)
# iterate over sliver/instance types
for instance_type in instance_types:
fw_rules = instance_type.get('fw_rules', [])
- # Each sliver get's its own security group.
- # Keep security group names unique by appending some random
- # characters on end.
- random_name = "".join([random.choice(string.letters+string.digits)
- for i in xrange(6)])
- group_name = slicename + random_name
- self.create_security_group(group_name, fw_rules)
+ group_name = self.create_security_group(slicename, fw_rules)
ami_id = default_image_id
aki_id = default_aki_id
ari_id = default_ari_id
- req_image = instance_type.get('disk_images')
+ req_image = instance_type.get('disk_image')
if req_image and isinstance(req_image, list):
req_image_name = req_image[0]['name']
disk_image = image_manager.get_disk_image(name=req_image_name)
def _validate_port_range(self, port_range):
from_port = to_port = None
if isinstance(port_range, str):
- ports = port_range.split('-')
+ ports = port_range.split(':')
if len(ports) > 1:
from_port = int(ports[0])
to_port = int(ports[1])
else:
from_port = to_port = int(ports[0])
- else:
- from_port = to_port = None
return (from_port, to_port)
def _validate_icmp_type_code(self, icmp_type_code):
from_port, to_port = self._validate_port_range(port_range)
icmp_type = self._validate_icmp_type_code(icmp_type_code)
- if icmp_type:
+ if icmp_type and icmp_type[0] and icmp_type[1]:
from_port, to_port = icmp_type[0], icmp_type[1]
if group_name:
from sfa.rspecs.elements.interface import Interface
from sfa.rspecs.elements.services import Services
from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.granularity import Granularity
from sfa.rspecs.version_manager import VersionManager
-from sfa.planetlab.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename
+from sfa.planetlab.plxrn import PlXrn, hostname_to_urn, hrn_to_pl_slicename, slicename_to_hrn
from sfa.planetlab.vlink import get_tc_rate
from sfa.planetlab.topology import Topology
+import time
class PlAggregate:
filter.update({'peer_id': None})
nodes = self.driver.shell.GetNodes(filter)
+
+ # get the granularity in second for the reservation system
+ grain = self.driver.shell.GetLeaseGranularity()
site_ids = []
interface_ids = []
if site['longitude'] and site['latitude']:
location = Location({'longitude': site['longitude'], 'latitude': site['latitude'], 'country': 'unknown'})
rspec_node['location'] = location
+ # Granularity
+ granularity = Granularity({'grain': grain})
+ rspec_node['granularity'] = granularity
+
rspec_node['interfaces'] = []
if_count=0
for if_id in node['interface_ids']:
rspec_nodes.append(rspec_node)
return (rspec_nodes, links)
+
+ def get_leases(self, slice=None, options={}):
+ now = int(time.time())
+ filter={}
+ filter.update({'clip':now})
+ if slice:
+ filter.update({'name':slice['name']})
+ return_fields = ['lease_id', 'hostname', 'site_id', 'name', 't_from', 't_until']
+ leases = self.driver.shell.GetLeases(filter)
+
+ site_ids = []
+ for lease in leases:
+ site_ids.append(lease['site_id'])
+
+ # get sites
+ sites_dict = self.get_sites({'site_id': site_ids})
+
+ rspec_leases = []
+ for lease in leases:
+
+ rspec_lease = Lease()
+
+ # xxx how to retrieve site['login_base']
+ site_id=lease['site_id']
+ site=sites_dict[site_id]
+
+ rspec_lease['lease_id'] = lease['lease_id']
+ rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, site['login_base'], lease['hostname'])
+ slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
+ slice_urn = hrn_to_urn(slice_hrn, 'slice')
+ rspec_lease['slice_id'] = slice_urn
+ rspec_lease['t_from'] = lease['t_from']
+ rspec_lease['t_until'] = lease['t_until']
+ rspec_leases.append(rspec_lease)
+ return rspec_leases
+
+
def get_rspec(self, slice_xrn=None, version = None, options={}):
version_manager = VersionManager()
if slice and 'expires' in slice:
rspec.xml.set('expires', datetime_to_string(utcparse(slice['expires'])))
- nodes, links = self.get_nodes_and_links(slice_xrn, slice, slivers)
- rspec.version.add_nodes(nodes)
- rspec.version.add_links(links)
+ if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
+ nodes, links = self.get_nodes_and_links(slice_xrn, slice, slivers)
+ rspec.version.add_nodes(nodes)
+ rspec.version.add_links(links)
+ # add sliver defaults
+ default_sliver = slivers.get(None, [])
+ if default_sliver:
+ default_sliver_attribs = default_sliver.get('tags', [])
+ for attrib in default_sliver_attribs:
+ logger.info(attrib)
+ rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
- # add sliver defaults
- default_sliver = slivers.get(None, [])
- if default_sliver:
- default_sliver_attribs = default_sliver.get('tags', [])
- for attrib in default_sliver_attribs:
- logger.info(attrib)
- rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
+ if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
+ leases = self.get_leases(slice)
+ rspec.version.add_leases(leases)
+
return rspec.toxml()
import sfa.planetlab.peers as peers
from sfa.planetlab.plaggregate import PlAggregate
from sfa.planetlab.plslices import PlSlices
-from sfa.planetlab.plxrn import PlXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename
+from sfa.planetlab.plxrn import PlXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, xrn_to_hostname
def list_to_dict(recs, key):
'password', 'phone', 'url', 'bio', 'accepted_aup',
'enabled']:
update_fields[key] = all_fields[key]
+ # when updating a user, we always get a 'email' field at this point
+ # this is because 'email' is a native field in the RegUser object...
+ if 'email' in update_fields and not update_fields['email']:
+ del update_fields['email']
self.shell.UpdatePerson(pointer, update_fields)
if new_key:
#panos adding the info option to the caching key (can be improved)
if options.get('info'):
version_string = version_string + "_"+options.get('info', 'default')
+
+ # Adding the list_leases option to the caching key
+ if options.get('list_leases'):
+ version_string = version_string + "_"+options.get('list_leases', 'default')
# look in cache first
if cached_requested and self.cache and not slice_hrn:
for node in rspec.version.get_nodes_with_slivers():
hostname = None
if node.get('component_name'):
- hostname = node.get('component_name')
+ hostname = node.get('component_name').strip()
elif node.get('component_id'):
- hostname = xrn_to_hostname(node.get('component_id'))
+ hostname = xrn_to_hostname(node.get('component_id').strip())
if hostname:
requested_slivers.append(hostname)
nodes = slices.verify_slice_nodes(slice, requested_slivers, peer)
# add/remove links links
slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
+
+ # add/remove leases
+ requested_leases = []
+ kept_leases = []
+ for lease in rspec.version.get_leases():
+ requested_lease = {}
+ if not lease.get('lease_id'):
+ requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
+ requested_lease['t_from'] = lease.get('t_from')
+ requested_lease['t_until'] = lease.get('t_until')
+ else:
+ kept_leases.append(int(lease['lease_id']))
+ if requested_lease.get('hostname'):
+ requested_leases.append(requested_lease)
+
+ leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
# handle MyPLC peer association.
# only used by plc and ple.
'UpdateSlice', 'UpdateSliceTag',
# also used as-is in importer
'GetSites','GetNodes',
+ # Lease management methods
+ 'GetLeases', 'GetLeaseGranularity', 'DeleteLeases','UpdateLeases',
+ 'AddLeases'
]
# support for other names - this is experimental
alias_calls = { 'get_authorities':'GetSites',
logger.debug('PlShell %s (%s) returned ... '%(name,actual_name))
return result
return func
+
return sfa_peer
+    def verify_slice_leases(self, slice, requested_leases, kept_leases, peer):
+        # Reconcile this slice's leases with an incoming request: delete every
+        # current lease whose id is not listed in kept_leases, then add each
+        # entry of requested_leases ({'hostname','t_from','t_until'} dicts).
+
+        leases = self.driver.shell.GetLeases({'name':slice['name']}, ['lease_id'])
+        current_leases = [lease['lease_id'] for lease in leases]
+        deleted_leases = list(set(current_leases).difference(kept_leases))
+
+        try:
+            if peer:
+                # NOTE(review): there is no matching BindObjectToPeer after the
+                # updates -- confirm the slice is meant to stay unbound
+                self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+            deleted=self.driver.shell.DeleteLeases(deleted_leases)
+            for lease in requested_leases:
+                added=self.driver.shell.AddLeases(lease['hostname'], slice['name'], int(lease['t_from']), int(lease['t_until']))
+
+        except:
+            # best effort: log and keep going rather than failing the request
+            logger.log_exc('Failed to add/remove slice leases')
+
+        # NOTE(review): returns the lease list fetched *before* the
+        # delete/add operations, so it does not reflect the changes
+        return leases
+
+
def verify_slice_nodes(self, slice, requested_slivers, peer):
nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
users_by_site = defaultdict(list)
users_dict = {}
for user in users:
+ user['urn'] = user['urn'].lower()
hrn, type = urn_to_hrn(user['urn'])
username = get_leaf(hrn)
login_base = PlXrn(xrn=user['urn']).pl_login_base()
user['site'] = login_base
if 'email' in user:
+ user['email'] = user['email'].lower()
users_by_email[user['email']] = user
users_dict[user['email']] = user
else:
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+class Granularity(Element):
+
+    # RSpec element advertising the reservation system's time granularity;
+    # 'grain' carries the value returned by PLCAPI GetLeaseGranularity
+    fields = [
+        'grain',
+    ]
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+class Lease(Element):
+
+    # RSpec element describing one node reservation: the lease id, the
+    # reserved component's urn, the owning slice's urn, and the start/end
+    # of the reservation window
+    fields = [
+        'lease_id',
+        'component_id',
+        'slice_id',    # fixed: missing comma here concatenated with 't_from',
+        't_from',      # yielding a bogus 'slice_idt_from' field
+        't_until',
+    ]
'name',
'type',
'tags',
- 'disk_images',
+ 'disk_image',
'fw_rules',
]
--- /dev/null
+from sfa.util.xrn import Xrn
+from sfa.util.xml import XpathFilter
+from sfa.rspecs.elements.interface import Interface
+
+class PGv2Interface:
+    # (De)serializes <interface> elements for ProtoGENI-v2 rspecs
+
+    @staticmethod
+    def add_interfaces(xml, interfaces):
+        # Append one <interface> child per entry (attributes restricted to
+        # 'component_id'/'client_id'), with one nested <ip> child per entry
+        # in the interface's optional 'ips' list.
+        # Tolerate interfaces=None: callers pass node.get('interfaces'),
+        # which is None for a node dict without that key.
+        if not interfaces:
+            return
+        for interface in interfaces:
+            if_elem = xml.add_instance('interface', interface, ['component_id', 'client_id'])
+            for ip in interface.get('ips', []):
+                if_elem.add_instance('ip', {'address': ip.get('address'),
+                                            'netmask': ip.get('netmask'),
+                                            'type': ip.get('type')})
+
+    @staticmethod
+    def get_interfaces(xml):
+        # NOTE(review): parsing is not implemented yet
+        pass
from sfa.rspecs.elements.pltag import PLTag
from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
from sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
+from sfa.rspecs.elements.versions.pgv2Interface import PGv2Interface
from sfa.planetlab.plxrn import xrn_to_hostname
if node.get('location'):
node_elem.add_instance('location', node['location'], Location.fields)
# set interfaces
- if node.get('interfaces'):
- for interface in node.get('interfaces', []):
- node_elem.add_instance('interface', interface, ['component_id', 'client_id'])
+ PGv2Interface.add_interfaces(node_elem, node.get('interfaces'))
+ #if node.get('interfaces'):
+ # for interface in node.get('interfaces', []):
+ # node_elem.add_instance('interface', interface, ['component_id', 'client_id'])
# set available element
if node.get('boot_state'):
if node.get('boot_state').lower() == 'boot':
sliver_elem.set('name', sliver['type'])
if sliver.get('client_id'):
sliver_elem.set('client_id', sliver['client_id'])
- images = sliver.get('disk_images')
+ images = sliver.get('disk_image')
if images and isinstance(images, list):
PGv2DiskImage.add_images(sliver_elem, images)
fw_rules = sliver.get('fw_rules')
sliver['component_id'] = xml.attrib['component_id']
if 'name' in sliver_elem.attrib:
sliver['type'] = sliver_elem.attrib['name']
- sliver['images'] = PGv2DiskImage.get_images(sliver_elem)
+ sliver['disk_image'] = PGv2DiskImage.get_images(sliver_elem)
sliver['fw_rules'] = PLOSv1FWRule.get_rules(sliver_elem)
slivers.append(sliver)
return slivers
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.sfav1Sliver import SFAv1Sliver
+from sfa.rspecs.elements.versions.sfav1PLTag import SFAv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+
+from sfa.planetlab.plxrn import xrn_to_hostname
+
+class SFAv1Lease:
+    # Serializes and parses <lease> elements for SFA-v1 rspecs
+
+    @staticmethod
+    def add_leases(xml, leases):
+        # Attach the leases under the existing <network> element when there
+        # is one; otherwise create it, naming it after the authority of the
+        # first lease's component_id. With neither network nor leases, fall
+        # back to attaching directly under the given element.
+        network_elems = xml.xpath('//network')
+        if len(network_elems) > 0:
+            network_elem = network_elems[0]
+        elif len(leases) > 0:
+            network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
+            network_elem = xml.add_element('network', name = network_urn)
+        else:
+            network_elem = xml
+
+        lease_elems = []
+        for lease in leases:
+            lease_fields = ['lease_id', 'component_id', 'slice_id', 't_from', 't_until']
+            lease_elem = network_elem.add_instance('lease', lease, lease_fields)
+            lease_elems.append(lease_elem)
+
+
+    @staticmethod
+    def get_leases(xml, filter={}):
+        # match <lease> both unqualified and in the default namespace
+        xpath = '//lease%s | //default:lease%s' % (XpathFilter.xpath(filter), XpathFilter.xpath(filter))
+        lease_elems = xml.xpath(xpath)
+        return SFAv1Lease.get_lease_objs(lease_elems)
+
+    @staticmethod
+    def get_lease_objs(lease_elems):
+        # Build Lease objects from the xml elements.
+        # NOTE(review): Lease(attrib, elem) presumably seeds the object from
+        # the element's attributes already, making the explicit assignments
+        # below redundant -- confirm against Element.__init__
+        leases = []
+        for lease_elem in lease_elems:
+            lease = Lease(lease_elem.attrib, lease_elem)
+            # only leases that already exist server-side carry a lease_id
+            if lease.get('lease_id'):
+                lease['lease_id'] = lease_elem.attrib['lease_id']
+            lease['component_id'] = lease_elem.attrib['component_id']
+            lease['slice_id'] = lease_elem.attrib['slice_id']
+            lease['t_from'] = lease_elem.attrib['t_from']
+            lease['t_until'] = lease_elem.attrib['t_until']
+
+            leases.append(lease)
+        return leases
+
if location:
node_elem.add_instance('location', location, Location.fields)
+ # add granularity of the reservation system
+ granularity = node.get('granularity')
+ if granularity:
+ node_elem.add_instance('granularity', granularity, granularity.fields)
+
+
if isinstance(node.get('interfaces'), list):
for interface in node.get('interfaces', []):
node_elem.add_instance('interface', interface, ['component_id', 'client_id', 'ipv4'])
SERVICES='SERVICES',
SLIVER='SLIVER',
SLIVER_TYPE='SLIVER_TYPE',
+ LEASE='LEASE',
+ GRANULARITY='GRANULARITY',
)
class RSpecElement:
from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
from sfa.rspecs.elements.versions.sfav1Node import SFAv1Node
from sfa.rspecs.elements.versions.sfav1Sliver import SFAv1Sliver
+from sfa.rspecs.elements.versions.sfav1Lease import SFAv1Lease
class SFAv1(RSpecVersion):
enabled = True
self.xml.append(network.element)
current_networks.append(current_network)
+ # Leases
+
+ def get_leases(self, filter=None):
+ return SFAv1Lease.get_leases(self.xml, filter)
+
+ def add_leases(self, leases, network = None, no_dupes=False):
+ SFAv1Lease.add_leases(self.xml, leases)
+
if __name__ == '__main__':
from sfa.rspecs.rspec import RSpec
from sfa.rspecs.rspec_elements import *
xml_record.parse_dict (input_dict)
return xml_record.toxml()
- def dump(self, format=None, dump_parents=False):
+ def dump(self, format=None, dump_parents=False, sort=False):
if not format:
format = 'text'
else:
format = format.lower()
if format == 'text':
- self.dump_text(dump_parents)
+ self.dump_text(dump_parents,sort=sort)
elif format == 'xml':
print self.save_as_xml()
elif format == 'simple':
else:
raise Exception, "Invalid format %s" % format
- def dump_text(self, dump_parents=False):
- print "".join(['=' for i in range(40)])
+ def dump_text(self, dump_parents=False, sort=False):
+ print 40*'='
print "RECORD"
# print remaining fields
- for attrib_name in self.fields():
+ fields=self.fields()
+ if sort: fields.sort()
+ for attrib_name in fields:
attrib = getattr(self, attrib_name)
# skip internals
if attrib_name.startswith('_'): continue
return True
return False
+ ########## basic tools on URNs
URN_PREFIX = "urn:publicid:IDN"
+ URN_PREFIX_lower = "urn:publicid:idn"
+
+ @staticmethod
+ def is_urn (text):
+ return text.lower().startswith(Xrn.URN_PREFIX_lower)
- ########## basic tools on URNs
@staticmethod
def urn_full (urn):
- if urn.startswith(Xrn.URN_PREFIX): return urn
+ if Xrn.is_urn(urn): return urn
else: return Xrn.URN_PREFIX+urn
@staticmethod
def urn_meaningful (urn):
- if urn.startswith(Xrn.URN_PREFIX): return urn[len(Xrn.URN_PREFIX):]
+ if Xrn.is_urn(urn): return urn[len(Xrn.URN_PREFIX):]
else: return urn
@staticmethod
def urn_split (urn):
def __init__ (self, xrn, type=None):
if not xrn: xrn = ""
# user has specified xrn : guess if urn or hrn
- if xrn.startswith(Xrn.URN_PREFIX):
+ if Xrn.is_urn(xrn):
self.hrn=None
self.urn=xrn
self.urn_to_hrn()
"""
# if not self.urn or not self.urn.startswith(Xrn.URN_PREFIX):
- if not self.urn.startswith(Xrn.URN_PREFIX):
+ if not Xrn.is_urn(self.urn):
raise SfaAPIError, "Xrn.urn_to_hrn"
parts = Xrn.urn_split(self.urn)
"""
# if not self.hrn or self.hrn.startswith(Xrn.URN_PREFIX):
- if self.hrn.startswith(Xrn.URN_PREFIX):
+ if Xrn.is_urn(self.hrn):
raise SfaAPIError, "Xrn.hrn_to_urn, hrn=%s"%self.hrn
if self.type and self.type.startswith('authority'):