##########
# a lot of stuff in the working dir is just noise
scan:
- @find . -type f | egrep -v '^\./\.|/\.git/|/\.svn/|TAGS|AA-|~$$|egg-info|\.(py[co]|doc|html|pdf|png|svg|out|bak|dg)$$'
+ @find . -type f | egrep -v '^\./\.|/\.git/|/\.svn/|TAGS|AA-|~$$|egg-info|\.(py[co]|doc|html|pdf|png|svg|out|bak|dg|pickle)$$'
tags:
$(MAKE) scan | xargs etags
# 2 forms are supported
# (*) if your plc root context has direct ssh access:
# make sync PLC=private.one-lab.org
-# (*) otherwise, entering through the root context
-# make sync PLCHOST=testbox1.inria.fr GUEST=vplc03.inria.fr
+# (*) otherwise, for test deployments, use on your testmaster
+# $ run export
+# and cut-and-paste the export lines before you run make sync
-PLCHOST ?= testplc.onelab.eu
-
-ifdef GUEST
-ifdef PLCHOST
-SSHURL:=root@$(PLCHOST):/vservers/$(GUEST)
-SSHCOMMAND:=ssh root@$(PLCHOST) vserver $(GUEST)
-endif
-endif
ifdef PLC
SSHURL:=root@$(PLC):/
SSHCOMMAND:=ssh root@$(PLC)
+else
+ifdef PLCHOSTLXC
+SSHURL:=root@$(PLCHOSTLXC):/var/lib/lxc/$(GUESTNAME)/rootfs
+SSHCOMMAND:=ssh root@$(PLCHOSTLXC) ssh $(GUESTHOSTNAME)
+else
+ifdef PLCHOSTVS
+SSHURL:=root@$(PLCHOSTVS):/vservers/$(GUESTNAME)
+SSHCOMMAND:=ssh root@$(PLCHOSTVS) vserver $(GUESTNAME) exec
+endif
+endif
+endif
+
+synccheck:
+ifeq (,$(SSHURL))
+ @echo "sync: I need more info from the command line, e.g."
+ @echo " make sync PLC=boot.planetlab.eu"
+ @echo " make sync PLCHOSTVS=.. GUESTNAME=.."
+ @echo " make sync PLCHOSTLXC=.. GUESTNAME=.. GUESTHOSTNAME=.."
+ @exit 1
endif
LOCAL_RSYNC_EXCLUDES += --exclude '*.pyc'
BINS = ./config/sfa-config-tty ./config/gen-sfa-cm-config.py \
./sfa/server/sfa-start.py \
- ./sfa/importer/sfa-import.py ./sfa/importer/sfa-nuke.py \
+ ./sfa/clientbin/sfaadmin.py \
$(CLIENTS)
-synccheck:
-ifeq (,$(SSHURL))
- @echo "*sync: You must define, either PLC, or PLCHOST & GUEST, on the command line"
- @echo " e.g. make sync PLC=private.one-lab.org"
- @echo " or make sync PLCHOST=testbox1.inria.fr GUEST=vplc03.inria.fr"
- @exit 1
-endif
-
-
synclib: synccheck
+$(RSYNC) --relative ./sfa/ --exclude migrations $(SSHURL)/usr/lib\*/python2.\*/site-packages/
syncbin: synccheck
fi
check
- MESSAGE=$"Checking for PostgreSQL server"
+ MESSAGE=$"SFA: Checking for PostgreSQL server"
echo -n "$MESSAGE"
[ "$ERRORS" == 0 ] && success "$MESSAGE" || failure "$MESSAGE" ; echo
}
# migrations are now handled in the code by sfa.storage.dbschema
# install peer certs
- action $"SFA installing peer certs" daemon /usr/bin/sfa-start.py -t -d $OPTIONS
+ action $"SFA: installing peer certs" daemon /usr/bin/sfa-start.py -t -d $OPTIONS
- [ "$SFA_REGISTRY_ENABLED" == 1 ] && action $"SFA Registry" daemon /usr/bin/sfa-start.py -r -d $OPTIONS
+ [ "$SFA_REGISTRY_ENABLED" == 1 ] && action $"SFA: Registry" daemon /usr/bin/sfa-start.py -r -d $OPTIONS
- [ "$SFA_AGGREGATE_ENABLED" == 1 ] && action $"SFA Aggregate" daemon /usr/bin/sfa-start.py -a -d $OPTIONS
+ [ "$SFA_AGGREGATE_ENABLED" == 1 ] && action $"SFA: Aggregate" daemon /usr/bin/sfa-start.py -a -d $OPTIONS
- [ "$SFA_SM_ENABLED" == 1 ] && action "SFA SliceMgr" daemon /usr/bin/sfa-start.py -s -d $OPTIONS
+ [ "$SFA_SM_ENABLED" == 1 ] && action "SFA: SliceMgr" daemon /usr/bin/sfa-start.py -s -d $OPTIONS
[ "$SFA_FLASHPOLICY_ENABLED" == 1 ] && \
action "Flash Policy Server" daemon /usr/bin/sfa_flashpolicy.py --file="$SFA_FLASHPOLICY_CONFIG_FILE" --port=$SFA_FLASHPOLICY_PORT -d
%define name sfa
%define version 2.1
-%define taglevel 3
+%define taglevel 5
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%global python_sitearch %( python -c "from distutils.sysconfig import get_python_lib; print get_python_lib(1)" )
/etc/sfa/xml.xsd
/etc/sfa/protogeni-rspec-common.xsd
/etc/sfa/topology
-%{_bindir}/sfa-import.py*
-%{_bindir}/sfa-nuke.py*
%{_bindir}/gen-sfa-cm-config.py*
-%{_bindir}/sfa-ca.py*
%files client
%config (noreplace) /etc/sfa/sfi_config
[ "$1" -ge "1" ] && service sfa-cm restart || :
%changelog
+* Mon Apr 16 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.1-5
+- make sync now supports vserver or lxc.
+- Added slice expiration and login info to SliverStatus response.
+- Fixed CreateSliver bug that causes the method to fail if any node element is missing
+ the 'component_name' attribute.
+- Fixed various bugs that caused SFA to generate invalid or incorrect sliver ids.
+
+* Tue Mar 20 2012 Tony Mack <tmack@cs.princeton.edu> - sfa-2.1-4
+- Introduced new administrative command line script, sfaadmin.py. Removed various single
+ purpose scripts and migrated their functionality into sfaadmin.py.
+- Refactored Registry import scripts.
+- Removed SQLAlchemy dependency from sfi.py.
+- Fixed bugs in sfi.py
+- Registry, Aggregate and SliceManager now support the OpenStack framework.
+
* Fri Feb 24 2012 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - sfa-2.1-3
- slice x researcher rel. in database,
- plimporter to maintain that, as well as user.email, and more robust
print "unknown output format", format
def save_record_to_file(filename, record_dict):
- rec_record = Record(dict=record_dict)
- str = record.save_to_string()
+ record = Record(dict=record_dict)
+ xml = record.save_as_xml()
f=codecs.open(filename, encoding='utf-8',mode="w")
- f.write(str)
+ f.write(xml)
f.close()
return
api_options = {}
api_options ['append'] = False
api_options ['call_id'] = unique_call_id()
-
result = server.CreateSliver(slice_urn, creds, rspec, users, *self.ois(server, api_options))
value = ReturnValue.get_value(result)
if self.options.raw:
pprinter = PrettyPrinter(indent=4)
+def optparse_listvalue_callback(option, opt, value, parser):
+ setattr(parser.values, option.dest, value.split(','))
+
def args(*args, **kwargs):
def _decorator(func):
func.__dict__.setdefault('options', []).insert(0, (args, kwargs))
return _decorator
class Commands(object):
-
def _get_commands(self):
available_methods = []
for attrib in dir(self):
sfa_record = Record(dict=record)
sfa_record.dump(format)
if outfile:
- save_records_to_file(outfile, records)
+ save_records_to_file(outfile, records)
- def register(self, record):
- pass
- def update(self, record):
- pass
+ def _record_dict(self, xrn=None, type=None, url=None, key=None, \
+ description=None, slices='', researchers=''):
+ record_dict = {}
+ if xrn:
+ if type:
+ xrn = Xrn(xrn, type)
+ else:
+ xrn = Xrn(xrn)
+ record_dict['urn'] = xrn.get_urn()
+ record_dict['hrn'] = xrn.get_hrn()
+ record_dict['type'] = xrn.get_type()
+ if url:
+ record_dict['url'] = url
+ if key:
+ try:
+ pubkey = open(key, 'r').read()
+ except IOError:
+ pubkey = key
+ record_dict['keys'] = [pubkey]
+ if slices:
+ record_dict['slices'] = slices
+ if researchers:
+ record_dict['researchers'] = researchers
+ if description:
+ record_dict['description'] = description
+ return record_dict
+
+ @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn')
+ @args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
+ @args('-u', '--url', dest='url', metavar='<url>', help='URL', default=None)
+ @args('-d', '--description', dest='description', metavar='<description>',
+ help='Description', default=None)
+ @args('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
+ default=None)
+ @args('-s', '--slices', dest='slices', metavar='<slices>', help='slice xrns',
+ default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='slice researchers',
+ default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ @args('-p', '--pis', dest='pis', metavar='<PIs>',
+ help='Principal Investigators/Project Managers ',
+ default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ def register(self, xrn, type=None, url=None, description=None, key=None, slices='',
+ pis='', researchers=''):
+ record_dict = self._record_dict(xrn=xrn, type=type, url=url, key=key,
+ slices=slices, researchers=researchers)
+ self.api.manager.Register(self.api, record_dict)
+
+
+ @args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn')
+ @args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
+ @args('-u', '--url', dest='url', metavar='<url>', help='URL', default=None)
+ @args('-d', '--description', dest='description', metavar='<description>',
+ help='Description', default=None)
+ @args('-k', '--key', dest='key', metavar='<key>', help='public key string or file',
+ default=None)
+ @args('-s', '--slices', dest='slices', metavar='<slices>', help='slice xrns',
+ default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ @args('-r', '--researchers', dest='researchers', metavar='<researchers>', help='slice researchers',
+ default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ @args('-p', '--pis', dest='pis', metavar='<PIs>',
+ help='Principal Investigators/Project Managers ',
+ default='', type="str", action='callback', callback=optparse_listvalue_callback)
+ def update(self, xrn, type=None, url=None, description=None, key=None, slices='',
+ pis='', researchers=''):
+ record_dict = self._record_dict(xrn=xrn, type=type, url=url, description=description,
+ key=key, slices=slices, researchers=researchers)
+ self.api.manager.Update(self.api, record_dict)
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn')
@args('-t', '--type', dest='type', metavar='<type>', help='object type', default=None)
if xrn:
options['geni_slice_urn'] = xrn
resources = self.api.manager.ListResources(self.api, [], options)
- pprinter.pprint(resources)
+ print resources
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
@args('-r', '--rspec', dest='rspec', metavar='<rspec>', help='rspec file')
@args('-u', '--user', dest='user', metavar='<user>', help='hrn/urn of slice user')
@args('-k', '--key', dest='key', metavar='<key>', help="path to user's public key file")
def create(self, xrn, rspec, user, key):
- xrn = Xrn(xrn)
+ xrn = Xrn(xrn, 'slice')
slice_urn=xrn.get_urn()
- slice_hrn=xrn.get_hrn()
rspec_string = open(rspec).read()
user_xrn = Xrn(user, 'user')
user_urn = user_xrn.get_urn()
user_key_string = open(key).read()
users = [{'urn': user_urn, 'keys': [user_key_string]}]
options={}
- self.api.manager.CreateSliver(self, slice_urn, slice_hrn, [], rspec_string, users, options)
+ self.api.manager.CreateSliver(self, slice_urn, [], rspec_string, users, options)
@args('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None)
def delete(self, xrn):
"""
# just create certs for all sfa interfaces even if they
# aren't enabled
- auth_info = self.auth_hierarchy.get_auth_info(self.interface_hrn)
+ auth_info = self.auth_hierarchy.get_auth_info(self.config.SFA_INTERFACE_HRN)
pkey = auth_info.get_pkey_object()
- hrn=self.interface_hrn
+ hrn=self.config.SFA_INTERFACE_HRN
for type in [ 'authority+sa', 'authority+am', 'authority+sm', ]:
urn = hrn_to_urn(hrn, type)
gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
# testbed-neutral : create local certificates and the like
auth_hierarchy = Hierarchy ()
self.create_top_level_auth_records(self.config.SFA_INTERFACE_HRN)
-
+ self.create_interface_records()
+
# testbed-specific
testbed_importer = None
generic=Generic.the_flavour()
+++ /dev/null
-#!/usr/bin/python
-##
-# Delete all the database records for SFA. This tool is used to clean out SFA
-# records during testing.
-#
-# Authority info (maintained by the hierarchy module in a subdirectory tree)
-# is not purged by this tool and may be deleted by a command like 'rm'.
-##
-
-import sys
-import os
-from optparse import OptionParser
-
-from sfa.util.sfalogging import logger
-
-from sfa.storage.alchemy import engine
-from sfa.storage.dbschema import DBSchema
-
-def main():
- usage="%prog: trash the registry DB"
- parser = OptionParser(usage=usage)
- parser.add_option("-f","--file-system",dest='clean_fs',action='store_true',default=False,
- help="Clean up the /var/lib/sfa/authorities area as well")
- parser.add_option("-c","--certs",dest='clean_certs',action='store_true',default=False,
- help="Remove all cached certs/gids found in /var/lib/sfa/authorities area as well")
- parser.add_option("-0","--no-reinit",dest='reinit',action='store_false',default=True,
- help="By default a new DB schema is installed after the cleanup; this option prevents that")
- (options,args)=parser.parse_args()
- if args:
- parser.print_help()
- sys.exit(1)
- dbschema=DBSchema()
- logger.info("Purging SFA records from database")
- dbschema.nuke()
- # for convenience we re-create the schema here, so there's no need for an explicit
- # service sfa restart
- # however in some (upgrade) scenarios this might be wrong
- if options.reinit:
- logger.info("re-creating empty schema")
- dbschema.init_or_upgrade()
-
- if options.clean_certs:
- # remove the server certificate and all gids found in /var/lib/sfa/authorities
- logger.info("Purging cached certificates")
- for (dir, _, files) in os.walk('/var/lib/sfa/authorities'):
- for file in files:
- if file.endswith('.gid') or file == 'server.cert':
- path=dir+os.sep+file
- os.unlink(path)
- if not os.path.exists(path):
- logger.info("Unlinked file %s"%path)
- else:
- logger.error("Could not unlink file %s"%path)
-
- if options.clean_fs:
- # just remove all files that do not match 'server.key' or 'server.cert'
- logger.info("Purging registry filesystem cache")
- preserved_files = [ 'server.key', 'server.cert']
- for (dir,_,files) in os.walk('/var/lib/sfa/authorities'):
- for file in files:
- if file in preserved_files: continue
- path=dir+os.sep+file
- os.unlink(path)
- if not os.path.exists(path):
- logger.info("Unlinked file %s"%path)
- else:
- logger.error("Could not unlink file %s"%path)
-if __name__ == "__main__":
- main()
call_id = options.get('call_id')
if Callids().already_handled(call_id): return {}
- xrn = Xrn(xrn)
+ xrn = Xrn(xrn,'slice')
slice_urn=xrn.get_urn()
slice_hrn=xrn.get_hrn()
return self.driver.sliver_status (slice_urn, slice_hrn)
call_id = options.get('call_id')
if Callids().already_handled(call_id): return ""
- xrn = Xrn(xrn)
+ xrn = Xrn(xrn, 'slice')
slice_urn=xrn.get_urn()
slice_hrn=xrn.get_hrn()
call_id = options.get('call_id')
if Callids().already_handled(call_id): return True
- xrn = Xrn(xrn)
+ xrn = Xrn(xrn, 'slice')
slice_urn=xrn.get_urn()
slice_hrn=xrn.get_hrn()
return self.driver.delete_sliver (slice_urn, slice_hrn, creds, options)
call_id = options.get('call_id')
if Callids().already_handled(call_id): return True
- xrn = Xrn(xrn)
+ xrn = Xrn(xrn, 'slice')
slice_urn=xrn.get_urn()
slice_hrn=xrn.get_hrn()
return self.driver.renew_sliver (slice_urn, slice_hrn, creds, expiration_time, options)
"""
def __init__(self, config):
- self.config = Config
+ self.config = config
+ self.nova_shell = NovaShell(config)
+ self.access_key = None
+ self.secret_key = None
- def get_euca_connection(self):
+ def init_context(self, project_name=None):
+
+ # use the context of the specified project's project
+ # manager.
+ if project_name:
+ project = self.nova_shell.auth_manager.get_project(project_name)
+ self.access_key = "%s:%s" % (project.project_manager.name, project_name)
+ self.secret_key = project.project_manager.secret
+ else:
+ # use admin user's context
+ admin_user = self.nova_shell.auth_manager.get_user(self.config.SFA_NOVA_USER)
+ #access_key = admin_user.access
+ self.access_key = '%s' % admin_user.name
+ self.secret_key = admin_user.secret
+
+ def get_euca_connection(self, project_name=None):
if not has_boto:
logger.info('Unable to access EC2 API - boto library not found.')
return None
- nova = NovaShell(self.config)
- admin_user = nova.auth_manager.get_user(self.config.SFA_NOVA_USER)
- access_key = admin_user.access
- secret_key = admin_user.secret
+
+ if not self.access_key or not self.secret_key:
+ self.init_context(project_name)
+
url = self.config.SFA_NOVA_API_URL
+ host = None
+ port = None
path = "/"
- euca_port = self.config.SFA_NOVA_API_PORT
use_ssl = False
-
# Split the url into parts
if url.find('https://') >= 0:
use_ssl = True
elif url.find('http://') >= 0:
use_ssl = False
url = url.replace('http://', '')
- (host, parts) = url.split(':')
+ parts = url.split(':')
+ host = parts[0]
if len(parts) > 1:
- parts = parts.split('/')
+ parts = parts[1].split('/')
port = int(parts[0])
parts = parts[1:]
- path = '/'.join(parts)
-
- return boto.connect_ec2(aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
+ path = '/'+'/'.join(parts)
+ return boto.connect_ec2(aws_access_key_id=self.access_key,
+ aws_secret_access_key=self.secret_key,
is_secure=use_ssl,
region=RegionInfo(None, 'eucalyptus', host),
+ host=host,
port=port,
path=path)
def __getattr__(self, name):
def func(*args, **kwds):
conn = self.get_euca_connection()
- return getattr(conn, name)(*args, **kwds)
- return func
+
--- /dev/null
+from nova.exception import ImageNotFound
+from sfa.rspecs.elements.disk_image import DiskImage
+
+class Image:
+
+ def __init__(self, driver):
+ self.driver = driver
+
+ @staticmethod
+ def disk_image_to_rspec_object(image):
+ img = DiskImage()
+ img['name'] = image['ami']['name']
+ img['description'] = image['ami']['name']
+ img['os'] = image['ami']['name']
+ img['version'] = image['ami']['name']
+ return img
+
+ def get_available_disk_images(self):
+ # get image records
+ disk_images = []
+ for image in self.driver.shell.image_manager.detail():
+ if image['container_format'] == 'ami':
+ disk_images.append(self.get_machine_image_details(image))
+ return disk_images
+
+ def get_machine_image_details(self, image):
+ """
+ Returns a dict that contains the ami, aki and ari details for the specified
+ ami image.
+ """
+ disk_image = {}
+ if image['container_format'] == 'ami':
+ kernel_id = image['properties']['kernel_id']
+ ramdisk_id = image['properties']['ramdisk_id']
+ disk_image['ami'] = image
+ disk_image['aki'] = self.driver.shell.image_manager.show(kernel_id)
+ disk_image['ari'] = self.driver.shell.image_manager.show(ramdisk_id)
+ return disk_image
+
+ def get_disk_image(self, id=None, name=None):
+ """
+        Look up an image bundle using the specified id or name
+ """
+ disk_image = None
+ try:
+ if id:
+ image = self.driver.shell.image_manager.show(id)
+ elif name:
+ image = self.driver.shell.image_manager.show_by_name(name)
+ if image['container_format'] == 'ami':
+ disk_image = self.get_machine_image_details(image)
+ except ImageNotFound:
+ pass
+ return disk_image
+
+
import time
import datetime
-#
+
from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
- RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+ RecordNotFound, SfaNotImplemented, SliverDoesNotExist, \
+ SfaInvalidArgument
from sfa.util.sfalogging import logger
from sfa.util.defaultdict import defaultdict
from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf, urn_to_sliver_id
from sfa.util.cache import Cache
+from sfa.trust.credential import Credential
# used to be used in get_ticket
#from sfa.trust.sfaticket import SfaTicket
##########
def register (self, sfa_record, hrn, pub_key):
type = sfa_record['type']
- pl_record = self.sfa_fields_to_pl_fields(type, hrn, sfa_record)
-
+
+ #pl_record = self.sfa_fields_to_pl_fields(type dd , hrn, sfa_record)
+
if type == 'slice':
- acceptable_fields=['url', 'instantiation', 'name', 'description']
# add slice description, name, researchers, PI
- pass
+ name = Xrn(hrn).get_leaf()
+ researchers = sfa_record.get('researchers', [])
+ pis = sfa_record.get('pis', [])
+ project_manager = None
+ description = sfa_record.get('description', None)
+ if pis:
+ project_manager = Xrn(pis[0], 'user').get_leaf()
+ elif researchers:
+ project_manager = Xrn(researchers[0], 'user').get_leaf()
+ if not project_manager:
+ err_string = "Cannot create a project without a project manager. " + \
+ "Please specify at least one PI or researcher for project: " + \
+ name
+ raise SfaInvalidArgument(err_string)
+
+ users = [Xrn(user, 'user').get_leaf() for user in \
+ pis + researchers]
+ self.shell.auth_manager.create_project(name, project_manager, description, users)
elif type == 'user':
# add person roles, projects and keys
- pass
- return pointer
+ name = Xrn(hrn).get_leaf()
+ self.shell.auth_manager.create_user(name)
+ projects = sfa_records.get('slices', [])
+ for project in projects:
+ project_name = Xrn(project).get_leaf()
+ self.shell.auth_manager.add_to_project(name, project_name)
+ keys = sfa_records.get('keys', [])
+ for key in keys:
+ key_dict = {
+ 'user_id': name,
+ 'name': name,
+ 'public': key,
+ }
+ self.shell.db.key_pair_create(key_dict)
+
+ return name
##########
# xxx actually old_sfa_record comes filled with plc stuff as well in the original code
def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
- pointer = old_sfa_record['pointer']
- type = old_sfa_record['type']
-
+ type = new_sfa_record['type']
+
# new_key implemented for users only
if new_key and type not in [ 'user' ]:
raise UnknownSfaType(type)
elif type == "slice":
- # can update description, researchers and PI
- pass
+ # can update project manager and description
+ name = Xrn(hrn).get_leaf()
+ researchers = sfa_record.get('researchers', [])
+ pis = sfa_record.get('pis', [])
+ project_manager = None
+ description = sfa_record.get('description', None)
+ if pis:
+ project_manager = Xrn(pis[0], 'user').get_leaf()
+ elif researchers:
+ project_manager = Xrn(researchers[0], 'user').get_leaf()
+ self.shell.auth_manager.modify_project(name, project_manager, description)
+
elif type == "user":
- # can update slices, keys and roles
+        # can technically update access_key and secret_key,
+ # but that is not in our scope, so we do nothing.
pass
return True
def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+ project_name = get_leaf(slice_hrn)
aggregate = OSAggregate(self)
- slicename = get_leaf(slice_hrn)
-
# parse rspec
rspec = RSpec(rspec_string)
- requested_attributes = rspec.version.get_slice_attributes()
+
+ # ensure project and users exist in local db
+ aggregate.create_project(project_name, users, options=options)
+
+    # collect public keys
pubkeys = []
+ project_key = None
for user in users:
- pubkeys.extend(user['keys'])
- # assume that there is a key whos nane matches the caller's username.
- project_key = Xrn(users[0]['urn']).get_leaf()
-
-
- # ensure slice record exists
- aggregate.create_project(slicename, users, options=options)
+ pubkeys.extend(user['keys'])
+ # assume first user is the caller and use their context
+        # for the ec2/euca api connection. Also, use the first user's
+ # key as the project key.
+ if not project_key:
+ username = Xrn(user['urn']).get_leaf()
+ user_keys = self.shell.db.key_pair_get_all_by_user(username)
+ if user_keys:
+ project_key = user_keys[0].name
+
# ensure person records exists
- aggregate.create_project_users(slicename, users, options=options)
- # add/remove slice from nodes
- aggregate.run_instances(slicename, rspec, project_key, pubkeys)
+ self.euca_shell.init_context(project_name)
+ aggregate.run_instances(project_name, rspec_string, project_key, pubkeys)
return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+ # we need to do this using the context of one of the slice users
+ project_name = Xrn(slice_urn).get_leaf()
+ self.euca_shell.init_context(project_name)
name = OSXrn(xrn=slice_urn).name
- slice = self.shell.project_get(name)
- if not slice:
- return 1
- instances = self.shell.db.instance_get_all_by_project(name)
- for instance in instances:
- self.shell.db.instance_destroy(instance.instance_id)
- return 1
+ aggregate = OSAggregate(self)
+ return aggregate.delete_instances(name)
+
+ def update_sliver(self, slice_urn, slice_hrn, rspec, creds, options):
+ name = OSXrn(xrn=slice_urn).name
+ aggregate = OSAggregate(self)
+ return aggregate.update_instances(name)
def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
return True
def stop_slice (self, slice_urn, slice_hrn, creds):
name = OSXrn(xrn=slice_urn).name
- slice = self.shell.get_project(name)
- instances = self.shell.db.instance_get_all_by_project(name)
- for instance in instances:
- self.shell.db.instance_stop(instance.instance_id)
- return 1
-
+ aggregate = OSAggregate(self)
+ return aggregate.stop_instances(name)
+
def reset_slice (self, slice_urn, slice_hrn, creds):
raise SfaNotImplemented ("reset_slice not available at this interface")
# use the 'capability' auth mechanism for higher performance when the PLC db is local
def __init__ ( self, config ) :
- url = config.SFA_PLC_URL
- # try to figure if the url is local
- is_local=False
- hostname=urlparse(url).hostname
- if hostname == 'localhost': is_local=True
- # otherwise compare IP addresses;
- # this might fail for any number of reasons, so let's harden that
- try:
- # xxx todo this seems to result in a DNS request for each incoming request to the AM
- # should be cached or improved
- url_ip=socket.gethostbyname(hostname)
- local_ip=socket.gethostbyname(socket.gethostname())
- if url_ip==local_ip: is_local=True
- except:
- pass
+ self.auth_manager = None
+ self.compute_manager = None
+ self.network_manager = None
+ self.scheduler_manager = None
+ self.db = None
+ self.image_manager = None
-
- if is_local and has_nova:
+ if has_nova:
logger.debug('nova access - native')
# load the config
flags.FLAGS(['foo', '--flagfile=/etc/nova/nova.conf', 'foo', 'foo'])
self.db = InjectContext(db, context.get_admin_context())
self.image_manager = InjectContext(GlanceImageService(), context.get_admin_context())
else:
- self.auth = None
- self.proxy = None
logger.debug('nova access - REST')
raise SfaNotImplemented('nova access - Rest')
+import os
+import base64
+import string
+import random
+from collections import defaultdict
from nova.exception import ImageNotFound
from nova.api.ec2.cloud import CloudController
from sfa.util.faults import SfaAPIError
from sfa.util.xrn import Xrn
from sfa.util.osxrn import OSXrn
from sfa.rspecs.version_manager import VersionManager
-
-
-def disk_image_to_rspec_object(image):
- img = DiskImage()
- img['name'] = image['ami']['name']
- img['description'] = image['ami']['name']
- img['os'] = image['ami']['name']
- img['version'] = image['ami']['name']
- return img
-
+from sfa.openstack.image import Image
+from sfa.openstack.security_group import SecurityGroup
+from sfa.util.sfalogging import logger
def instance_to_sliver(instance, slice_xrn=None):
# should include?
sliver = Sliver({'slice_id': sliver_id,
'name': name,
- 'type': 'plos-' + type,
+ 'type': type,
'tags': []})
return sliver
def __init__(self, driver):
self.driver = driver
- def get_machine_image_details(self, image):
- """
- Returns a dict that contains the ami, aki and ari details for the specified
- ami image.
- """
- disk_image = {}
- if image['container_format'] == 'ami':
- kernel_id = image['properties']['kernel_id']
- ramdisk_id = image['properties']['ramdisk_id']
- disk_image['ami'] = image
- disk_image['aki'] = self.driver.shell.image_manager.show(kernel_id)
- disk_image['ari'] = self.driver.shell.image_manager.show(ramdisk_id)
- return disk_image
-
- def get_disk_image(self, id=None, name=None):
- """
- Look up a image bundle using the specifeid id or name
- """
- disk_image = None
- try:
- if id:
- image = self.driver.shell.image_manager.show(image_id)
- elif name:
- image = self.driver.shell.image_manager.show_by_name(image_name)
- if image['container_format'] == 'ami':
- disk_image = self.get_machine_image_details(image)
- except ImageNotFound:
- pass
- return disk_image
-
- def get_available_disk_images(self):
- # get image records
- disk_images = []
- for image in self.driver.shell.image_manager.detail():
- if image['container_format'] == 'ami':
- disk_images.append(self.get_machine_image_details(image))
- return disk_images
-
def get_rspec(self, slice_xrn=None, version=None, options={}):
version_manager = VersionManager()
version = version_manager.get_version(version)
return rspec.toxml()
def get_slice_nodes(self, slice_xrn):
+ image_manager = Image(self.driver)
name = OSXrn(xrn = slice_xrn).name
instances = self.driver.shell.db.instance_get_all_by_project(name)
rspec_nodes = []
rspec_node['component_name'] = xrn.name
rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
sliver = instance_to_sliver(instance)
- disk_image = self.get_disk_image(instance.image_ref)
- sliver['disk_images'] = [disk_image_to_rspec_object(disk_image)]
+ disk_image = image_manager.get_disk_image(instance.image_ref)
+ sliver['disk_images'] = [Image.disk_image_to_rspec_object(disk_image)]
rspec_node['slivers'] = [sliver]
rspec_nodes.append(rspec_node)
return rspec_nodes
# available sliver/instance/vm types
instances = self.driver.shell.db.instance_type_get_all().values()
# available images
- disk_images = self.get_available_disk_images()
- disk_image_objects = [disk_image_to_rspec_object(image) \
+ image_manager = Image(self.driver)
+ disk_images = image_manager.get_available_disk_images()
+ disk_image_objects = [Image.disk_image_to_rspec_object(image) \
for image in disk_images]
rspec_nodes = []
for zone in zones:
def create_project(self, slicename, users, options={}):
"""
- Create the slice if it doesn't alredy exist
+        Create the slice if it doesn't already exist. Create user
+ accounts that don't already exist
"""
- import nova.exception.ProjectNotFound
+ from nova.exception import ProjectNotFound
try:
slice = self.driver.shell.auth_manager.get_project(slicename)
- except nova.exception.ProjectNotFound:
- # convert urns to user names
- usernames = [Xrn(user['urn']).get_leaf() for user in users]
+ except ProjectNotFound:
# assume that the first user is the project manager
- proj_manager = usernames[0]
+ proj_manager = Xrn(users[0]['urn']).get_leaf()
self.driver.shell.auth_manager.create_project(slicename, proj_manager)
-
- def create_project_users(self, slicename, users, options={}):
- """
- Add requested users to the specified slice.
- """
-
- # There doesn't seem to be an effcient way to
- # look up all the users of a project, so lets not
- # attempt to remove stale users . For now lets just
- # ensure that the specified users exist
+
for user in users:
username = Xrn(user['urn']).get_leaf()
try:
self.driver.shell.auth_manager.get_user(username)
except nova.exception.UserNotFound:
self.driver.shell.auth_manager.create_user(username)
- self.verify_user_keys(username, user['keys'], options)
-
+ self.verify_user_keys(username, user['keys'], options)
def verify_user_keys(self, username, keys, options={}):
"""
for key in existing_keys:
if key.public_key in removed_pub_keys:
self.driver.shell.db.key_pair_destroy(username, key.name)
-
+
+
+ def create_security_group(self, group_name, fw_rules=[]):
+ security_group = SecurityGroup(self.driver)
+ security_group.create_security_group(group_name)
+ for rule in fw_rules:
+ security_group.add_rule_to_group(group_name,
+ protocol = rule.get('protocol'),
+ cidr_ip = rule.get('cidr_ip'),
+ port_range = rule.get('port_range'),
+ icmp_type_code = rule.get('icmp_type_code'))
+
+ def add_rule_to_security_group(self, group_name, **kwds):
+ security_group = SecurityGroup(self.driver)
+ security_group.add_rule_to_group(group_name=group_name,
+ protocol=kwds.get('protocol'),
+ cidr_ip =kwds.get('cidr_ip'),
+ icmp_type_code = kwds.get('icmp_type_code'))
+
+
def reserve_instance(self, image_id, kernel_id, ramdisk_id, \
- instance_type, key_name, user_data):
- conn = self.driver.euca_shell
+ instance_type, key_name, user_data, group_name):
+ conn = self.driver.euca_shell.get_euca_connection()
logger.info('Reserving an instance: image: %s, kernel: ' \
'%s, ramdisk: %s, type: %s, key: %s' % \
(image_id, kernel_id, ramdisk_id,
ramdisk_id=ramdisk_id,
instance_type=instance_type,
key_name=key_name,
- user_data = user_data)
- #security_groups=group_names,
+ user_data = user_data,
+ security_groups=[group_name])
#placement=zone,
#min_count=min_count,
#max_count=max_count,
- except EC2ResponseError, ec2RespError:
- logger.log_exc(ec2RespError)
+ except Exception, err:
+ logger.log_exc(err)
def run_instances(self, slicename, rspec, keyname, pubkeys):
"""
- Create the instances thats requested in the rspec
+ Create the security groups and instances.
"""
# the default image to use for instnaces that dont
# explicitly request an image.
# Just choose the first available image for now.
- available_images = self.get_available_disk_images()
- default_image = self.get_disk_images()[0]
- default_ami_id = CloudController.image_ec2_id(default_image['ami']['id'])
- default_aki_id = CloudController.image_ec2_id(default_image['aki']['id'])
- default_ari_id = CloudController.image_ec2_id(default_image['ari']['id'])
+ image_manager = Image(self.driver)
+ available_images = image_manager.get_available_disk_images()
+ default_image = available_images[0]
+ default_ami_id = CloudController.image_ec2_id(default_image['ami']['id'], 'ami')
+ default_aki_id = CloudController.image_ec2_id(default_image['aki']['id'], 'aki')
+ default_ari_id = CloudController.image_ec2_id(default_image['ari']['id'], 'ari')
# get requested slivers
rspec = RSpec(rspec)
+ user_data = "\n".join(pubkeys)
requested_instances = defaultdict(list)
# iterate over clouds/zones/nodes
for node in rspec.version.get_nodes_with_slivers():
if isinstance(instance_types, list):
# iterate over sliver/instance types
for instance_type in instance_types:
+ fw_rules = instance_type.get('fw_rules', [])
+                # Each sliver gets its own security group.
+                # Keep security group names unique by appending some random
+                # characters to the end.
+ random_name = "".join([random.choice(string.letters+string.digits)
+ for i in xrange(6)])
+ group_name = slicename + random_name
+ self.create_security_group(group_name, fw_rules)
ami_id = default_ami_id
aki_id = default_aki_id
ari_id = default_ari_id
req_image = instance_type.get('disk_images')
if req_image and isinstance(req_image, list):
req_image_name = req_image[0]['name']
- disk_image = self.get_disk_image(name=req_image_name)
+ disk_image = image_manager.get_disk_image(name=req_image_name)
if disk_image:
- ami_id = CloudController.image_ec2_id(disk_image['ami']['id'])
- aki_id = CloudController.image_ec2_id(disk_image['aki']['id'])
- ari_id = CloudController.image_ec2_id(disk_image['ari']['id'])
+ ami_id = CloudController.image_ec2_id(disk_image['ami']['id'], 'ami')
+ aki_id = CloudController.image_ec2_id(disk_image['aki']['id'], 'aki')
+ ari_id = CloudController.image_ec2_id(disk_image['ari']['id'], 'ari')
# start the instance
- self.reserve_instance(ami_id, aki_id, ari_id, \
- instance_type['name'], keyname, pubkeys)
+ self.reserve_instance(image_id=ami_id,
+ kernel_id=aki_id,
+ ramdisk_id=ari_id,
+ instance_type=instance_type['name'],
+ key_name=keyname,
+ user_data=user_data,
+ group_name=group_name)
+
+
+    # Destroy every instance that belongs to project_name, together with
+    # the per-sliver security groups that run_instances created for them.
+    # Returns 1 unconditionally (legacy success convention).
+    def delete_instances(self, project_name):
+        instances = self.driver.shell.db.instance_get_all_by_project(project_name)
+        security_group_manager = SecurityGroup(self.driver)
+        for instance in instances:
+            # delete this instance's security groups
+            for security_group in instance.security_groups:
+                # don't delete the shared 'default' security group
+                if security_group.name != 'default':
+                    security_group_manager.delete_security_group(security_group.name)
+            # destroy instance
+            self.driver.shell.db.instance_destroy(instance.id)
+        return 1
+
+    # Stop (but do not destroy) every instance owned by project_name.
+    # Returns 1 unconditionally (legacy success convention).
+    def stop_instances(self, project_name):
+        instances = self.driver.shell.db.instance_get_all_by_project(project_name)
+        for instance in instances:
+            self.driver.shell.db.instance_stop(instance.id)
+        return 1
+    # Placeholder: in-place update of a project's running instances is
+    # not implemented yet.
+    def update_instances(self, project_name):
+        pass
--- /dev/null
+from sfa.util.sfalogging import logger
+
+class SecurityGroup:
+    """Thin wrapper around the euca/EC2 connection for managing security
+    groups and their firewall rules. Failures are logged and swallowed so
+    that one bad rule does not abort slice provisioning."""
+
+    def __init__(self, driver):
+        self.driver = driver
+
+
+    def create_security_group(self, name):
+        # Create an empty security group; the description field is unused.
+        conn = self.driver.euca_shell.get_euca_connection()
+        try:
+            conn.create_security_group(name=name, description="")
+        except Exception, ex:
+            logger.log_exc("Failed to add security group")
+
+    def delete_security_group(self, name):
+        conn = self.driver.euca_shell.get_euca_connection()
+        try:
+            conn.delete_security_group(name=name)
+        except Exception, ex:
+            logger.log_exc("Failed to delete security group")
+
+
+    def _validate_port_range(self, port_range):
+        # Accepts "N" or "N-M"; returns (from_port, to_port) as ints, or
+        # (None, None) when port_range is not a string.
+        from_port = to_port = None
+        if isinstance(port_range, str):
+            ports = port_range.split('-')
+            if len(ports) > 1:
+                from_port = int(ports[0])
+                to_port = int(ports[1])
+            else:
+                from_port = to_port = int(ports[0])
+        return (from_port, to_port)
+
+    def _validate_icmp_type_code(self, icmp_type_code):
+        # Accepts "type:code"; returns (type, code) as ints, or
+        # (None, None) when icmp_type_code is not a "type:code" string
+        # or the parts are not integers.
+        from_port = to_port = None
+        if isinstance(icmp_type_code, str):
+            code_parts = icmp_type_code.split(':')
+            if len(code_parts) > 1:
+                try:
+                    from_port = int(code_parts[0])
+                    to_port = int(code_parts[1])
+                except ValueError:
+                    logger.error('port must be an integer.')
+        return (from_port, to_port)
+
+
+    def add_rule_to_group(self, group_name=None, protocol='tcp', cidr_ip='0.0.0.0/0',
+                          port_range=None, icmp_type_code=None,
+                          source_group_name=None, source_group_owner_id=None):
+
+        from_port, to_port = self._validate_port_range(port_range)
+        icmp_type = self._validate_icmp_type_code(icmp_type_code)
+        # bug fix: the (None, None) tuple is truthy, so testing the tuple
+        # itself always clobbered the ports parsed from port_range; only
+        # use the icmp values when they were actually parsed
+        if icmp_type != (None, None):
+            from_port, to_port = icmp_type
+
+        if group_name:
+            conn = self.driver.euca_shell.get_euca_connection()
+            try:
+                conn.authorize_security_group(
+                    group_name=group_name,
+                    src_security_group_name=source_group_name,
+                    src_security_group_owner_id=source_group_owner_id,
+                    ip_protocol=protocol,
+                    from_port=from_port,
+                    to_port=to_port,
+                    cidr_ip=cidr_ip,
+                )
+            except Exception, ex:
+                logger.log_exc("Failed to add rule to group %s" % group_name)
+
+
+    def remove_rule_from_group(self, group_name=None, protocol='tcp', cidr_ip='0.0.0.0/0',
+                               port_range=None, icmp_type_code=None,
+                               source_group_name=None, source_group_owner_id=None):
+
+        from_port, to_port = self._validate_port_range(port_range)
+        icmp_type = self._validate_icmp_type_code(icmp_type_code)
+        # same truthy-tuple fix as in add_rule_to_group
+        if icmp_type != (None, None):
+            from_port, to_port = icmp_type
+
+        if group_name:
+            conn = self.driver.euca_shell.get_euca_connection()
+            try:
+                conn.revoke_security_group(
+                    group_name=group_name,
+                    src_security_group_name=source_group_name,
+                    src_security_group_owner_id=source_group_owner_id,
+                    ip_protocol=protocol,
+                    from_port=from_port,
+                    to_port=to_port,
+                    # bug fix: was `cidr_ip=ip` -- NameError on every call
+                    cidr_ip=cidr_ip,
+                )
+            except Exception, ex:
+                logger.log_exc("Failed to remove rule from group %s" % group_name)
+
# sort slivers by node id
for node_id in slice['node_ids']:
- sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], node_id),
+ sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], node_id, authority=self.driver.hrn),
'name': slice['name'],
'type': 'plab-vserver',
'tags': []})
if len(nodes) == 0:
raise SliverDoesNotExist("You have not allocated any slivers here")
+    # get login info
+    user = {}
+    if slice['person_ids']:
+        persons = self.shell.GetPersons(slice['person_ids'], ['key_ids'])
+        key_ids = [key_id for person in persons for key_id in person['key_ids']]
+        person_keys = self.shell.GetKeys(key_ids)
+        # bug fix: iterated over the undefined name `keys` instead of the
+        # person_keys records just fetched (NameError at runtime)
+        keys = [key['key'] for key in person_keys]
+
+        user.update({'urn': slice_urn,
+                     'login': slice['name'],
+                     'protocol': ['ssh'],
+                     'port': ['22'],
+                     'keys': keys})
+
+
site_ids = [node['site_id'] for node in nodes]
result = {}
result['geni_urn'] = slice_urn
result['pl_login'] = slice['name']
result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
+ result['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
resources = []
for node in nodes:
res['pl_hostname'] = node['hostname']
res['pl_boot_state'] = node['boot_state']
res['pl_last_contact'] = node['last_contact']
+ res['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
if node['last_contact'] is not None:
res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
- sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id'])
+ sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id'], authority=self.hrn)
res['geni_urn'] = sliver_id
if node['boot_state'] == 'boot':
res['geni_status'] = 'ready'
top_level_status = 'failed'
res['geni_error'] = ''
+ res['users'] = [user]
resources.append(res)
slices.verify_slice_attributes(slice, requested_attributes, options=options)
# add/remove slice from nodes
- requested_slivers = [node.get('component_name') for node in rspec.version.get_nodes_with_slivers()]
+ requested_slivers = []
+ for node in rspec.version.get_nodes_with_slivers():
+ hostname = None
+ if node.get('component_name'):
+ hostname = node.get('component_name')
+ elif node.get('component_id'):
+ hostname = xrn_to_hostname(node.get('component_id'))
+ if hostname:
+ requested_slivers.append(hostname)
nodes = slices.verify_slice_nodes(slice, requested_slivers, peer)
# add/remove links links
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
+# RSpec element describing one firewall rule in the plos extension.
+# All fields are optional; see PLOSv1FWRule for the XML (de)serialization
+# and SecurityGroup._validate_* for the expected string formats.
+class FWRule(Element):
+    fields = [
+        'protocol',        # e.g. 'tcp', 'udp', 'icmp'
+        'cidr_ip',         # source network, e.g. '0.0.0.0/0'
+        'port_range',      # 'N' or 'N-M'
+        'icmp_type_code',  # 'type:code'
+    ]
+
'type',
'tags',
'disk_images',
+ 'fw_rules',
]
from sfa.rspecs.elements.element import Element
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.versions.pgv2DiskImage import PGv2DiskImage
+from sfa.rspecs.elements.versions.plosv1FWRule import PLOSv1FWRule
class PGv2SliverType:
images = sliver.get('disk_images')
if images and isinstance(images, list):
PGv2DiskImage.add_images(sliver_elem, images)
+ fw_rules = sliver.get('fw_rules')
+ if fw_rules and isinstance(fw_rules, list):
+ PLOSv1FWRule.add_rules(sliver_elem, fw_rules)
PGv2SliverType.add_sliver_attributes(sliver_elem, sliver.get('tags', []))
@staticmethod
if 'name' in sliver_elem.attrib:
sliver['type'] = sliver_elem.attrib['name']
sliver['images'] = PGv2DiskImage.get_images(sliver_elem)
+ sliver['fw_rules'] = PLOSv1FWRule.get_rules(sliver_elem)
slivers.append(sliver)
return slivers
--- /dev/null
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.fw_rule import FWRule
+
+class PLOSv1FWRule:
+    """(De)serialize FWRule elements to/from <plos:fw_rule> XML."""
+
+    @staticmethod
+    def add_rules(xml, rules):
+        # Append one <plos:fw_rule> child per rule; rules is a list of
+        # dict-like FWRule objects.
+        if not rules:
+            return
+        for rule in rules:
+            rule_elem = xml.add_element('plos:fw_rule')
+            for field in ['protocol', 'port_range', 'cidr_ip', 'icmp_type_code']:
+                value = rule.get(field)
+                # bug fix: lxml raises TypeError when an attribute value is
+                # None; skip attributes the rule does not define
+                if value is not None:
+                    rule_elem.set(field, value)
+
+    @staticmethod
+    def get_rules(xml):
+        # Collect FWRule objects from <plos:fw_rule> (or bare <fw_rule>)
+        # children; returns [] when the plos namespace is absent.
+        rules = []
+        if 'plos' in xml.namespaces:
+            for rule_elem in xml.xpath('./plos:fw_rule | ./fw_rule'):
+                rule = FWRule(rule_elem.attrib, rule_elem)
+                rules.append(rule)
+        return rules
+
#!/usr/bin/python
from sfa.util.sfalogging import logger
-class BaseVersion:
+class RSpecVersion:
type = None
content_type = None
version = None
import os
from sfa.util.faults import InvalidRSpec, UnsupportedRSpecVersion
-from sfa.rspecs.baseversion import BaseVersion
+from sfa.rspecs.version import RSpecVersion
from sfa.util.sfalogging import logger
class VersionManager:
if num_parts > 2:
content_type = version_parts[2]
retval = self._get_version(type, version_num, content_type)
- elif isinstance(version, BaseVersion):
+ elif isinstance(version, RSpecVersion):
retval = version
else:
raise UnsupportedRSpecVersion("No such version: %s "% str(version))
from StringIO import StringIO
from sfa.util.xrn import Xrn, urn_to_sliver_id
from sfa.util.plxrn import hostname_to_urn, xrn_to_hostname
-from sfa.rspecs.baseversion import BaseVersion
+from sfa.rspecs.version import RSpecVersion
from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
from sfa.rspecs.elements.versions.pgv2Node import PGv2Node
from sfa.rspecs.elements.versions.pgv2SliverType import PGv2SliverType
-class PGv2(BaseVersion):
+class PGv2(RSpecVersion):
type = 'ProtoGENI'
content_type = 'ad'
version = '2'
extensions = {
'flack': "http://www.protogeni.net/resources/rspec/ext/flack/1",
'planetlab': "http://www.planet-lab.org/resources/sfa/ext/planetlab/1",
+ 'plos': "http://www.planet-lab.org/resources/sfa/ext/plos/1",
}
namespaces = dict(extensions.items() + [('default', namespace)])
enabled = True
content_type = 'ad'
schema = 'http://www.protogeni.net/resources/rspec/2/ad.xsd'
- template = '<rspec type="advertisement" xmlns="http://www.protogeni.net/resources/rspec/2" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.protogeni.net/resources/rspec/2 http://www.protogeni.net/resources/rspec/2/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+ template = '<rspec type="advertisement" xmlns="http://www.protogeni.net/resources/rspec/2" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:plos="http://www.planet-lab.org/resources/sfa/ext/plos/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.protogeni.net/resources/rspec/2 http://www.protogeni.net/resources/rspec/2/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd http://www.planet-lab.org/resources/sfa/ext/plos/1 http://www.planet-lab.org/resources/sfa/ext/plos/1/plos.xsd"/>'
class PGv2Request(PGv2):
enabled = True
content_type = 'request'
schema = 'http://www.protogeni.net/resources/rspec/2/request.xsd'
- template = '<rspec type="request" xmlns="http://www.protogeni.net/resources/rspec/2" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.protogeni.net/resources/rspec/2 http://www.protogeni.net/resources/rspec/2/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+ template = '<rspec type="request" xmlns="http://www.protogeni.net/resources/rspec/2" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:plos="http://www.planet-lab.org/resources/sfa/ext/plos/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.protogeni.net/resources/rspec/2 http://www.protogeni.net/resources/rspec/2/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd http://www.planet-lab.org/resources/sfa/ext/plos/1 http://www.planet-lab.org/resources/sfa/ext/plos/1/plos.xsd"/>'
class PGv2Manifest(PGv2):
enabled = True
content_type = 'manifest'
schema = 'http://www.protogeni.net/resources/rspec/2/manifest.xsd'
- template = '<rspec type="manifest" xmlns="http://www.protogeni.net/resources/rspec/2" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.protogeni.net/resources/rspec/2 http://www.protogeni.net/resources/rspec/2/manifest.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+ template = '<rspec type="manifest" xmlns="http://www.protogeni.net/resources/rspec/2" xmlns:plos="http://www.planet-lab.org/resources/sfa/ext/plos/1" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.protogeni.net/resources/rspec/2 http://www.protogeni.net/resources/rspec/2/manifest.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd http://www.planet-lab.org/resources/sfa/ext/plos/1 http://www.planet-lab.org/resources/sfa/ext/plos/1/plos.xsd"/>'
extensions = {
'flack': "http://www.protogeni.net/resources/rspec/ext/flack/1",
'planetlab': "http://www.planet-lab.org/resources/sfa/ext/planetlab/1",
+ 'plos': "http://www.planet-lab.org/resources/sfa/ext/plos/1",
}
namespaces = dict(extensions.items() + [('default', namespace)])
elements = []
enabled = True
content_type = 'ad'
schema = 'http://www.geni.net/resources/rspec/3/ad.xsd'
- template = '<rspec type="advertisement" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+ template = '<rspec type="advertisement" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:plos="http://www.planet-lab.org/resources/sfa/ext/plos/1" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/ad.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd http://www.planet-lab.org/resources/sfa/ext/plos/1 http://www.planet-lab.org/resources/sfa/ext/plos/1/plos.xsd"/>'
class GENIv3Request(GENIv3):
enabled = True
content_type = 'request'
schema = 'http://www.geni.net/resources/rspec/3/request.xsd'
- template = '<rspec type="request" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+ template = '<rspec type="request" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:plos="http://www.planet-lab.org/resources/sfa/ext/plos/1" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/request.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd http://www.planet-lab.org/resources/sfa/ext/plos/1 http://www.planet-lab.org/resources/sfa/ext/plos/1/plos.xsd"/>'
class GENIv2Manifest(GENIv3):
enabled = True
content_type = 'manifest'
schema = 'http://www.geni.net/resources/rspec/3/manifest.xsd'
- template = '<rspec type="manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/manifest.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd"/>'
+ template = '<rspec type="manifest" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.geni.net/resources/rspec/3" xmlns:plos="http://www.planet-lab.org/resources/sfa/ext/plos/1" xmlns:flack="http://www.protogeni.net/resources/rspec/ext/flack/1" xmlns:planetlab="http://www.planet-lab.org/resources/sfa/ext/planetlab/1" xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/manifest.xsd http://www.planet-lab.org/resources/sfa/ext/planetlab/1 http://www.planet-lab.org/resources/sfa/ext/planetlab/1/planetlab.xsd http://www.planet-lab.org/resources/sfa/ext/plos/1 http://www.planet-lab.org/resources/sfa/ext/plos/1/plos.xsd"/>'
from sfa.util.sfalogging import logger
from sfa.util.xrn import hrn_to_urn, urn_to_hrn
from sfa.util.plxrn import PlXrn
-from sfa.rspecs.baseversion import BaseVersion
+from sfa.rspecs.version import RSpecVersion
from sfa.rspecs.elements.element import Element
from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
from sfa.rspecs.elements.versions.sfav1Node import SFAv1Node
from sfa.rspecs.elements.versions.sfav1Sliver import SFAv1Sliver
-class SFAv1(BaseVersion):
+class SFAv1(RSpecVersion):
enabled = True
type = 'SFA'
content_type = '*'
if format == 'text':
self.dump_text(dump_parents)
elif format == 'xml':
- print self.save_to_string()
+ print self.save_as_xml()
elif format == 'simple':
print self.dump_simple()
else:
def hrn_to_urn(hrn,type): return Xrn(hrn, type=type).urn
def hrn_authfor_hrn(parenthrn, hrn): return Xrn.hrn_is_auth_for_hrn(parenthrn, hrn)
-def urn_to_sliver_id(urn, slice_id, node_id, index=0):
- return Xrn(urn).get_sliver_id(slice_id, node_id, index)
+def urn_to_sliver_id(urn, slice_id, node_id, index=0, authority=None):
+ return Xrn(urn).get_sliver_id(slice_id, node_id, index, authority)
class Xrn:
self._normalize()
return ':'.join( [Xrn.unescape(x) for x in self.authority] )
- def get_sliver_id(self, slice_id, node_id, index=0):
+ def get_sliver_id(self, slice_id, node_id, index=0, authority=None):
self._normalize()
- return ":".join(map(str, [self.get_urn(), slice_id, node_id, index]))
+ urn = self.get_urn()
+ if authority:
+ authority_hrn = self.get_authority_hrn()
+ if not authority_hrn.startswith(authority):
+ hrn = ".".join([authority,self.get_authority_hrn(), self.get_leaf()])
+ else:
+ hrn = ".".join([self.get_authority_hrn(), self.get_leaf()])
+ urn = Xrn(hrn, self.get_type()).get_urn()
+ return ":".join(map(str, [urn, slice_id, node_id, index]))
def urn_to_hrn(self):
"""