--- /dev/null
+from sfa.generic import Generic
+
class nitos (Generic):
    """Generic-flavour plugin for the NITOS testbed.

    Each method names the concrete class that the SFA framework should
    use for the corresponding role (importer, API, managers, driver).
    Imports are done lazily, inside each method, so that merely loading
    this configuration class does not pull in the whole stack.
    """

    def importer_class (self):
        """The class that mirrors the NITOS testbed into the registry db."""
        from sfa.importer.nitosimporter import NitosImporter
        return NitosImporter

    def api_class (self):
        """The standard SFA API class."""
        from sfa.server.sfaapi import SfaApi
        return SfaApi

    def registry_manager_class (self) :
        """Server-side manager for the registry interface."""
        from sfa.managers.registry_manager import RegistryManager
        return RegistryManager

    def slicemgr_manager_class (self) :
        """Server-side manager for the slice-manager interface."""
        from sfa.managers.slice_manager import SliceManager
        return SliceManager

    def aggregate_manager_class (self) :
        """Server-side manager for the aggregate interface."""
        from sfa.managers.aggregate_manager import AggregateManager
        return AggregateManager

    def driver_class (self):
        """Driver used by the server-side services to talk to the testbed."""
        from sfa.nitos.nitosdriver import NitosDriver
        return NitosDriver

    # component mode (running on board testbed nodes) is not supported
    # on NITOS, hence no component manager nor component driver
    def component_manager_class (self):
        return None

    def component_driver_class (self):
        return None
--- /dev/null
+
+import os
+
+from sfa.util.config import Config
+from sfa.util.xrn import Xrn, get_leaf, get_authority, hrn_to_urn
+
+from sfa.trust.gid import create_uuid
+from sfa.trust.certificate import convert_public_key, Keypair
+
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord, RegAuthority, RegSlice, RegNode, RegUser, RegKey
+
+from sfa.nitos.nitosshell import NitosShell
+from sfa.nitos.nitosxrn import hostname_to_hrn, slicename_to_hrn, email_to_hrn, hrn_to_nitos_slicename, username_to_hrn
+
+def _get_site_hrn(interface_hrn, site):
+ hrn = ".".join([interface_hrn, site['name']])
+ return hrn
+
+
class NitosImporter:
    """One-shot importer that mirrors a NITOS testbed into the SFA registry.

    A run() pass fetches the site, users (with their keys), nodes and
    slices from the NITOS XML-RPC API, creates the matching registry
    records (authorities, nodes, users, slices) with fresh GIDs where
    needed, and finally deletes registry records that no longer exist on
    the testbed ("stale" records).
    """

    def __init__ (self, auth_hierarchy, logger):
        # auth_hierarchy: hierarchy object used to create authorities and GIDs
        self.auth_hierarchy = auth_hierarchy
        self.logger=logger

    def add_options (self, parser):
        # we don't have any options for now
        pass

    # hrn hash is initialized from current db
    # remember just-created records as we go
    # xxx might make sense to add a UNIQUE constraint in the db itself
    def remember_record_by_hrn (self, record):
        # index record under (type, hrn); duplicates are logged and ignored
        tuple = (record.type, record.hrn)
        if tuple in self.records_by_type_hrn:
            self.logger.warning ("NitosImporter.remember_record_by_hrn: duplicate (%s,%s)"%tuple)
            return
        self.records_by_type_hrn [ tuple ] = record

    # ditto for pointer hash
    def remember_record_by_pointer (self, record):
        # index record under (type, pointer); pointer == -1 means the record
        # has no counterpart on the testbed and is not indexed
        if record.pointer == -1:
            self.logger.warning ("NitosImporter.remember_record_by_pointer: pointer is void")
            return
        tuple = (record.type, record.pointer)
        if tuple in self.records_by_type_pointer:
            self.logger.warning ("NitosImporter.remember_record_by_pointer: duplicate (%s,%s)"%tuple)
            return
        self.records_by_type_pointer [ ( record.type, record.pointer,) ] = record

    def remember_record (self, record):
        # index a newly created record in both hashes
        self.remember_record_by_hrn (record)
        self.remember_record_by_pointer (record)

    def locate_by_type_hrn (self, type, hrn):
        # return the known record for (type, hrn), or None
        return self.records_by_type_hrn.get ( (type, hrn), None)

    def locate_by_type_pointer (self, type, pointer):
        # return the known record for (type, pointer), or None
        return self.records_by_type_pointer.get ( (type, pointer), None)

    # a convenience/helper function to see if a record is already known
    # a former, broken, attempt (in 2.1-9) had been made
    # to try and use 'pointer' as a first, most significant attempt
    # the idea being to preserve stuff as much as possible, and thus
    # to avoid creating a new gid in the case of a simple hrn rename
    # however this of course doesn't work as the gid depends on the hrn...
    #def locate (self, type, hrn=None, pointer=-1):
    #    if pointer!=-1:
    #        attempt = self.locate_by_type_pointer (type, pointer)
    #        if attempt : return attempt
    #    if hrn is not None:
    #        attempt = self.locate_by_type_hrn (type, hrn,)
    #        if attempt : return attempt
    #    return None

    # this makes the run method a bit abtruse - out of the way

    def run (self, options):
        """Perform one full import pass; *options* is currently unused."""
        config = Config ()
        interface_hrn = config.SFA_INTERFACE_HRN
        root_auth = config.SFA_REGISTRY_ROOT_AUTH
        shell = NitosShell (config)

        ######## retrieve all existing SFA objects
        all_records = dbsession.query(RegRecord).all()

        # create hash by (type,hrn)
        # we essentially use this to know if a given record is already known to SFA
        self.records_by_type_hrn = \
            dict ( [ ( (record.type, record.hrn) , record ) for record in all_records ] )
        # create hash by (type,pointer)
        self.records_by_type_pointer = \
            dict ( [ ( (record.type, record.pointer) , record ) for record in all_records
                     if record.pointer != -1] )

        # initialize record.stale to True by default, then mark stale=False on the ones that are in use
        for record in all_records: record.stale=True

        ######## retrieve NITOS data
        # Get site info
        # retrieve only required stuf
        site = shell.getTestbedInfo()
        sites = [site]
        # create a hash of sites by login_base
        # # sites_by_login_base = dict ( [ ( site['login_base'], site ) for site in sites ] )
        # Get all NITOS users
        users = shell.getUsers()
        # create a hash of users by user_id
        users_by_id = dict ( [ ( user['user_id'], user) for user in users ] )
        # Get all NITOS public keys
        # accumulate key ids for keys retrieval
        # key_ids = []
        # for person in persons:
        #     key_ids.extend(person['key_ids'])
        # keys = shell.GetKeys( {'peer_id': None, 'key_id': key_ids,
        #                        'key_type': 'ssh'} )
        # # create a hash of keys by key_id
        # keys_by_id = dict ( [ ( key['key_id'], key ) for key in keys ] )
        # create a dict user_id -> [ (nitos)keys ]
        keys_by_user_id = dict ( [ ( user['user_id'], user['keys']) for user in users ] )
        # Get all nitos nodes
        nodes = shell.getNodes()
        # create hash by node_id
        nodes_by_id = dict ( [ (node['node_id'], node) for node in nodes ] )
        # Get all nitos slices
        slices = shell.getSlices()
        # create hash by slice_id
        slices_by_id = dict ( [ (slice['slice_id'], slice) for slice in slices ] )


        # start importing
        for site in sites:
            #for i in [0]:
            site_hrn = _get_site_hrn(interface_hrn, site)
            # import if hrn is not in list of existing hrns or if the hrn exists
            # but its not a site record
            site_record=self.locate_by_type_hrn ('authority', site_hrn)
            if not site_record:
                try:
                    # create the authority (and its GID) on the fly if needed
                    urn = hrn_to_urn(site_hrn, 'authority')
                    if not self.auth_hierarchy.auth_exists(urn):
                        self.auth_hierarchy.create_auth(urn)
                    auth_info = self.auth_hierarchy.get_auth_info(urn)
                    site_record = RegAuthority(hrn=site_hrn, gid=auth_info.get_gid_object(),
                                               pointer=0,
                                               authority=get_authority(site_hrn))
                    site_record.just_created()
                    dbsession.add(site_record)
                    dbsession.commit()
                    self.logger.info("NitosImporter: imported authority (site) : %s" % site_record)
                    self.remember_record (site_record)
                except:
                    # if the site import fails then there is no point in trying to import the
                    # site's child records (node, slices, persons), so skip them.
                    self.logger.log_exc("NitosImporter: failed to import site. Skipping child records")
                    continue
            else:
                # xxx update the record ...
                pass
            site_record.stale=False

            # import node records
            for node in nodes:
                site_auth = get_authority(site_hrn)
                site_name = site['name']
                node_hrn = hostname_to_hrn(site_auth, site_name, node['name'])
                # xxx this sounds suspicious
                if len(node_hrn) > 64: node_hrn = node_hrn[:64]
                node_record = self.locate_by_type_hrn ( 'node', node_hrn )
                if not node_record:
                    try:
                        # nodes get a throwaway keypair, only needed to mint a GID
                        pkey = Keypair(create=True)
                        urn = hrn_to_urn(node_hrn, 'node')
                        node_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
                        node_record = RegNode (hrn=node_hrn, gid=node_gid,
                                               pointer =node['node_id'],
                                               authority=get_authority(node_hrn))
                        node_record.just_created()
                        dbsession.add(node_record)
                        dbsession.commit()
                        self.logger.info("NitosImporter: imported node: %s" % node_record)
                        self.remember_record (node_record)
                    except:
                        self.logger.log_exc("NitosImporter: failed to import node")
                else:
                    # xxx update the record ...
                    pass
                node_record.stale=False


            # import users
            for user in users:
                user_hrn = username_to_hrn(interface_hrn, site['name'], user['username'])
                # xxx suspicious again
                if len(user_hrn) > 64: user_hrn = user_hrn[:64]
                user_urn = hrn_to_urn(user_hrn, 'user')

                user_record = self.locate_by_type_hrn ( 'user', user_hrn)

                # return a tuple pubkey (a nitos key object) and pkey (a Keypair object)
                def init_user_key (user):
                    pubkey = None
                    pkey = None
                    if user['keys']:
                        # randomly pick first key in set
                        # (first key that converts successfully, actually)
                        for key in user['keys']:
                            pubkey = key
                            try:
                                pkey = convert_public_key(pubkey)
                                break
                            except:
                                continue
                        if not pkey:
                            self.logger.warn('NitosImporter: unable to convert public key for %s' % user_hrn)
                            pkey = Keypair(create=True)
                    else:
                        # the user has no keys. Creating a random keypair for the user's gid
                        self.logger.warn("NitosImporter: user %s does not have a NITOS public key"%user_hrn)
                        pkey = Keypair(create=True)
                    return (pubkey, pkey)

                # new user
                try:
                    if not user_record:
                        (pubkey,pkey) = init_user_key (user)
                        user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
                        user_gid.set_email(user['email'])
                        user_record = RegUser (hrn=user_hrn, gid=user_gid,
                                               pointer=user['user_id'],
                                               authority=get_authority(user_hrn),
                                               email=user['email'])
                        if pubkey:
                            user_record.reg_keys=[RegKey (pubkey)]
                        else:
                            self.logger.warning("No key found for user %s"%user_record)
                        user_record.just_created()
                        dbsession.add (user_record)
                        dbsession.commit()
                        self.logger.info("NitosImporter: imported user: %s" % user_record)
                        self.remember_record ( user_record )
                    else:
                        # update the record ?
                        # if user's primary key has changed then we need to update the
                        # users gid by forcing an update here
                        sfa_keys = user_record.reg_keys
                        def key_in_list (key,sfa_keys):
                            for reg_key in sfa_keys:
                                if reg_key.key==key: return True
                            return False
                        # is there a new key in NITOS ?
                        new_keys=False
                        for key in user['keys']:
                            if not key_in_list (key,sfa_keys):
                                new_keys = True
                        if new_keys:
                            (pubkey,pkey) = init_user_key (user)
                            user_gid = self.auth_hierarchy.create_gid(user_urn, create_uuid(), pkey)
                            if not pubkey:
                                user_record.reg_keys=[]
                            else:
                                user_record.reg_keys=[ RegKey (pubkey)]
                            self.logger.info("NitosImporter: updated user: %s" % user_record)
                    # keep email in sync and mark the record as in use
                    user_record.email = user['email']
                    dbsession.commit()
                    user_record.stale=False
                except:
                    self.logger.log_exc("NitosImporter: failed to import user %s %s"%(user['user_id'],user['email']))


            # import slices
            for slice in slices:
                slice_hrn = slicename_to_hrn(interface_hrn, site['name'], slice['slice_name'])
                slice_record = self.locate_by_type_hrn ('slice', slice_hrn)
                if not slice_record:
                    try:
                        # slices also get a throwaway keypair for their GID
                        pkey = Keypair(create=True)
                        urn = hrn_to_urn(slice_hrn, 'slice')
                        slice_gid = self.auth_hierarchy.create_gid(urn, create_uuid(), pkey)
                        slice_record = RegSlice (hrn=slice_hrn, gid=slice_gid,
                                                 pointer=slice['slice_id'],
                                                 authority=get_authority(slice_hrn))
                        slice_record.just_created()
                        dbsession.add(slice_record)
                        dbsession.commit()
                        self.logger.info("NitosImporter: imported slice: %s" % slice_record)
                        self.remember_record ( slice_record )
                    except:
                        self.logger.log_exc("NitosImporter: failed to import slice")
                else:
                    # xxx update the record ...
                    self.logger.warning ("Slice update not yet implemented")
                    pass
                # record current users affiliated with the slice
                # NOTE(review): locate_by_type_pointer returns None for unknown
                # user ids, so reg_researchers may end up containing None
                # entries -- confirm the NITOS API guarantees known ids here
                slice_record.reg_researchers = \
                    [ self.locate_by_type_pointer ('user',int(user_id)) for user_id in slice['user_id'] ]
                dbsession.commit()
                slice_record.stale=False


        ### remove stale records
        # special records must be preserved
        system_hrns = [interface_hrn, root_auth, interface_hrn + '.slicemanager']
        for record in all_records:
            if record.hrn in system_hrns:
                record.stale=False
            if record.peer_authority:
                record.stale=False

        for record in all_records:
            # records created by this very pass may not carry 'stale';
            # treat a missing attribute as stale=True but log it
            try:    stale=record.stale
            except:
                stale=True
                self.logger.warning("stale not found with %s"%record)
            if stale:
                self.logger.info("NitosImporter: deleting stale record: %s" % record)
                dbsession.delete(record)
                dbsession.commit()
+
+
+
if __name__ == "__main__":
    # standalone smoke test: run one import pass against the configured testbed
    from sfa.util.sfalogging import logger
    # NOTE(review): NitosImporter's first argument is used as an auth
    # hierarchy object elsewhere; a plain hrn string is passed here --
    # confirm this entry point is still functional
    nitosimporter = NitosImporter("pla.nitos", logger)
    nitosimporter.run(None)
--- /dev/null
+#!/usr/bin/python
import time

from sfa.util.xrn import Xrn, hrn_to_urn, urn_to_hrn, urn_to_sliver_id
from sfa.util.sfatime import utcparse, datetime_to_string
from sfa.util.sfalogging import logger

from sfa.rspecs.rspec import RSpec
from sfa.rspecs.elements.hardware_type import HardwareType
from sfa.rspecs.elements.node import Node
from sfa.rspecs.elements.link import Link
from sfa.rspecs.elements.sliver import Sliver
from sfa.rspecs.elements.login import Login
from sfa.rspecs.elements.location import Location
from sfa.rspecs.elements.position_3d import Position3D
from sfa.rspecs.elements.interface import Interface
from sfa.rspecs.elements.services import Services
from sfa.rspecs.elements.pltag import PLTag
from sfa.rspecs.elements.lease import Lease
from sfa.rspecs.elements.granularity import Granularity
from sfa.rspecs.elements.channel import Channel
from sfa.rspecs.version_manager import VersionManager

from sfa.nitos.nitosxrn import NitosXrn, hostname_to_urn, hrn_to_nitos_slicename, slicename_to_hrn
from sfa.planetlab.plxrn import PlXrn
from sfa.planetlab.vlink import get_tc_rate
from sfa.planetlab.topology import Topology
+
class NitosAggregate:
    """Builds advertisement and manifest RSpecs from the state of a NITOS
    testbed, queried through the driver's XML-RPC shell.

    Fixes over the previous revision:
    - get_slice_and_slivers indexed with the undefined bare name ``node_id``
      (NameError) instead of ``node['node_id']``
    - get_leases_and_channels removed items from lists while iterating them,
      which silently skips elements
    - mutable default arguments ({}) replaced with None; in particular
      get_pl_initscripts used to mutate the shared default dict in place
    - lookup results (channel number, slice/node names) are reset on every
      iteration instead of silently reusing the previous iteration's value
    """

    def __init__(self, driver):
        # driver: a NitosDriver instance providing .shell, .hrn and .testbedInfo
        self.driver = driver

    def get_sites(self, filter=None):
        """Return a dict of site records keyed on site_id.

        *filter* is passed through to the shell unchanged; None means no filter.
        """
        if filter is None:
            filter = {}
        sites = {}
        for site in self.driver.shell.GetSites(filter):
            sites[site['site_id']] = site
        return sites

    def get_interfaces(self, filter=None):
        """Return a dict of interface records keyed on interface_id.

        'bwlimit' is converted in place from bps to a kbps string.
        """
        if filter is None:
            filter = {}
        interfaces = {}
        for interface in self.driver.shell.GetInterfaces(filter):
            if interface['bwlimit']:
                interface['bwlimit'] = str(int(interface['bwlimit'])/1000)
            interfaces[interface['interface_id']] = interface
        return interfaces

    def get_links(self, sites, nodes, interfaces):
        """Build Link elements for every inter-site pair of the static topology
        whose endpoint sites and nodes are known.

        sites/nodes/interfaces are the dicts returned by the get_* helpers.
        """
        topology = Topology()
        links = []
        for (site_id1, site_id2) in topology:
            site_id1 = int(site_id1)
            site_id2 = int(site_id2)
            # skip topology entries that reference unknown sites
            if site_id1 not in sites or site_id2 not in sites:
                continue
            site1 = sites[site_id1]
            site2 = sites[site_id2]

            for s1_node_id in site1['node_ids']:
                for s2_node_id in site2['node_ids']:
                    if s1_node_id not in nodes or s2_node_id not in nodes:
                        continue
                    node1 = nodes[s1_node_id]
                    node2 = nodes[s2_node_id]
                    # just use the first interface of each node
                    if1_xrn = PlXrn(auth=self.driver.hrn, interface='node%s:eth0' % (node1['node_id']))
                    if1_ipv4 = interfaces[node1['interface_ids'][0]]['ip']
                    if2_xrn = PlXrn(auth=self.driver.hrn, interface='node%s:eth0' % (node2['node_id']))
                    if2_ipv4 = interfaces[node2['interface_ids'][0]]['ip']

                    if1 = Interface({'component_id': if1_xrn.urn, 'ipv4': if1_ipv4})
                    if2 = Interface({'component_id': if2_xrn.urn, 'ipv4': if2_ipv4})

                    # assemble the link element
                    link = Link({'capacity': '1000000', 'latency': '0', 'packet_loss': '0', 'type': 'ipv4'})
                    link['interface1'] = if1
                    link['interface2'] = if2
                    link['component_name'] = "%s:%s" % (site1['login_base'], site2['login_base'])
                    link['component_id'] = PlXrn(auth=self.driver.hrn, interface=link['component_name']).get_urn()
                    link['component_manager_id'] = hrn_to_urn(self.driver.hrn, 'authority+am')
                    links.append(link)

        return links

    def get_node_tags(self, filter=None):
        """Return a dict of node tag records keyed on node_tag_id."""
        if filter is None:
            filter = {}
        node_tags = {}
        for node_tag in self.driver.shell.GetNodeTags(filter):
            node_tags[node_tag['node_tag_id']] = node_tag
        return node_tags

    def get_pl_initscripts(self, filter=None):
        """Return a dict of *enabled* initscripts keyed on initscript_id.

        Works on a copy of *filter*, so the caller's dict is never mutated.
        """
        filter = dict(filter) if filter else {}
        filter['enabled'] = True
        pl_initscripts = {}
        for initscript in self.driver.shell.GetInitScripts(filter):
            pl_initscripts[initscript['initscript_id']] = initscript
        return pl_initscripts

    def get_slice_and_slivers(self, slice_xrn):
        """
        Return (slice, slivers): the NITOS slice record matching *slice_xrn*
        (or None) and a dict of its reserved nodes keyed on node_id.
        """
        slivers = {}
        slice = None
        if not slice_xrn:
            return (slice, slivers)
        slice_hrn, _ = urn_to_hrn(slice_xrn)
        slice_name = hrn_to_nitos_slicename(slice_hrn)
        # the shell has no server-side filtering, so filter here
        for slc in self.driver.shell.getSlices():
            if slc['slice_name'] == slice_name:
                slice = slc
                break

        if not slice:
            return (slice, slivers)

        # the slice's reserved nodes are its slivers
        for node in self.driver.shell.getReservedNodes():
            if node['slice_id'] == slice['slice_id']:
                # bugfix: was slivers[node[node_id]] with node_id undefined
                slivers[node['node_id']] = node

        return (slice, slivers)

    def get_nodes_and_links(self, slice_xrn, slice=None, slivers=None, options=None):
        """Return (rspec_nodes, links).

        For an advertisement (no slice_xrn) all testbed nodes are included;
        for a manifest only the slice's slivers are. Links are currently
        always empty for NITOS.
        """
        if slivers is None:
            slivers = {}
        # a slice with no reserved node yields empty lists
        if slice_xrn:
            if not slice or not slivers:
                return ([], [])
            nodes = slivers.values()
        else:
            nodes = self.driver.shell.getNodes()

        # reservation granularity, in seconds
        grain = self.driver.testbedInfo['grain']

        rspec_nodes = []
        for node in nodes:
            rspec_node = Node()
            site_name = self.driver.testbedInfo['name']
            rspec_node['component_id'] = hostname_to_urn(self.driver.hrn, site_name, node['name'])
            rspec_node['component_name'] = node['name']
            rspec_node['component_manager_id'] = Xrn(self.driver.hrn, 'authority+cm').get_urn()
            rspec_node['authority_id'] = hrn_to_urn(NitosXrn.site_hrn(self.driver.hrn, site_name), 'authority+sa')
            # boot state (<available> element) deliberately not included
            # NITOS nodes are reserved exclusively
            rspec_node['exclusive'] = 'true'
            # site location
            longitude = self.driver.testbedInfo['longitude']
            latitude = self.driver.testbedInfo['latitude']
            if longitude and latitude:
                rspec_node['location'] = Location({'longitude': longitude, 'latitude': latitude,
                                                   'country': 'unknown'})
            # 3D position of the node within the site
            rspec_node['position_3d'] = Position3D({'x': node['position']['X'],
                                                    'y': node['position']['Y'],
                                                    'z': node['position']['Z']})
            # reservation granularity
            rspec_node['granularity'] = Granularity({'grain': grain})
            # hardware type
            rspec_node['hardware_type'] = node['node_type']
            rspec_nodes.append(rspec_node)
        return (rspec_nodes, [])

    def get_leases_and_channels(self, slice=None, options=None):
        """Return (rspec_leases, rspec_channels) for the whole testbed, or
        restricted to *slice* when given.

        Durations are expressed in grains (testbed granularity units).
        """
        slices = self.driver.shell.getSlices()
        nodes = self.driver.shell.getNodes()
        leases = self.driver.shell.getReservedNodes()
        channels = self.driver.shell.getChannels()
        reserved_channels = self.driver.shell.getReservedChannels()
        grain = self.driver.testbedInfo['grain']

        if slice:
            # bugfix: the original removed items from the lists while
            # iterating over them, which skips elements
            leases = [lease for lease in leases
                      if lease['slice_id'] == slice['slice_id']]
            reserved_channels = [channel for channel in reserved_channels
                                 if channel['slice_id'] == slice['slice_id']]

        rspec_channels = []
        for channel in reserved_channels:
            rspec_channel = {}
            # retrieve the channel number; None if the id is unknown
            channel_number = None
            for chl in channels:
                if chl['channel_id'] == channel['channel_id']:
                    channel_number = chl['channel']
                    break
            rspec_channel['channel_num'] = channel_number
            rspec_channel['start_time'] = channel['start_time']
            rspec_channel['duration'] = (int(channel['end_time']) - int(channel['start_time'])) / int(grain)

            # retrieve the slice name; None if the id is unknown
            slicename = None
            for slc in slices:
                if slc['slice_id'] == channel['slice_id']:
                    slicename = slc['slice_name']
                    break
            slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
            rspec_channel['slice_id'] = hrn_to_urn(slice_hrn, 'slice')
            rspec_channels.append(rspec_channel)

        rspec_leases = []
        for lease in leases:
            rspec_lease = Lease()
            rspec_lease['lease_id'] = lease['reservation_id']
            # retrieve the node name; None if the id is unknown
            nodename = None
            for node in nodes:
                if node['node_id'] == lease['node_id']:
                    nodename = node['name']
                    break
            rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, self.driver.testbedInfo['name'], nodename)
            # retrieve the slice name; None if the id is unknown
            slicename = None
            for slc in slices:
                if slc['slice_id'] == lease['slice_id']:
                    slicename = slc['slice_name']
                    break
            slice_hrn = slicename_to_hrn(self.driver.hrn, self.driver.testbedInfo['name'], slicename)
            rspec_lease['slice_id'] = hrn_to_urn(slice_hrn, 'slice')
            rspec_lease['start_time'] = lease['start_time']
            rspec_lease['duration'] = (int(lease['end_time']) - int(lease['start_time'])) / int(grain)
            rspec_leases.append(rspec_lease)

        return (rspec_leases, rspec_channels)

    def get_channels(self, options=None):
        """Return Channel elements for all wireless channels of the testbed."""
        rspec_channels = []
        for channel in self.driver.shell.getChannels():
            rspec_channel = Channel()
            rspec_channel['channel_num'] = channel['channel']
            rspec_channel['frequency'] = channel['frequency']
            rspec_channel['standard'] = channel['modulation']
            rspec_channels.append(rspec_channel)
        return rspec_channels

    def get_rspec(self, slice_xrn=None, version=None, options=None):
        """Build and return the XML RSpec string.

        No slice_xrn -> testbed-wide advertisement; otherwise a manifest
        for that slice. options['list_leases'] selects 'resources',
        'leases' or both.
        """
        if options is None:
            options = {}
        version_manager = VersionManager()
        version = version_manager.get_version(version)

        if not slice_xrn:
            rspec_version = version_manager._get_version(version.type, version.version, 'ad')
        else:
            rspec_version = version_manager._get_version(version.type, version.version, 'manifest')

        slice, slivers = self.get_slice_and_slivers(slice_xrn)

        rspec = RSpec(version=rspec_version, user_options=options)

        if slice and 'expires' in slice:
            rspec.xml.set('expires', datetime_to_string(utcparse(slice['expires'])))

        if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
            nodes, links = self.get_nodes_and_links(slice_xrn, slice, slivers, options)
            rspec.version.add_nodes(nodes)
            rspec.version.add_links(links)
            # add sliver defaults
            default_sliver = slivers.get(None, [])
            if default_sliver:
                default_sliver_attribs = default_sliver.get('tags', [])
                for attrib in default_sliver_attribs:
                    logger.info(attrib)
                    rspec.version.add_default_sliver_attribute(attrib['tagname'], attrib['value'])
            # add wifi channels
            rspec.version.add_channels(self.get_channels())

        if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
            leases, channels = self.get_leases_and_channels(slice)
            rspec.version.add_leases(leases, channels)

        return rspec.toxml()
+
+
--- /dev/null
+import time
+import datetime
+#
+from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
+ RecordNotFound, SfaNotImplemented, SliverDoesNotExist
+
+from sfa.util.sfalogging import logger
+from sfa.util.defaultdict import defaultdict
+from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
+from sfa.util.xrn import hrn_to_urn, get_leaf, urn_to_sliver_id
+from sfa.util.cache import Cache
+
+# one would think the driver should not need to mess with the SFA db, but..
+from sfa.storage.alchemy import dbsession
+from sfa.storage.model import RegRecord
+
+# used to be used in get_ticket
+#from sfa.trust.sfaticket import SfaTicket
+
+from sfa.rspecs.version_manager import VersionManager
+from sfa.rspecs.rspec import RSpec
+
+# the driver interface, mostly provides default behaviours
+from sfa.managers.driver import Driver
+
+from sfa.nitos.nitosshell import NitosShell
+from sfa.nitos.nitosaggregate import NitosAggregate
+from sfa.nitos.nitosslices import NitosSlices
+
+from sfa.nitos.nitosxrn import NitosXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_nitos_slicename, xrn_to_hostname
+
+
def list_to_dict(recs, key):
    """
    Index a list of dictionaries by the value each holds under *key*.

    When several records share the same key value, the last one wins.
    """
    indexed = {}
    for rec in recs:
        indexed[rec[key]] = rec
    return indexed
+
+#
+# NitosShell is just an xmlrpc serverproxy where methods
+# can be sent as-is; it takes care of authentication
+# from the global config
+#
+class NitosDriver (Driver):
+
+ # the cache instance is a class member so it survives across incoming requests
+ cache = None
+
    def __init__ (self, config):
        """Set up the driver: XML-RPC shell, testbed info and optional cache.

        Note: performs a network call (getTestbedInfo) at construction time.
        """
        Driver.__init__ (self, config)
        self.shell = NitosShell (config)
        self.cache=None
        # fetched once per driver instance; used throughout for site name,
        # location and reservation grain
        self.testbedInfo = self.shell.getTestbedInfo()
        if config.SFA_AGGREGATE_CACHING:
            # the class-level cache survives across incoming requests
            if NitosDriver.cache is None:
                NitosDriver.cache = Cache()
            self.cache = NitosDriver.cache
+
+ ########################################
+ ########## registry oriented
+ ########################################
+
    def augment_records_with_testbed_info (self, sfa_records):
        # standard Driver entry point; simply delegates to fill_record_info
        return self.fill_record_info (sfa_records)
+
+ ##########
+ def register (self, sfa_record, hrn, pub_key):
+ type = sfa_record['type']
+ nitos_record = self.sfa_fields_to_nitos_fields(type, hrn, sfa_record)
+
+ if type == 'authority':
+ pointer = 1
+
+ elif type == 'slice':
+ slices = self.shell.getSlices()
+ # filter slices
+ for slice in slices:
+ if slice['slice_name'] == nitos_record['name']:
+ slice_id = slice['slice_id']
+ break
+
+ if not slice_id:
+ pointer = self.shell.addSlice({slice_name : nitos_record['name']})
+ else:
+ pointer = slice_id
+
+ elif type == 'user':
+ users = self.shell.getUsers()
+ # filter users
+ for user in users:
+ if user['user_name'] == nitos_record['name']:
+ user_id = user['user_id']
+ break
+ if not user_id:
+ pointer = self.shell.addUser({username : nitos_record['name'], email : nitos_record['email']})
+ else:
+ pointer = user_id
+
+ # What roles should this user have?
+
+ # Add the user's key
+ if pub_key:
+ self.shell.addUserKey({user_id : pointer,'key' : pub_key})
+
+ elif type == 'node':
+ login_base = PlXrn(xrn=sfa_record['authority'],type='node').pl_login_base()
+ nodes = self.shell.GetNodes([pl_record['hostname']])
+ # filter nodes
+ for node in nodes:
+ if node['node_name'] == nitos_record['name']:
+ node_id = node['node_id']
+ break
+
+ if not node_id:
+ pointer = self.shell.addNode(nitos_record)
+ else:
+ pointer = node_id
+
+ return pointer
+
+ ##########
    def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
        """
        pointer = old_sfa_record['pointer']
        type = old_sfa_record['type']

        # new_key implemented for users only
        if new_key and type not in [ 'user' ]:
            raise UnknownSfaType(type)

        if (type == "authority"):
            #self.shell.UpdateSite(pointer, new_sfa_record)
            pass

        elif type == "slice":
            nitos_record=self.sfa_fields_to_nitos_fields(type, hrn, new_sfa_record)
            if 'name' in nitos_record:
                nitos_record.pop('name')
                self.shell.updateSlice(pointer, nitos_record)

        elif type == "user":
            # SMBAKER: UpdatePerson only allows a limited set of fields to be
            # updated. Ideally we should have a more generic way of doing
            # this. I copied the field names from UpdatePerson.py...
            update_fields = {}
            all_fields = new_sfa_record
            for key in all_fields.keys():
                if key in ['first_name', 'last_name', 'title', 'email',
                           'password', 'phone', 'url', 'bio', 'accepted_aup',
                           'enabled']:
                    update_fields[key] = all_fields[key]
            # when updating a user, we always get a 'email' field at this point
            # this is because 'email' is a native field in the RegUser object...
            if 'email' in update_fields and not update_fields['email']:
                del update_fields['email']
            self.shell.UpdatePerson(pointer, update_fields)

            if new_key:
                # must check this key against the previous one if it exists
                persons = self.shell.getUsers([pointer], ['key_ids'])
                person = persons[0]
                keys = person['key_ids']
                keys = self.shell.GetKeys(person['key_ids'])

                # Delete all stale keys
                key_exists = False
                for key in keys:
                    if new_key != key['key']:
                        self.shell.DeleteKey(key['key_id'])
                    else:
                        key_exists = True
                if not key_exists:
                    self.shell.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})

        elif type == "node":
            self.shell.UpdateNode(pointer, new_sfa_record)

        return True
        """
        # not implemented for the NITOS driver yet: the triple-quoted block
        # above is PLC-era code kept only for reference, and this method is
        # a no-op
        pass
+
+ ##########
    def remove (self, sfa_record):
        """
        type=sfa_record['type']
        pointer=sfa_record['pointer']
        if type == 'user':
            persons = self.shell.getUsers(pointer)
            # only delete this person if he has site ids. if he doesnt, it probably means
            # he was just removed from a site, not actually deleted
            if persons and persons[0]['site_ids']:
                self.shell.DeletePerson(pointer)
        elif type == 'slice':
            if self.shell.GetSlices(pointer):
                self.shell.DeleteSlice(pointer)
        elif type == 'node':
            if self.shell.GetNodes(pointer):
                self.shell.DeleteNode(pointer)
        elif type == 'authority':
            if self.shell.GetSites(pointer):
                self.shell.DeleteSite(pointer)

        return True
        """
        # not implemented for the NITOS driver yet: the triple-quoted block
        # above is PLC-era code kept only for reference, and this method is
        # a no-op
        pass
+
+
+
+
+ ##
+ # Convert SFA fields to NITOS fields for use when registering or updating
+ # registry record in the PLC database
+ #
+
    def sfa_fields_to_nitos_fields(self, type, hrn, sfa_record):
        """Derive the NITOS-side fields for an SFA record of the given type.

        Returns a dict holding at most a 'name' entry, computed from the hrn
        (slice/authority) or from sfa_record['hostname'] (node).
        """
        nitos_record = {}

        if type == "slice":
            nitos_record["name"] = hrn_to_nitos_slicename(hrn)
        elif type == "node":
            # NOTE(review): nitos_record was created empty just above, so this
            # test is always true -- it probably meant to check sfa_record
            if not "hostname" in nitos_record:
                # fetch from sfa_record
                if "hostname" not in sfa_record:
                    raise MissingSfaInfo("hostname")
                nitos_record["name"] = sfa_record["hostname"]
        elif type == "authority":
            nitos_record["name"] = NitosXrn(xrn=hrn,type='authority').nitos_login_base()
        # NOTE(review): this fallback runs for every type and overrides the
        # type-specific name whenever sfa_record has no 'name' field --
        # confirm that is intended
        if "name" not in sfa_record:
            nitos_record["name"] = hrn

        return nitos_record
+
+ ####################
+ def fill_record_info(self, records):
+ """
+ Given a (list of) SFA record, fill in the PLC specific
+ and SFA specific fields in the record.
+ """
+ if not isinstance(records, list):
+ records = [records]
+
+ self.fill_record_pl_info(records)
+ self.fill_record_hrns(records)
+ self.fill_record_sfa_info(records)
+ return records
+
    def fill_record_pl_info(self, records):
        """
        Fill in the planetlab specific fields of a SFA record. This
        involves calling the appropriate PLC method to retrieve the
        database record for the object.

        @param record: record to fill in field (in/out param)
        """
        """
        # get ids by type
        node_ids, site_ids, slice_ids = [], [], []
        person_ids, key_ids = [], []
        type_map = {'node': node_ids, 'authority': site_ids,
                    'slice': slice_ids, 'user': person_ids}

        for record in records:
            for type in type_map:
                if type == record['type']:
                    type_map[type].append(record['pointer'])

        # get pl records
        nodes, sites, slices, persons, keys = {}, {}, {}, {}, {}
        if node_ids:
            node_list = self.shell.GetNodes(node_ids)
            nodes = list_to_dict(node_list, 'node_id')
        if site_ids:
            site_list = self.shell.GetSites(site_ids)
            sites = list_to_dict(site_list, 'site_id')
        if slice_ids:
            slice_list = self.shell.GetSlices(slice_ids)
            slices = list_to_dict(slice_list, 'slice_id')
        if person_ids:
            person_list = self.shell.getUsers(person_ids)
            persons = list_to_dict(person_list, 'person_id')
            for person in persons:
                key_ids.extend(persons[person]['key_ids'])

        pl_records = {'node': nodes, 'authority': sites,
                      'slice': slices, 'user': persons}

        if key_ids:
            key_list = self.shell.GetKeys(key_ids)
            keys = list_to_dict(key_list, 'key_id')

        # fill record info
        for record in records:
            # records with pointer==-1 do not have plc info.
            # for example, the top level authority records which are
            # authorities, but not PL "sites"
            if record['pointer'] == -1:
                continue

            for type in pl_records:
                if record['type'] == type:
                    if record['pointer'] in pl_records[type]:
                        record.update(pl_records[type][record['pointer']])
                        break
            # fill in key info
            if record['type'] == 'user':
                if 'key_ids' not in record:
                    logger.info("user record has no 'key_ids' - need to import from myplc ?")
                else:
                    pubkeys = [keys[key_id]['key'] for key_id in record['key_ids'] if key_id in keys]
                    record['keys'] = pubkeys

        return records
        """
        # not implemented for the NITOS driver yet: the second triple-quoted
        # block above is PLC-era code kept only for reference, and this
        # method is a no-op
        pass
    def fill_record_hrns(self, records):
        """
        convert pl ids to hrns
        """
        # NOTE(review): the body below is the original PLC implementation,
        # neutralized into a bare string literal when this importer was
        # ported to NITOS -- the method is currently a no-op and returns None.
        """

        # get ids
        slice_ids, person_ids, site_ids, node_ids = [], [], [], []
        for record in records:
            if 'site_id' in record:
                site_ids.append(record['site_id'])
            if 'site_ids' in record:
                site_ids.extend(record['site_ids'])
            if 'person_ids' in record:
                person_ids.extend(record['person_ids'])
            if 'slice_ids' in record:
                slice_ids.extend(record['slice_ids'])
            if 'node_ids' in record:
                node_ids.extend(record['node_ids'])

        # get pl records
        slices, persons, sites, nodes = {}, {}, {}, {}
        if site_ids:
            site_list = self.shell.GetSites(site_ids, ['site_id', 'login_base'])
            sites = list_to_dict(site_list, 'site_id')
        if person_ids:
            person_list = self.shell.getUsers(person_ids, ['person_id', 'email'])
            persons = list_to_dict(person_list, 'person_id')
        if slice_ids:
            slice_list = self.shell.GetSlices(slice_ids, ['slice_id', 'name'])
            slices = list_to_dict(slice_list, 'slice_id')
        if node_ids:
            node_list = self.shell.GetNodes(node_ids, ['node_id', 'hostname'])
            nodes = list_to_dict(node_list, 'node_id')

        # convert ids to hrns
        for record in records:
            # get all relevant data
            type = record['type']
            pointer = record['pointer']
            auth_hrn = self.hrn
            login_base = ''
            if pointer == -1:
                continue

            if 'site_id' in record:
                site = sites[record['site_id']]
                login_base = site['login_base']
                record['site'] = ".".join([auth_hrn, login_base])
            if 'person_ids' in record:
                emails = [persons[person_id]['email'] for person_id in record['person_ids'] \
                          if person_id in persons]
                usernames = [email.split('@')[0] for email in emails]
                person_hrns = [".".join([auth_hrn, login_base, username]) for username in usernames]
                record['persons'] = person_hrns
            if 'slice_ids' in record:
                slicenames = [slices[slice_id]['name'] for slice_id in record['slice_ids'] \
                              if slice_id in slices]
                slice_hrns = [slicename_to_hrn(auth_hrn, slicename) for slicename in slicenames]
                record['slices'] = slice_hrns
            if 'node_ids' in record:
                hostnames = [nodes[node_id]['hostname'] for node_id in record['node_ids'] \
                             if node_id in nodes]
                node_hrns = [hostname_to_hrn(auth_hrn, login_base, hostname) for hostname in hostnames]
                record['nodes'] = node_hrns
            if 'site_ids' in record:
                login_bases = [sites[site_id]['login_base'] for site_id in record['site_ids'] \
                               if site_id in sites]
                site_hrns = [".".join([auth_hrn, lbase]) for lbase in login_bases]
                record['sites'] = site_hrns

            if 'expires' in record:
                date = utcparse(record['expires'])
                datestring = datetime_to_string(date)
                record['expires'] = datestring

        return records
        """
        pass
+
    def fill_record_sfa_info(self, records):
        # NOTE(review): the body below is the original PLC implementation,
        # neutralized into a bare string literal when this importer was
        # ported to NITOS -- the method is currently a no-op.
        """
        def startswith(prefix, values):
            return [value for value in values if value.startswith(prefix)]

        # get person ids
        person_ids = []
        site_ids = []
        for record in records:
            person_ids.extend(record.get("person_ids", []))
            site_ids.extend(record.get("site_ids", []))
            if 'site_id' in record:
                site_ids.append(record['site_id'])

        # get all pis from the sites we've encountered
        # and store them in a dictionary keyed on site_id
        site_pis = {}
        if site_ids:
            pi_filter = {'|roles': ['pi'], '|site_ids': site_ids}
            pi_list = self.shell.getUsers(pi_filter, ['person_id', 'site_ids'])
            for pi in pi_list:
                # we will need the pi's hrns also
                person_ids.append(pi['person_id'])

                # we also need to keep track of the sites these pis
                # belong to
                for site_id in pi['site_ids']:
                    if site_id in site_pis:
                        site_pis[site_id].append(pi)
                    else:
                        site_pis[site_id] = [pi]

        # get sfa records for all records associated with these records.
        # we'll replace pl ids (person_ids) with hrns from the sfa records
        # we obtain

        # get the registry records
        person_list, persons = [], {}
        person_list = dbsession.query (RegRecord).filter(RegRecord.pointer.in_(person_ids))
        # create a hrns keyed on the sfa record's pointer.
        # Its possible for multiple records to have the same pointer so
        # the dict's value will be a list of hrns.
        persons = defaultdict(list)
        for person in person_list:
            persons[person.pointer].append(person)

        # get the pl records
        pl_person_list, pl_persons = [], {}
        pl_person_list = self.shell.getUsers(person_ids, ['person_id', 'roles'])
        pl_persons = list_to_dict(pl_person_list, 'person_id')

        # fill sfa info
        for record in records:
            # skip records with no pl info (top level authorities)
            #if record['pointer'] == -1:
            #    continue
            sfa_info = {}
            type = record['type']
            logger.info("fill_record_sfa_info - incoming record typed %s"%type)
            if (type == "slice"):
                # all slice users are researchers
                record['geni_urn'] = hrn_to_urn(record['hrn'], 'slice')
                record['PI'] = []
                record['researcher'] = []
                for person_id in record.get('person_ids', []):
                    hrns = [person.hrn for person in persons[person_id]]
                    record['researcher'].extend(hrns)

                # pis at the slice's site
                if 'site_id' in record and record['site_id'] in site_pis:
                    pl_pis = site_pis[record['site_id']]
                    pi_ids = [pi['person_id'] for pi in pl_pis]
                    for person_id in pi_ids:
                        hrns = [person.hrn for person in persons[person_id]]
                        record['PI'].extend(hrns)
                    record['geni_creator'] = record['PI']

            elif (type.startswith("authority")):
                record['url'] = None
                logger.info("fill_record_sfa_info - authority xherex")
                if record['pointer'] != -1:
                    record['PI'] = []
                    record['operator'] = []
                    record['owner'] = []
                    for pointer in record.get('person_ids', []):
                        if pointer not in persons or pointer not in pl_persons:
                            # this means there is not sfa or pl record for this user
                            continue
                        hrns = [person.hrn for person in persons[pointer]]
                        roles = pl_persons[pointer]['roles']
                        if 'pi' in roles:
                            record['PI'].extend(hrns)
                        if 'tech' in roles:
                            record['operator'].extend(hrns)
                        if 'admin' in roles:
                            record['owner'].extend(hrns)
                    # xxx TODO: OrganizationName
            elif (type == "node"):
                sfa_info['dns'] = record.get("hostname", "")
                # xxx TODO: URI, LatLong, IP, DNS

            elif (type == "user"):
                logger.info('setting user.email')
                sfa_info['email'] = record.get("email", "")
                sfa_info['geni_urn'] = hrn_to_urn(record['hrn'], 'user')
                sfa_info['geni_certificate'] = record['gid']
                # xxx TODO: PostalAddress, Phone
            record.update(sfa_info)
        """
        pass
+
+ ####################
+ # plcapi works by changes, compute what needs to be added/deleted
+ def update_relation (self, subject_type, target_type, relation_name, subject_id, target_ids):
+ """
+ # hard-wire the code for slice/user for now, could be smarter if needed
+ if subject_type =='slice' and target_type == 'user' and relation_name == 'researcher':
+ subject=self.shell.GetSlices (subject_id)[0]
+ current_target_ids = subject['person_ids']
+ add_target_ids = list ( set (target_ids).difference(current_target_ids))
+ del_target_ids = list ( set (current_target_ids).difference(target_ids))
+ logger.debug ("subject_id = %s (type=%s)"%(subject_id,type(subject_id)))
+ for target_id in add_target_ids:
+ self.shell.AddPersonToSlice (target_id,subject_id)
+ logger.debug ("add_target_id = %s (type=%s)"%(target_id,type(target_id)))
+ for target_id in del_target_ids:
+ logger.debug ("del_target_id = %s (type=%s)"%(target_id,type(target_id)))
+ self.shell.DeletePersonFromSlice (target_id, subject_id)
+ elif subject_type == 'authority' and target_type == 'user' and relation_name == 'pi':
+ # due to the plcapi limitations this means essentially adding pi role to all people in the list
+ # it's tricky to remove any pi role here, although it might be desirable
+ persons = self.shell.getUsers (target_ids)
+ for person in persons:
+ if 'pi' not in person['roles']:
+ self.shell.AddRoleToPerson('pi',person['person_id'])
+ else:
+ logger.info('unexpected relation %s to maintain, %s -> %s'%(relation_name,subject_type,target_type))
+
+ """
+ pass
+ ########################################
+ ########## aggregate oriented
+ ########################################
+
+ def testbed_name (self): return "nitos"
+
+ # 'geni_request_rspec_versions' and 'geni_ad_rspec_versions' are mandatory
+ def aggregate_version (self):
+ version_manager = VersionManager()
+ ad_rspec_versions = []
+ request_rspec_versions = []
+ for rspec_version in version_manager.versions:
+ if rspec_version.content_type in ['*', 'ad']:
+ ad_rspec_versions.append(rspec_version.to_dict())
+ if rspec_version.content_type in ['*', 'request']:
+ request_rspec_versions.append(rspec_version.to_dict())
+ return {
+ 'testbed':self.testbed_name(),
+ 'geni_request_rspec_versions': request_rspec_versions,
+ 'geni_ad_rspec_versions': ad_rspec_versions,
+ }
+
+ def list_slices (self, creds, options):
+ # look in cache first
+ if self.cache:
+ slices = self.cache.get('slices')
+ if slices:
+ logger.debug("NitosDriver.list_slices returns from cache")
+ return slices
+
+ # get data from db
+ slices = self.shell.getSlices()
+ # get site name
+ #site_name = self.shell.getTestbedInfo()['site_name']
+ site_name = "nitos"
+ slice_hrns = [slicename_to_hrn(self.hrn, site_name, slice['slice_name']) for slice in slices]
+ slice_urns = [hrn_to_urn(slice_hrn, 'slice') for slice_hrn in slice_hrns]
+
+ # cache the result
+ if self.cache:
+ logger.debug ("NitosDriver.list_slices stores value in cache")
+ self.cache.add('slices', slice_urns)
+
+ return slice_urns
+
+ # first 2 args are None in case of resource discovery
+ def list_resources (self, slice_urn, slice_hrn, creds, options):
+ cached_requested = options.get('cached', True)
+ version_manager = VersionManager()
+ # get the rspec's return format from options
+ rspec_version = version_manager.get_version(options.get('geni_rspec_version'))
+ version_string = "rspec_%s" % (rspec_version)
+
+ #panos adding the info option to the caching key (can be improved)
+ if options.get('info'):
+ version_string = version_string + "_"+options.get('info', 'default')
+
+ # Adding the list_leases option to the caching key
+ if options.get('list_leases'):
+ version_string = version_string + "_"+options.get('list_leases', 'default')
+
+ # Adding geni_available to caching key
+ if options.get('geni_available'):
+ version_string = version_string + "_" + str(options.get('geni_available'))
+
+ # look in cache first
+ if cached_requested and self.cache and not slice_hrn:
+ rspec = self.cache.get(version_string)
+ if rspec:
+ logger.debug("NitosDriver.ListResources: returning cached advertisement")
+ return rspec
+
+ #panos: passing user-defined options
+ #print "manager options = ",options
+ aggregate = NitosAggregate(self)
+ rspec = aggregate.get_rspec(slice_xrn=slice_urn, version=rspec_version,
+ options=options)
+
+ # cache the result
+ if self.cache and not slice_hrn:
+ logger.debug("NitosDriver.ListResources: stores advertisement in cache")
+ self.cache.add(version_string, rspec)
+
+ return rspec
+
+ def sliver_status (self, slice_urn, slice_hrn):
+ # find out where this slice is currently running
+ slicename = hrn_to_nitos_slicename(slice_hrn)
+
+ slices = self.shell.getSlices()
+ # filter slicename
+ if len(slices) == 0:
+ raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
+
+ for slice in slices:
+ if slice['slice_name'] == slicename:
+ user_slice = slice
+ break
+
+ if not user_slice:
+ raise SliverDoesNotExist("%s (used %s as slicename internally)" % (slice_hrn, slicename))
+
+ # report about the reserved nodes only
+ reserved_nodes = self.shell.getReservedNodes()
+ nodes = self.shell.getNodes()
+
+ user_reserved_nodes = []
+ for r_node in reserved_nodes:
+ if r_node['slice_id'] == slice['slice_id']:
+ for node in nodes:
+ if node['id'] == r_node['node_id']:
+ user_reserved_nodes.append(node)
+
+
+
+
+ if len(user_reserved_nodes) == 0:
+ raise SliverDoesNotExist("You have not allocated any slivers here")
+
+##### continue from here
+ # get login info
+ user = {}
+ if slice['person_ids']:
+ persons = self.shell.GetPersons(slice['person_ids'], ['key_ids'])
+ key_ids = [key_id for person in persons for key_id in person['key_ids']]
+ person_keys = self.shell.GetKeys(key_ids)
+ keys = [key['key'] for key in person_keys]
+
+ user.update({'urn': slice_urn,
+ 'login': slice['name'],
+ 'protocol': ['ssh'],
+ 'port': ['22'],
+ 'keys': keys})
+
+ site_ids = [node['site_id'] for node in nodes]
+
+ result = {}
+ top_level_status = 'unknown'
+ if nodes:
+ top_level_status = 'ready'
+ result['geni_urn'] = slice_urn
+ result['pl_login'] = slice['name']
+ result['pl_expires'] = datetime_to_string(utcparse(slice['expires']))
+ result['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
+
+ resources = []
+ for node in nodes:
+ res = {}
+ res['pl_hostname'] = node['hostname']
+ res['pl_boot_state'] = node['boot_state']
+ res['pl_last_contact'] = node['last_contact']
+ res['geni_expires'] = datetime_to_string(utcparse(slice['expires']))
+ if node['last_contact'] is not None:
+
+ res['pl_last_contact'] = datetime_to_string(utcparse(node['last_contact']))
+ sliver_id = urn_to_sliver_id(slice_urn, slice['slice_id'], node['node_id'], authority=self.hrn)
+ res['geni_urn'] = sliver_id
+ if node['boot_state'] == 'boot':
+ res['geni_status'] = 'ready'
+ else:
+ res['geni_status'] = 'failed'
+ top_level_status = 'failed'
+
+ res['geni_error'] = ''
+ res['users'] = [user]
+
+ resources.append(res)
+
+ result['geni_status'] = top_level_status
+ result['geni_resources'] = resources
+
+ return result
+
+ def create_sliver (self, slice_urn, slice_hrn, creds, rspec_string, users, options):
+
+ aggregate = NitosAggregate(self)
+ slices = NitosSlices(self)
+ peer = slices.get_peer(slice_hrn)
+ sfa_peer = slices.get_sfa_peer(slice_hrn)
+ slice_record=None
+ if users:
+ slice_record = users[0].get('slice_record', {})
+
+ # parse rspec
+ rspec = RSpec(rspec_string)
+ requested_attributes = rspec.version.get_slice_attributes()
+
+ # ensure site record exists
+ site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer, options=options)
+ # ensure slice record exists
+ slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer, options=options)
+ # ensure person records exists
+ persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer, options=options)
+ # ensure slice attributes exists
+ slices.verify_slice_attributes(slice, requested_attributes, options=options)
+
+ # add/remove slice from nodes
+ requested_slivers = []
+ for node in rspec.version.get_nodes_with_slivers():
+ hostname = None
+ if node.get('component_name'):
+ hostname = node.get('component_name').strip()
+ elif node.get('component_id'):
+ hostname = xrn_to_hostname(node.get('component_id').strip())
+ if hostname:
+ requested_slivers.append(hostname)
+ nodes = slices.verify_slice_nodes(slice, requested_slivers, peer)
+
+ # add/remove links links
+ slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
+
+ # add/remove leases
+ requested_leases = []
+ kept_leases = []
+ for lease in rspec.version.get_leases():
+ requested_lease = {}
+ if not lease.get('lease_id'):
+ requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
+ requested_lease['start_time'] = lease.get('start_time')
+ requested_lease['duration'] = lease.get('duration')
+ else:
+ kept_leases.append(int(lease['lease_id']))
+ if requested_lease.get('hostname'):
+ requested_leases.append(requested_lease)
+
+ leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
+
+ return aggregate.get_rspec(slice_xrn=slice_urn, version=rspec.version)
+
+ def delete_sliver (self, slice_urn, slice_hrn, creds, options):
+ slicename = hrn_to_nitos_slicename(slice_hrn)
+ slices = self.shell.GetSlices({'name': slicename})
+ if not slices:
+ return 1
+ slice = slices[0]
+
+ # determine if this is a peer slice
+ # xxx I wonder if this would not need to use PlSlices.get_peer instead
+ # in which case plc.peers could be deprecated as this here
+ # is the only/last call to this last method in plc.peers
+ peer = peers.get_peer(self, slice_hrn)
+ try:
+ if peer:
+ self.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer)
+ self.shell.DeleteSliceFromNodes(slicename, slice['node_ids'])
+ finally:
+ if peer:
+ self.shell.BindObjectToPeer('slice', slice['slice_id'], peer, slice['peer_slice_id'])
+ return 1
+
+ def renew_sliver (self, slice_urn, slice_hrn, creds, expiration_time, options):
+ slicename = hrn_to_nitos_slicename(slice_hrn)
+ slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
+ if not slices:
+ raise RecordNotFound(slice_hrn)
+ slice = slices[0]
+ requested_time = utcparse(expiration_time)
+ record = {'expires': int(datetime_to_epoch(requested_time))}
+ try:
+ self.shell.UpdateSlice(slice['slice_id'], record)
+ return True
+ except:
+ return False
+
+
+ # xxx this code is quite old and has not run for ages
+ # it is obviously totally broken and needs a rewrite
+ def get_ticket (self, slice_urn, slice_hrn, creds, rspec_string, options):
+ raise SfaNotImplemented,"NitosDriver.get_ticket needs a rewrite"
+# please keep this code for future reference
+# slices = PlSlices(self)
+# peer = slices.get_peer(slice_hrn)
+# sfa_peer = slices.get_sfa_peer(slice_hrn)
+#
+# # get the slice record
+# credential = api.getCredential()
+# interface = api.registries[api.hrn]
+# registry = api.server_proxy(interface, credential)
+# records = registry.Resolve(xrn, credential)
+#
+# # make sure we get a local slice record
+# record = None
+# for tmp_record in records:
+# if tmp_record['type'] == 'slice' and \
+# not tmp_record['peer_authority']:
+# #Error (E0602, GetTicket): Undefined variable 'SliceRecord'
+# slice_record = SliceRecord(dict=tmp_record)
+# if not record:
+# raise RecordNotFound(slice_hrn)
+#
+# # similar to CreateSliver, we must verify that the required records exist
+# # at this aggregate before we can issue a ticket
+# # parse rspec
+# rspec = RSpec(rspec_string)
+# requested_attributes = rspec.version.get_slice_attributes()
+#
+# # ensure site record exists
+# site = slices.verify_site(slice_hrn, slice_record, peer, sfa_peer)
+# # ensure slice record exists
+# slice = slices.verify_slice(slice_hrn, slice_record, peer, sfa_peer)
+# # ensure person records exists
+# # xxx users is undefined in this context
+# persons = slices.verify_persons(slice_hrn, slice, users, peer, sfa_peer)
+# # ensure slice attributes exists
+# slices.verify_slice_attributes(slice, requested_attributes)
+#
+# # get sliver info
+# slivers = slices.get_slivers(slice_hrn)
+#
+# if not slivers:
+# raise SliverDoesNotExist(slice_hrn)
+#
+# # get initscripts
+# initscripts = []
+# data = {
+# 'timestamp': int(time.time()),
+# 'initscripts': initscripts,
+# 'slivers': slivers
+# }
+#
+# # create the ticket
+# object_gid = record.get_gid_object()
+# new_ticket = SfaTicket(subject = object_gid.get_subject())
+# new_ticket.set_gid_caller(api.auth.client_gid)
+# new_ticket.set_gid_object(object_gid)
+# new_ticket.set_issuer(key=api.key, subject=self.hrn)
+# new_ticket.set_pubkey(object_gid.get_pubkey())
+# new_ticket.set_attributes(data)
+# new_ticket.set_rspec(rspec)
+# #new_ticket.set_parent(api.auth.hierarchy.get_auth_ticket(auth_hrn))
+# new_ticket.encode()
+# new_ticket.sign()
+#
+# return new_ticket.save_to_string(save_parents=True)
--- /dev/null
+import sys
+import xmlrpclib
+import socket
+from urlparse import urlparse
+
+from sfa.util.sfalogging import logger
+
class NitosShell:
    """
    A simple xmlrpc shell to a myplc instance
    This class can receive all NITOS API calls to the underlying testbed
    For safety this is limited to a set of hard-coded calls
    """

    # the only remote methods that may be invoked through this shell;
    # anything else makes __getattr__'s wrapper raise
    direct_calls = ['getNodes','getChannels','getSlices','getUsers','getReservedNodes',
                    'getReservedChannels','getTestbedInfo'
                    ]

    # use the 'capability' auth mechanism for higher performance when the PLC db is local
    def __init__ ( self, config ) :
        """Build the XML-RPC proxy for the NITOS scheduler.

        When the configured URL points at this very host and the PLC API
        is importable, a direct PLC.Shell is used instead of XML-RPC.
        """
        url = config.SFA_NITOS_URL
        # try to figure if the url is local
        hostname = urlparse(url).hostname
        is_local = False
        if hostname == 'localhost':
            is_local = True
        # otherwise compare IP addresses;
        # this might fail for any number of reasons, so let's harden that
        try:
            # xxx todo this seems to result in a DNS request for each incoming request to the AM
            # should be cached or improved
            url_ip = socket.gethostbyname(hostname)
            local_ip = socket.gethostbyname(socket.gethostname())
            if url_ip == local_ip:
                is_local = True
        except Exception:
            # was a bare 'except:' -- narrowed so SystemExit and
            # KeyboardInterrupt are not swallowed
            pass

        if is_local:
            try:
                # too bad this is not installed properly
                plcapi_path = "/usr/share/plc_api"
                if plcapi_path not in sys.path:
                    sys.path.append(plcapi_path)
                import PLC.Shell
                plc_direct_access = True
            except Exception:
                plc_direct_access = False
        if is_local and plc_direct_access:
            logger.debug('plshell access - capability')
            self.proxy = PLC.Shell.Shell ()
        else:
            logger.debug('nitosshell access - xmlrpc')
            self.proxy = xmlrpclib.Server(url, verbose=False, allow_none=True)

    def __getattr__(self, name):
        """Expose whitelisted testbed calls as bound methods.

        Returns a wrapper that forwards to 'scheduler.server.<name>' on
        the proxy; calling a non-whitelisted name raises Exception.
        """
        def func(*args, **kwds):
            actual_name = None
            if name in NitosShell.direct_calls:
                actual_name = name
            if not actual_name:
                # call form of raise, valid under python2 and python3
                # (the original used the py2-only statement form)
                raise Exception("Illegal method call %s for NITOS driver"%(name))
            actual_name = "scheduler.server." + actual_name
            result = getattr(self.proxy, actual_name)(*args, **kwds)
            logger.debug('NitosShell %s (%s) returned ... '%(name,actual_name))
            return result
        return func
+
--- /dev/null
+from types import StringTypes
+from collections import defaultdict
+
+from sfa.util.sfatime import utcparse, datetime_to_epoch
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import Xrn, get_leaf, get_authority, urn_to_hrn
+
+from sfa.rspecs.rspec import RSpec
+
+from sfa.planetlab.vlink import VLink
+from sfa.planetlab.plxrn import PlXrn, hrn_to_pl_slicename
+
# largest signed 32-bit integer; the plain int literal replaces the py2-only
# '2L' long suffix (a SyntaxError under python3, unnecessary under python2)
MAXINT = 2**31-1
+
+class NitosSlices:
+
    # presumably maps rspec attribute names to their slice-tag counterparts;
    # not referenced in the visible code -- confirm against callers
    rspec_to_slice_tag = {'max_rate':'net_max_rate'}
+
    def __init__(self, driver):
        # keep a handle on the NitosDriver so helper methods can reach the
        # testbed API through self.driver.shell
        self.driver = driver
+
+ def get_peer(self, xrn):
+ hrn, type = urn_to_hrn(xrn)
+ #Does this slice belong to a local site or a peer NITOS site?
+ peer = None
+
+ # get this slice's authority (site)
+ slice_authority = get_authority(hrn)
+
+ # get this site's authority (sfa root authority or sub authority)
+ site_authority = get_authority(slice_authority).lower()
+
+ # check if we are already peered with this site_authority, if so
+ peers = self.driver.shell.GetPeers({}, ['peer_id', 'peername', 'shortname', 'hrn_root'])
+ for peer_record in peers:
+ names = [name.lower() for name in peer_record.values() if isinstance(name, StringTypes)]
+ if site_authority in names:
+ peer = peer_record
+
+ return peer
+
+ def get_sfa_peer(self, xrn):
+ hrn, type = urn_to_hrn(xrn)
+
+ # return the authority for this hrn or None if we are the authority
+ sfa_peer = None
+ slice_authority = get_authority(hrn)
+ site_authority = get_authority(slice_authority)
+
+ if site_authority != self.driver.hrn:
+ sfa_peer = site_authority
+
+ return sfa_peer
+
+ def verify_slice_leases(self, slice, requested_leases, kept_leases, peer):
+
+ leases = self.driver.shell.GetLeases({'name':slice['name']}, ['lease_id'])
+ grain = self.driver.shell.GetLeaseGranularity()
+ current_leases = [lease['lease_id'] for lease in leases]
+ deleted_leases = list(set(current_leases).difference(kept_leases))
+
+ try:
+ if peer:
+ self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+ deleted=self.driver.shell.DeleteLeases(deleted_leases)
+ for lease in requested_leases:
+ added=self.driver.shell.AddLeases(lease['hostname'], slice['name'], int(lease['start_time']), int(lease['duration']) * grain + int(lease['start_time']))
+
+ except:
+ logger.log_exc('Failed to add/remove slice leases')
+
+ return leases
+
+
+ def verify_slice_nodes(self, slice, requested_slivers, peer):
+
+ nodes = self.driver.shell.GetNodes(slice['node_ids'], ['node_id', 'hostname', 'interface_ids'])
+ current_slivers = [node['hostname'] for node in nodes]
+
+ # remove nodes not in rspec
+ deleted_nodes = list(set(current_slivers).difference(requested_slivers))
+
+ # add nodes from rspec
+ added_nodes = list(set(requested_slivers).difference(current_slivers))
+
+ try:
+ if peer:
+ self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+ self.driver.shell.AddSliceToNodes(slice['name'], added_nodes)
+ self.driver.shell.DeleteSliceFromNodes(slice['name'], deleted_nodes)
+
+ except:
+ logger.log_exc('Failed to add/remove slice from nodes')
+ return nodes
+
+ def free_egre_key(self):
+ used = set()
+ for tag in self.driver.shell.GetSliceTags({'tagname': 'egre_key'}):
+ used.add(int(tag['value']))
+
+ for i in range(1, 256):
+ if i not in used:
+ key = i
+ break
+ else:
+ raise KeyError("No more EGRE keys available")
+
+ return str(key)
+
    def verify_slice_links(self, slice, requested_links, nodes):
        """Install the slice tags needed to realize the requested links.

        Sets slice-wide egre_key / netns / capabilities tags plus a
        per-node topo_rspec and vini_topo tag for every requested link,
        then delegates to verify_slice_attributes.  No-op when
        requested_links is empty.

        NOTE(review): relies on self.verify_slice_attributes, which is not
        visible in this portion of the class -- confirm it exists.
        """
        if not requested_links:
            return

        # build dict of nodes keyed on node_id, collecting interface ids
        nodes_dict = {}
        interface_ids = []
        for node in nodes:
            nodes_dict[node['node_id']] = node
            interface_ids.extend(node['interface_ids'])
        # build dict of interfaces keyed on interface_id
        interfaces = self.driver.shell.GetInterfaces(interface_ids)
        interfaces_dict = {}
        for interface in interfaces:
            interfaces_dict[interface['interface_id']] = interface

        slice_tags = []

        # set egre key
        slice_tags.append({'name': 'egre_key', 'value': self.free_egre_key()})

        # set netns
        slice_tags.append({'name': 'netns', 'value': '1'})

        # set cap_net_admin
        # need to update the attribute string?
        slice_tags.append({'name': 'capabilities', 'value': 'CAP_NET_ADMIN'})

        for link in requested_links:
            # get the ip address of the first node in the link
            ifname1 = Xrn(link['interface1']['component_id']).get_leaf()
            # interface leaf is expected to look like 'node<NN>:<device>'
            (node_raw, device) = ifname1.split(':')
            node_id = int(node_raw.replace('node', ''))
            node = nodes_dict[node_id]
            if1 = interfaces_dict[node['interface_ids'][0]]
            ipaddr = if1['ip']
            topo_rspec = VLink.get_topo_rspec(link, ipaddr)
            # set topo_rspec tag
            slice_tags.append({'name': 'topo_rspec', 'value': str([topo_rspec]), 'node_id': node_id})
            # set vini_topo tag
            slice_tags.append({'name': 'vini_topo', 'value': 'manual', 'node_id': node_id})
            #self.driver.shell.AddSliceTag(slice['name'], 'topo_rspec', str([topo_rspec]), node_id)

        self.verify_slice_attributes(slice, slice_tags, {'append': True}, admin=True)
+
+
+
+ def handle_peer(self, site, slice, persons, peer):
+ if peer:
+ # bind site
+ try:
+ if site:
+ self.driver.shell.BindObjectToPeer('site', site['site_id'], peer['shortname'], slice['site_id'])
+ except Exception,e:
+ self.driver.shell.DeleteSite(site['site_id'])
+ raise e
+
+ # bind slice
+ try:
+ if slice:
+ self.driver.shell.BindObjectToPeer('slice', slice['slice_id'], peer['shortname'], slice['slice_id'])
+ except Exception,e:
+ self.driver.shell.DeleteSlice(slice['slice_id'])
+ raise e
+
+ # bind persons
+ for person in persons:
+ try:
+ self.driver.shell.BindObjectToPeer('person',
+ person['person_id'], peer['shortname'], person['peer_person_id'])
+
+ for (key, remote_key_id) in zip(person['keys'], person['key_ids']):
+ try:
+ self.driver.shell.BindObjectToPeer( 'key', key['key_id'], peer['shortname'], remote_key_id)
+ except:
+ self.driver.shell.DeleteKey(key['key_id'])
+ logger("failed to bind key: %s to peer: %s " % (key['key_id'], peer['shortname']))
+ except Exception,e:
+ self.driver.shell.DeletePerson(person['person_id'])
+ raise e
+
+ return slice
+
+ def verify_site(self, slice_xrn, slice_record={}, peer=None, sfa_peer=None, options={}):
+ (slice_hrn, type) = urn_to_hrn(slice_xrn)
+ site_hrn = get_authority(slice_hrn)
+ # login base can't be longer than 20 characters
+ slicename = hrn_to_pl_slicename(slice_hrn)
+ authority_name = slicename.split('_')[0]
+ login_base = authority_name[:20]
+ sites = self.driver.shell.GetSites(login_base)
+ if not sites:
+ # create new site record
+ site = {'name': 'geni.%s' % authority_name,
+ 'abbreviated_name': authority_name,
+ 'login_base': login_base,
+ 'max_slices': 100,
+ 'max_slivers': 1000,
+ 'enabled': True,
+ 'peer_site_id': None}
+ if peer:
+ site['peer_site_id'] = slice_record.get('site_id', None)
+ site['site_id'] = self.driver.shell.AddSite(site)
+ # exempt federated sites from monitor policies
+ self.driver.shell.AddSiteTag(site['site_id'], 'exempt_site_until', "20200101")
+
+# # is this still necessary?
+# # add record to the local registry
+# if sfa_peer and slice_record:
+# peer_dict = {'type': 'authority', 'hrn': site_hrn, \
+# 'peer_authority': sfa_peer, 'pointer': site['site_id']}
+# self.registry.register_peer_object(self.credential, peer_dict)
+ else:
+ site = sites[0]
+ if peer:
+ # unbind from peer so we can modify if necessary. Will bind back later
+ self.driver.shell.UnBindObjectFromPeer('site', site['site_id'], peer['shortname'])
+
+ return site
+
+ def verify_slice(self, slice_hrn, slice_record, peer, sfa_peer, options={}):
+ slicename = hrn_to_pl_slicename(slice_hrn)
+ parts = slicename.split("_")
+ login_base = parts[0]
+ slices = self.driver.shell.GetSlices([slicename])
+ if not slices:
+ slice = {'name': slicename,
+ 'url': slice_record.get('url', slice_hrn),
+ 'description': slice_record.get('description', slice_hrn)}
+ # add the slice
+ slice['slice_id'] = self.driver.shell.AddSlice(slice)
+ slice['node_ids'] = []
+ slice['person_ids'] = []
+ if peer:
+ slice['peer_slice_id'] = slice_record.get('slice_id', None)
+ # mark this slice as an sfa peer record
+# if sfa_peer:
+# peer_dict = {'type': 'slice', 'hrn': slice_hrn,
+# 'peer_authority': sfa_peer, 'pointer': slice['slice_id']}
+# self.registry.register_peer_object(self.credential, peer_dict)
+ else:
+ slice = slices[0]
+ if peer:
+ slice['peer_slice_id'] = slice_record.get('slice_id', None)
+ # unbind from peer so we can modify if necessary. Will bind back later
+ self.driver.shell.UnBindObjectFromPeer('slice', slice['slice_id'], peer['shortname'])
+ #Update existing record (e.g. expires field) it with the latest info.
+ if slice_record.get('expires'):
+ requested_expires = int(datetime_to_epoch(utcparse(slice_record['expires'])))
+ if requested_expires and slice['expires'] != requested_expires:
+ self.driver.shell.UpdateSlice( slice['slice_id'], {'expires' : requested_expires})
+
+ return slice
+
+ #def get_existing_persons(self, users):
+ def verify_persons(self, slice_hrn, slice_record, users, peer, sfa_peer, options={}):
+ users_by_email = {}
+ users_by_site = defaultdict(list)
+ users_dict = {}
+ for user in users:
+ user['urn'] = user['urn'].lower()
+ hrn, type = urn_to_hrn(user['urn'])
+ username = get_leaf(hrn)
+ login_base = PlXrn(xrn=user['urn']).pl_login_base()
+ user['username'] = username
+ user['site'] = login_base
+
+ if 'email' in user:
+ user['email'] = user['email'].lower()
+ users_by_email[user['email']] = user
+ users_dict[user['email']] = user
+ else:
+ users_by_site[user['site']].append(user)
+
+ # start building a list of existing users
+ existing_user_ids = []
+ existing_user_ids_filter = []
+ if users_by_email:
+ existing_user_ids_filter.extend(users_by_email.keys())
+ if users_by_site:
+ for login_base in users_by_site:
+ users = users_by_site[login_base]
+ for user in users:
+ existing_user_ids_filter.append(user['username']+'@geni.net')
+ if existing_user_ids_filter:
+ # get existing users by email
+ existing_users = self.driver.shell.GetPersons({'email': existing_user_ids_filter},
+ ['person_id', 'key_ids', 'email'])
+ existing_user_ids.extend([user['email'] for user in existing_users])
+
+ if users_by_site:
+ # get a list of user sites (based on requeste user urns
+ site_list = self.driver.shell.GetSites(users_by_site.keys(), \
+ ['site_id', 'login_base', 'person_ids'])
+ # get all existing users at these sites
+ sites = {}
+ site_user_ids = []
+ for site in site_list:
+ sites[site['site_id']] = site
+ site_user_ids.extend(site['person_ids'])
+
+ existing_site_persons_list = self.driver.shell.GetPersons(site_user_ids,
+ ['person_id', 'key_ids', 'email', 'site_ids'])
+
+ # all requested users are either existing users or new (added) users
+ for login_base in users_by_site:
+ requested_site_users = users_by_site[login_base]
+ for requested_user in requested_site_users:
+ user_found = False
+ for existing_user in existing_site_persons_list:
+ for site_id in existing_user['site_ids']:
+ if site_id in sites:
+ site = sites[site_id]
+ if login_base == site['login_base'] and \
+ existing_user['email'].startswith(requested_user['username']+'@'):
+ existing_user_ids.append(existing_user['email'])
+ requested_user['email'] = existing_user['email']
+ users_dict[existing_user['email']] = requested_user
+ user_found = True
+ break
+ if user_found:
+ break
+
+ if user_found == False:
+ fake_email = requested_user['username'] + '@geni.net'
+ requested_user['email'] = fake_email
+ users_dict[fake_email] = requested_user
+
+ # requested slice users
+ requested_user_ids = users_dict.keys()
+ # existing slice users
+ existing_slice_users_filter = {'person_id': slice_record.get('person_ids', [])}
+ existing_slice_users = self.driver.shell.GetPersons(existing_slice_users_filter,
+ ['person_id', 'key_ids', 'email'])
+ existing_slice_user_ids = [user['email'] for user in existing_slice_users]
+
+ # users to be added, removed or updated
+ added_user_ids = set(requested_user_ids).difference(existing_user_ids)
+ added_slice_user_ids = set(requested_user_ids).difference(existing_slice_user_ids)
+ removed_user_ids = set(existing_slice_user_ids).difference(requested_user_ids)
+ updated_user_ids = set(existing_slice_user_ids).intersection(requested_user_ids)
+
+ # Remove stale users (only if we are not appending).
+ # Append by default.
+ append = options.get('append', True)
+ if append == False:
+ for removed_user_id in removed_user_ids:
+ self.driver.shell.DeletePersonFromSlice(removed_user_id, slice_record['name'])
+ # update_existing users
+ updated_users_list = [user for user in users_dict.values() if user['email'] in \
+ updated_user_ids]
+ self.verify_keys(existing_slice_users, updated_users_list, peer, options)
+
+ added_persons = []
+ # add new users
+ for added_user_id in added_user_ids:
+ added_user = users_dict[added_user_id]
+ hrn, type = urn_to_hrn(added_user['urn'])
+ person = {
+ 'first_name': added_user.get('first_name', hrn),
+ 'last_name': added_user.get('last_name', hrn),
+ 'email': added_user_id,
+ 'peer_person_id': None,
+ 'keys': [],
+ 'key_ids': added_user.get('key_ids', []),
+ }
+ person['person_id'] = self.driver.shell.AddPerson(person)
+ if peer:
+ person['peer_person_id'] = added_user['person_id']
+ added_persons.append(person)
+
+ # enable the account
+ self.driver.shell.UpdatePerson(person['person_id'], {'enabled': True})
+
+ # add person to site
+ self.driver.shell.AddPersonToSite(added_user_id, added_user['site'])
+
+ for key_string in added_user.get('keys', []):
+ key = {'key':key_string, 'key_type':'ssh'}
+ key['key_id'] = self.driver.shell.AddPersonKey(person['person_id'], key)
+ person['keys'].append(key)
+
+ # add the registry record
+# if sfa_peer:
+# peer_dict = {'type': 'user', 'hrn': hrn, 'peer_authority': sfa_peer, \
+# 'pointer': person['person_id']}
+# self.registry.register_peer_object(self.credential, peer_dict)
+
+ for added_slice_user_id in added_slice_user_ids.union(added_user_ids):
+ # add person to the slice
+ self.driver.shell.AddPersonToSlice(added_slice_user_id, slice_record['name'])
+ # if this is a peer record then it should already be bound to a peer.
+ # no need to return worry about it getting bound later
+
+ return added_persons
+
+
+ def verify_keys(self, persons, users, peer, options={}):
+ # existing keys
+ key_ids = []
+ for person in persons:
+ key_ids.extend(person['key_ids'])
+ keylist = self.driver.shell.GetKeys(key_ids, ['key_id', 'key'])
+ keydict = {}
+ for key in keylist:
+ keydict[key['key']] = key['key_id']
+ existing_keys = keydict.keys()
+ persondict = {}
+ for person in persons:
+ persondict[person['email']] = person
+
+ # add new keys
+ requested_keys = []
+ updated_persons = []
+ for user in users:
+ user_keys = user.get('keys', [])
+ updated_persons.append(user)
+ for key_string in user_keys:
+ requested_keys.append(key_string)
+ if key_string not in existing_keys:
+ key = {'key': key_string, 'key_type': 'ssh'}
+ try:
+ if peer:
+ person = persondict[user['email']]
+ self.driver.shell.UnBindObjectFromPeer('person', person['person_id'], peer['shortname'])
+ key['key_id'] = self.driver.shell.AddPersonKey(user['email'], key)
+ if peer:
+ key_index = user_keys.index(key['key'])
+ remote_key_id = user['key_ids'][key_index]
+ self.driver.shell.BindObjectToPeer('key', key['key_id'], peer['shortname'], remote_key_id)
+
+ finally:
+ if peer:
+ self.driver.shell.BindObjectToPeer('person', person['person_id'], peer['shortname'], user['person_id'])
+
+ # remove old keys (only if we are not appending)
+ append = options.get('append', True)
+ if append == False:
+ removed_keys = set(existing_keys).difference(requested_keys)
+ for existing_key_id in keydict:
+ if keydict[existing_key_id] in removed_keys:
+ try:
+ if peer:
+ self.driver.shell.UnBindObjectFromPeer('key', existing_key_id, peer['shortname'])
+ self.driver.shell.DeleteKey(existing_key_id)
+ except:
+ pass
+
+ def verify_slice_attributes(self, slice, requested_slice_attributes, options={}, admin=False):
+ append = options.get('append', True)
+ # get list of attributes users ar able to manage
+ filter = {'category': '*slice*'}
+ if not admin:
+ filter['|roles'] = ['user']
+ slice_attributes = self.driver.shell.GetTagTypes(filter)
+ valid_slice_attribute_names = [attribute['tagname'] for attribute in slice_attributes]
+
+ # get sliver attributes
+ added_slice_attributes = []
+ removed_slice_attributes = []
+ ignored_slice_attribute_names = []
+ existing_slice_attributes = self.driver.shell.GetSliceTags({'slice_id': slice['slice_id']})
+
+ # get attributes that should be removed
+ for slice_tag in existing_slice_attributes:
+ if slice_tag['tagname'] in ignored_slice_attribute_names:
+ # If a slice already has a admin only role it was probably given to them by an
+ # admin, so we should ignore it.
+ ignored_slice_attribute_names.append(slice_tag['tagname'])
+ else:
+ # If an existing slice attribute was not found in the request it should
+ # be removed
+ attribute_found=False
+ for requested_attribute in requested_slice_attributes:
+ if requested_attribute['name'] == slice_tag['tagname'] and \
+ requested_attribute['value'] == slice_tag['value']:
+ attribute_found=True
+ break
+
+ if not attribute_found and not append:
+ removed_slice_attributes.append(slice_tag)
+
+ # get attributes that should be added:
+ for requested_attribute in requested_slice_attributes:
+ # if the requested attribute wasn't found we should add it
+ if requested_attribute['name'] in valid_slice_attribute_names:
+ attribute_found = False
+ for existing_attribute in existing_slice_attributes:
+ if requested_attribute['name'] == existing_attribute['tagname'] and \
+ requested_attribute['value'] == existing_attribute['value']:
+ attribute_found=True
+ break
+ if not attribute_found:
+ added_slice_attributes.append(requested_attribute)
+
+
+ # remove stale attributes
+ for attribute in removed_slice_attributes:
+ try:
+ self.driver.shell.DeleteSliceTag(attribute['slice_tag_id'])
+ except Exception, e:
+ logger.warn('Failed to remove sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
+ % (slice['name'], attribute['value'], attribute.get('node_id'), str(e)))
+
+ # add requested_attributes
+ for attribute in added_slice_attributes:
+ try:
+ self.driver.shell.AddSliceTag(slice['name'], attribute['name'], attribute['value'], attribute.get('node_id', None))
+ except Exception, e:
+ logger.warn('Failed to add sliver attribute. name: %s, value: %s, node_id: %s\nCause:%s'\
+ % (slice['name'], attribute['value'], attribute.get('node_id'), str(e)))
+
--- /dev/null
+# specialized Xrn class for NITOS
+import re
+from sfa.util.xrn import Xrn
+
# temporary helper functions to use this module instead of namespace

def hostname_to_hrn(auth, login_base, hostname):
    """Build the hrn of a node from its authority, site login base and DNS name."""
    site_auth = auth + '.' + login_base
    return NitosXrn(auth=site_auth, hostname=hostname).get_hrn()

def hostname_to_urn(auth, login_base, hostname):
    """Build the urn of a node from its authority, site login base and DNS name."""
    site_auth = auth + '.' + login_base
    return NitosXrn(auth=site_auth, hostname=hostname).get_urn()

def slicename_to_hrn(auth_hrn, site_name, slicename):
    """Build the hrn of a slice from its authority, site name and slice name."""
    site_auth = auth_hrn + '.' + site_name
    return NitosXrn(auth=site_auth, slicename=slicename).get_hrn()

# hack to convert nitos user name to hrn
def username_to_hrn(auth_hrn, site_name, username):
    """Build the hrn of a user; deliberately reuses the slicename code path."""
    site_auth = auth_hrn + '.' + site_name
    return NitosXrn(auth=site_auth, slicename=username).get_hrn()

def email_to_hrn(auth_hrn, email):
    """Build the hrn of a person from an email address."""
    return NitosXrn(auth=auth_hrn, email=email).get_hrn()

def hrn_to_nitos_slicename(hrn):
    """Extract the NITOS slice name embedded in a slice hrn."""
    return NitosXrn(xrn=hrn, type='slice').nitos_slicename()

# removed-dangerous - was used for non-slice objects
#def hrn_to_nitos_login_base (hrn):
#    return NitosXrn(xrn=hrn,type='slice').nitos_login_base()

def hrn_to_nitos_authname(hrn):
    """Return the last authority component of an hrn."""
    return NitosXrn(xrn=hrn, type='any').nitos_authname()

def xrn_to_hostname(hrn):
    """Recover the hostname (unescaped leaf) of a node xrn."""
    leaf = NitosXrn(xrn=hrn, type='node').get_leaf()
    return Xrn.unescape(leaf)
+
class NitosXrn (Xrn):
    """Xrn specialization for the NITOS testbed.

    Adds constructors that build an hrn from NITOS-level names (hostname,
    slicename, email, interface) and accessors that map an hrn back to
    NITOS-level names.
    """

    @staticmethod
    def site_hrn (auth, login_base):
        """hrn of a site: <auth>.<login_base>"""
        return '.'.join([auth,login_base])

    def __init__ (self, auth=None, hostname=None, slicename=None, email=None, interface=None, **kwargs):
        # exactly one of hostname/slicename/email/interface selects the
        # object type; otherwise fall back to the generic Xrn constructor
        #def hostname_to_hrn(auth_hrn, login_base, hostname):
        if hostname is not None:
            self.type='node'
            # escape the '.' in the hostname so it stays a single hrn component
            self.hrn='.'.join( [auth,Xrn.escape(hostname)] )
            self.hrn_to_urn()
        #def slicename_to_hrn(auth_hrn, slicename):
        elif slicename is not None:
            self.type='slice'
            # '.' is the hrn separator, so dots in the slice name become '_'
            self.hrn = ".".join([auth] + [slicename.replace(".", "_")])
            self.hrn_to_urn()
        #def email_to_hrn(auth_hrn, email):
        elif email is not None:
            self.type='person'
            # keep only the part before '@' and map special chars to '_'
            self.hrn='.'.join([auth,email.split('@')[0].replace(".", "_").replace("+", "_")])
            self.hrn_to_urn()
        elif interface is not None:
            self.type = 'interface'
            self.hrn = auth + '.' + interface
            self.hrn_to_urn()
        else:
            Xrn.__init__ (self,**kwargs)

    def nitos_slicename (self):
        """Return the NITOS slice name: the leaf, minus any sliver id suffix
        and any non-[a-zA-Z0-9_] characters."""
        self._normalize()
        leaf = self.leaf
        sliver_id_parts = leaf.split(':')
        name = sliver_id_parts[0]
        name = re.sub('[^a-zA-Z0-9_]', '', name)
        return name

    def nitos_authname (self):
        """Return the last authority component of the hrn."""
        self._normalize()
        return self.authority[-1]

    def interface_name(self):
        """Return the leaf, i.e. the interface name."""
        self._normalize()
        return self.leaf

    def nitos_login_base (self):
        """Derive a login base (site identifier) from the hrn."""
        self._normalize()
        if self.type and self.type.startswith('authority'):
            base = self.leaf
        else:
            base = self.authority[-1]

        # Fix up names of GENI Federates: lowercase and strip every
        # non-alphanumeric character.
        # (The original pattern '\\\[^a-zA-Z0-9]' only matched a literal
        #  backslash followed by a non-alphanumeric, so nothing was stripped.)
        base = base.lower()
        base = re.sub('[^a-zA-Z0-9]', '', base)

        # keep at most the last 20 characters
        if len(base) > 20:
            base = base[len(base)-20:]

        return base
+
+
if __name__ == '__main__':

    # quick manual smoke test for the slicename <-> hrn helpers
    #nitosxrn = NitosXrn(auth="omf.nitos",slicename="aminesl")
    #slice_hrn = nitosxrn.get_hrn()
    #slice_name = NitosXrn(xrn="omf.nitos.aminesl",type='slice').nitos_slicename()
    slicename = "giorgos_n"
    hrn = slicename_to_hrn("pla", "nitos", slicename)
    # function-call form of print works under both python 2 and 3
    print(hrn)
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
class Channel(Element):
    """Rspec element describing a reservable wireless channel."""

    # attributes carried by a <channel> element
    fields = ['channel_num', 'frequency', 'standard']
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
class Position3D(Element):
    """Rspec element holding the 3D coordinates of a node."""

    # attributes carried by a <position_3d> element
    fields = ['x', 'y', 'z']
--- /dev/null
+from sfa.rspecs.elements.element import Element
+
class Spectrum(Element):
    """Container rspec element for channel reservations; carries no attributes of its own."""

    # a <spectrum> element only wraps <channel> children
    fields = []
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+from sfa.rspecs.elements.spectrum import Spectrum
+from sfa.rspecs.elements.channel import Channel
+
+from sfa.planetlab.plxrn import xrn_to_hostname
+
class NITOSv1Channel:
    """Serialize and parse <spectrum>/<channel> sections of a NITOS v1 rspec."""

    @staticmethod
    def add_channels(xml, channels):
        """Append a <spectrum> element holding one <channel> per entry.

        :param xml: rspec (XmlElement-like) root to extend
        :param channels: list of dicts with 'channel_num', 'frequency', 'standard'
        """
        network_elems = xml.xpath('//network')
        if len(network_elems) > 0:
            network_elem = network_elems[0]
        elif len(channels) > 0:
            #network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
            # hard-coded network name — TODO derive it from the channel data
            network_urn = "pla"
            network_elem = xml.add_element('network', name = network_urn)
        else:
            network_elem = xml

        spectrum_elem = network_elem.add_instance('spectrum', [])

        channel_elems = []
        for channel in channels:
            channel_fields = ['channel_num', 'frequency', 'standard']
            channel_elem = spectrum_elem.add_instance('channel', channel, channel_fields)
            channel_elems.append(channel_elem)

    @staticmethod
    def get_channels(xml, filter=None):
        """Return the Channel objects matching 'filter' found anywhere in 'xml'."""
        # default was a shared mutable {}; use the None-sentinel idiom instead
        if filter is None:
            filter = {}
        # compute the xpath filter once for both the default and no-namespace form
        xpath_filter = XpathFilter.xpath(filter)
        xpath = '//channel%s | //default:channel%s' % (xpath_filter, xpath_filter)
        channel_elems = xml.xpath(xpath)
        return NITOSv1Channel.get_channel_objs(channel_elems)

    @staticmethod
    def get_channel_objs(channel_elems):
        """Convert <channel> elements into Channel objects."""
        channels = []
        for channel_elem in channel_elems:
            channel = Channel(channel_elem.attrib, channel_elem)
            channel['channel_num'] = channel_elem.attrib['channel_num']
            channel['frequency'] = channel_elem.attrib['frequency']
            channel['standard'] = channel_elem.attrib['standard']
            channels.append(channel)
        return channels
+
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+from sfa.rspecs.elements.lease import Lease
+
+from sfa.nitos.nitosxrn import xrn_to_hostname
+
class NITOSv1Lease:
    """Serialize and parse <lease> sections of a NITOS v1 rspec."""

    @staticmethod
    def add_leases(xml, leases, channels):
        """Append one <lease> element per (slice, timeslot) group.

        Leases sharing slice_id/start_time/duration are grouped under a single
        <lease> element, with one <node> child per lease and one <channel>
        child per matching channel reservation.

        NOTE: 'leases' is consumed (emptied) by the grouping loop below.
        """
        network_elems = xml.xpath('//network')
        if len(network_elems) > 0:
            network_elem = network_elems[0]
        elif len(leases) > 0:
            network_urn = Xrn(leases[0]['component_id']).get_authority_urn().split(':')[0]
            network_elem = xml.add_element('network', name = network_urn)
        else:
            network_elem = xml

        # group the leases by slice and timeslots
        grouped_leases = []

        while leases:
            # take the first remaining lease as the group's representative
            slice_id = leases[0]['slice_id']
            start_time = leases[0]['start_time']
            duration = leases[0]['duration']
            group = []

            for lease in leases:
                if slice_id == lease['slice_id'] and start_time == lease['start_time'] and duration == lease['duration']:
                    group.append(lease)

            grouped_leases.append(group)

            # drop the grouped leases so the while-loop terminates
            for lease1 in group:
                leases.remove(lease1)

        lease_elems = []
        for lease in grouped_leases:
            #lease_fields = ['lease_id', 'component_id', 'slice_id', 'start_time', 'duration']
            lease_fields = ['slice_id', 'start_time', 'duration']
            lease_elem = network_elem.add_instance('lease', lease[0], lease_fields)
            lease_elems.append(lease_elem)

            # add nodes of this lease
            for node in lease:
                lease_elem.add_instance('node', node, ['component_id'])

            # add reserved channels of this lease
            for channel in channels:
                if channel['slice_id'] == lease[0]['slice_id'] and channel['start_time'] == lease[0]['start_time'] and channel['duration'] == lease[0]['duration']:
                    lease_elem.add_instance('channel', channel, ['channel_num'])

    @staticmethod
    def get_leases(xml, filter=None):
        """Return the Lease objects matching 'filter' found anywhere in 'xml'."""
        # default was a shared mutable {}; use the None-sentinel idiom instead
        if filter is None:
            filter = {}
        xpath_filter = XpathFilter.xpath(filter)
        xpath = '//lease%s | //default:lease%s' % (xpath_filter, xpath_filter)
        lease_elems = xml.xpath(xpath)
        return NITOSv1Lease.get_lease_objs(lease_elems)

    @staticmethod
    def get_lease_objs(lease_elems):
        """Convert <lease> elements into Lease objects."""
        leases = []
        for lease_elem in lease_elems:
            lease = Lease(lease_elem.attrib, lease_elem)
            # only elements that carry a lease_id expose the full field set
            if lease.get('lease_id'):
                lease['lease_id'] = lease_elem.attrib['lease_id']
            lease['component_id'] = lease_elem.attrib['component_id']
            lease['slice_id'] = lease_elem.attrib['slice_id']
            lease['start_time'] = lease_elem.attrib['start_time']
            lease['duration'] = lease_elem.attrib['duration']

            leases.append(lease)
        return leases
+
--- /dev/null
+from sfa.util.sfalogging import logger
+from sfa.util.xml import XpathFilter
+from sfa.util.xrn import Xrn
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.node import Node
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.location import Location
+from sfa.rspecs.elements.position_3d import Position3D
+from sfa.rspecs.elements.hardware_type import HardwareType
+from sfa.rspecs.elements.disk_image import DiskImage
+from sfa.rspecs.elements.interface import Interface
+from sfa.rspecs.elements.bwlimit import BWlimit
+from sfa.rspecs.elements.pltag import PLTag
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+from sfa.rspecs.elements.versions.pgv2Services import PGv2Services
+
+from sfa.nitos.nitosxrn import xrn_to_hostname
+
class NITOSv1Node:
    """Serialize and parse <node> sections of a NITOS v1 rspec."""

    @staticmethod
    def add_nodes(xml, nodes):
        """Append one <node> element per node dict under the <network> element,
        creating the network element from component_manager_id if needed."""
        network_elems = xml.xpath('//network')
        if len(network_elems) > 0:
            network_elem = network_elems[0]
        elif len(nodes) > 0 and nodes[0].get('component_manager_id'):
            network_urn = nodes[0]['component_manager_id']
            network_elem = xml.add_element('network', name = Xrn(network_urn).get_hrn())
        else:
            network_elem = xml

        node_elems = []
        for node in nodes:
            node_fields = ['component_manager_id', 'component_id', 'boot_state']
            node_elem = network_elem.add_instance('node', node, node_fields)
            node_elems.append(node_elem)

            # determine network hrn
            network_hrn = None
            if 'component_manager_id' in node and node['component_manager_id']:
                network_hrn = Xrn(node['component_manager_id']).get_hrn()

            # set component_name attribute and hostname element
            if 'component_id' in node and node['component_id']:
                component_name = xrn_to_hostname(node['component_id'])
                node_elem.set('component_name', component_name)
                hostname_elem = node_elem.add_element('hostname')
                hostname_elem.set_text(component_name)

            # set site id
            if 'authority_id' in node and node['authority_id']:
                node_elem.set('site_id', node['authority_id'])

            # add location
            location = node.get('location')
            if location:
                node_elem.add_instance('location', location, Location.fields)

            # add 3D position of the node
            position_3d = node.get('position_3d')
            if position_3d:
                node_elem.add_instance('position_3d', position_3d, Position3D.fields)

            # add granularity of the reservation system
            # (guard before subscripting: the original did
            #  node.get('granularity')['grain'], which raised TypeError for
            #  nodes without a 'granularity' entry)
            granularity = node.get('granularity')
            grain = granularity['grain'] if granularity else None
            if grain:
                granularity_elem = node_elem.add_element('granularity')
                granularity_elem.set_text(str(grain))

            # add hardware type
            hardware_type_elem = node_elem.add_element('hardware_type')
            hardware_type_elem.set_text(node.get('hardware_type'))

            if isinstance(node.get('interfaces'), list):
                for interface in node.get('interfaces', []):
                    node_elem.add_instance('interface', interface, ['component_id', 'client_id', 'ipv4'])

            PGv2Services.add_services(node_elem, node.get('services', []))
            # flat tag elements: <tagname>value</tagname>
            tags = node.get('tags', [])
            if tags:
                for tag in tags:
                    tag_elem = node_elem.add_element(tag['tagname'])
                    tag_elem.set_text(tag['value'])
            NITOSv1Sliver.add_slivers(node_elem, node.get('slivers', []))

    @staticmethod
    def add_slivers(xml, slivers):
        """Attach sliver elements to the nodes they belong to.

        Each sliver is either a hostname string or a dict with 'component_id';
        the matching <node> element is located by component_id wildcard.
        """
        for sliver in slivers:
            filter = {}
            if isinstance(sliver, str):
                filter['component_id'] = '*%s*' % sliver
                sliver = {}
            elif 'component_id' in sliver and sliver['component_id']:
                filter['component_id'] = '*%s*' % sliver['component_id']
            if not filter:
                # nothing to match this sliver against
                continue
            nodes = NITOSv1Node.get_nodes(xml, filter)
            if not nodes:
                continue
            node = nodes[0]
            NITOSv1Sliver.add_slivers(node, sliver)

    @staticmethod
    def remove_slivers(xml, hostnames):
        """Drop every <sliver> under the nodes matching the given hostnames."""
        for hostname in hostnames:
            nodes = NITOSv1Node.get_nodes(xml, {'component_id': '*%s*' % hostname})
            for node in nodes:
                slivers = NITOSv1Sliver.get_slivers(node.element)
                for sliver in slivers:
                    node.element.remove(sliver.element)

    @staticmethod
    def get_nodes(xml, filter=None):
        """Return the Node objects matching 'filter' found anywhere in 'xml'."""
        # default was a shared mutable {}; use the None-sentinel idiom instead
        if filter is None:
            filter = {}
        xpath_filter = XpathFilter.xpath(filter)
        xpath = '//node%s | //default:node%s' % (xpath_filter, xpath_filter)
        node_elems = xml.xpath(xpath)
        return NITOSv1Node.get_node_objs(node_elems)

    @staticmethod
    def get_nodes_with_slivers(xml):
        """Return the Node objects that carry at least one <sliver> child."""
        xpath = '//node[count(sliver)>0] | //default:node[count(default:sliver)>0]'
        node_elems = xml.xpath(xpath)
        return NITOSv1Node.get_node_objs(node_elems)

    @staticmethod
    def get_node_objs(node_elems):
        """Convert <node> elements into fully-populated Node objects."""
        nodes = []
        for node_elem in node_elems:
            node = Node(node_elem.attrib, node_elem)
            if 'site_id' in node_elem.attrib:
                node['authority_id'] = node_elem.attrib['site_id']
            # get location
            location_elems = node_elem.xpath('./default:location | ./location')
            locations = [loc_elem.get_instance(Location) for loc_elem in location_elems]
            if len(locations) > 0:
                node['location'] = locations[0]
            # get bwlimit
            bwlimit_elems = node_elem.xpath('./default:bw_limit | ./bw_limit')
            bwlimits = [bwlimit_elem.get_instance(BWlimit) for bwlimit_elem in bwlimit_elems]
            if len(bwlimits) > 0:
                node['bwlimit'] = bwlimits[0]
            # get interfaces
            iface_elems = node_elem.xpath('./default:interface | ./interface')
            ifaces = [iface_elem.get_instance(Interface) for iface_elem in iface_elems]
            node['interfaces'] = ifaces
            # get services
            node['services'] = PGv2Services.get_services(node_elem)
            # get slivers
            node['slivers'] = NITOSv1Sliver.get_slivers(node_elem)
            # get tags
            node['tags'] = NITOSv1PLTag.get_pl_tags(node_elem, ignore=Node.fields+["hardware_type"])
            # get hardware types
            hardware_type_elems = node_elem.xpath('./default:hardware_type | ./hardware_type')
            node['hardware_types'] = [hw_type.get_instance(HardwareType) for hw_type in hardware_type_elems]

            # temporary... play nice with old slice manager rspec
            if not node['component_name']:
                hostname_elem = node_elem.find("hostname")
                if hostname_elem is not None:
                    node['component_name'] = hostname_elem.text

            nodes.append(node)
        return nodes
+
--- /dev/null
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.pltag import PLTag
+
class NITOSv1PLTag:
    """(De)serialize flat tag elements (<tagname>value</tagname>)."""

    @staticmethod
    def add_pl_tag(xml, name, value):
        """Append a single <name>value</name> element under 'xml'.

        (The original looped over an undefined name 'pl_tags', so any call
        raised NameError.)
        """
        pl_tag_elem = xml.add_element(name)
        pl_tag_elem.set_text(value)

    @staticmethod
    def get_pl_tags(xml, ignore=None):
        """Collect the child elements of 'xml' whose tag is not in 'ignore'
        as PLTag objects ({'tagname', 'value'})."""
        # default was a shared mutable []; use the None-sentinel idiom instead
        if ignore is None:
            ignore = []
        pl_tags = []
        for elem in xml.iterchildren():
            if elem.tag not in ignore:
                pl_tag = PLTag({'tagname': elem.tag, 'value': elem.text})
                pl_tags.append(pl_tag)
        return pl_tags
+
--- /dev/null
+from sfa.util.xrn import Xrn
+from sfa.util.xml import XmlElement
+
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.sliver import Sliver
+from sfa.rspecs.elements.versions.nitosv1PLTag import NITOSv1PLTag
+
+from sfa.planetlab.plxrn import PlXrn
+
class NITOSv1Sliver:
    """Serialize and parse <sliver> sections of a NITOS v1 rspec."""

    @staticmethod
    def add_slivers(xml, slivers):
        """Append one <sliver> element per sliver dict under 'xml'.

        Tags are rendered as flat child elements; when a 'sliver_id' is
        present, the sliver's 'name' attribute is derived from it.
        """
        if not slivers:
            return
        if not isinstance(slivers, list):
            slivers = [slivers]
        for sliver in slivers:
            sliver_elem = xml.add_instance('sliver', sliver, ['name'])
            tags = sliver.get('tags', [])
            if tags:
                for tag in tags:
                    NITOSv1Sliver.add_sliver_attribute(sliver_elem, tag['tagname'], tag['value'])
            if sliver.get('sliver_id'):
                name = PlXrn(xrn=sliver.get('sliver_id')).pl_slicename()
                sliver_elem.set('name', name)

    @staticmethod
    def add_sliver_attribute(xml, name, value):
        """Append a single <name>value</name> attribute element."""
        elem = xml.add_element(name)
        elem.set_text(value)

    @staticmethod
    def get_sliver_attributes(xml):
        """Return the non-standard child elements of a sliver as attribute
        instances ({'name', 'value'})."""
        attribs = []
        for elem in xml.iterchildren():
            if elem.tag not in Sliver.fields:
                xml_element = XmlElement(elem, xml.namespaces)
                instance = Element(fields=xml_element, element=elem)
                instance['name'] = elem.tag
                instance['value'] = elem.text
                attribs.append(instance)
        return attribs

    @staticmethod
    def get_slivers(xml, filter=None):
        """Return the Sliver objects directly under 'xml'.

        'filter' is accepted for interface parity with the other getters but
        is currently unused.
        """
        # default was a shared mutable {}; use the None-sentinel idiom instead
        if filter is None:
            filter = {}
        xpath = './default:sliver | ./sliver'
        sliver_elems = xml.xpath(xpath)
        slivers = []
        for sliver_elem in sliver_elems:
            sliver = Sliver(sliver_elem.attrib,sliver_elem)
            # slivers inherit their parent node's component_id
            if 'component_id' in xml.attrib:
                sliver['component_id'] = xml.attrib['component_id']
            sliver['tags'] = NITOSv1Sliver.get_sliver_attributes(sliver_elem)
            slivers.append(sliver)
        return slivers
+
SLIVER_TYPE='SLIVER_TYPE',
LEASE='LEASE',
GRANULARITY='GRANULARITY',
+ SPECTRUM='SPECTRUM',
+ CHANNEL='CHANNEL',
+ POSITION_3D ='POSITION_3D',
)
class RSpecElement:
--- /dev/null
+from copy import deepcopy
+from lxml import etree
+
+from sfa.util.sfalogging import logger
+from sfa.util.xrn import hrn_to_urn, urn_to_hrn
+from sfa.rspecs.version import RSpecVersion
+from sfa.rspecs.elements.element import Element
+from sfa.rspecs.elements.versions.pgv2Link import PGv2Link
+from sfa.rspecs.elements.versions.nitosv1Node import NITOSv1Node
+from sfa.rspecs.elements.versions.nitosv1Sliver import NITOSv1Sliver
+from sfa.rspecs.elements.versions.nitosv1Lease import NITOSv1Lease
+from sfa.rspecs.elements.versions.nitosv1Channel import NITOSv1Channel
+
+class NITOSv1(RSpecVersion):
+ enabled = True
+ type = 'NITOS'
+ content_type = '*'
+ version = '1'
+ schema = None
+ namespace = None
+ extensions = {}
+ namespaces = None
+ template = '<RSpec type="%s"></RSpec>' % type
+
+ # Network
+ def get_networks(self):
+ network_elems = self.xml.xpath('//network')
+ networks = [network_elem.get_instance(fields=['name', 'slice']) for \
+ network_elem in network_elems]
+ return networks
+
+
+ def add_network(self, network):
+ network_tags = self.xml.xpath('//network[@name="%s"]' % network)
+ if not network_tags:
+ network_tag = self.xml.add_element('network', name=network)
+ else:
+ network_tag = network_tags[0]
+ return network_tag
+
+
+ # Nodes
+
+ def get_nodes(self, filter=None):
+ return NITOSv1Node.get_nodes(self.xml, filter)
+
+ def get_nodes_with_slivers(self):
+ return NITOSv1Node.get_nodes_with_slivers(self.xml)
+
+ def add_nodes(self, nodes, network = None, no_dupes=False):
+ NITOSv1Node.add_nodes(self.xml, nodes)
+
+ def merge_node(self, source_node_tag, network, no_dupes=False):
+ if no_dupes and self.get_node_element(node['hostname']):
+ # node already exists
+ return
+
+ network_tag = self.add_network(network)
+ network_tag.append(deepcopy(source_node_tag))
+
+ # Slivers
+
+ def add_slivers(self, hostnames, attributes=[], sliver_urn=None, append=False):
+ # add slice name to network tag
+ network_tags = self.xml.xpath('//network')
+ if network_tags:
+ network_tag = network_tags[0]
+ network_tag.set('slice', urn_to_hrn(sliver_urn)[0])
+
+ # add slivers
+ sliver = {'name':sliver_urn,
+ 'pl_tags': attributes}
+ for hostname in hostnames:
+ if sliver_urn:
+ sliver['name'] = sliver_urn
+ node_elems = self.get_nodes({'component_id': '*%s*' % hostname})
+ if not node_elems:
+ continue
+ node_elem = node_elems[0]
+ NITOSv1Sliver.add_slivers(node_elem.element, sliver)
+
+ # remove all nodes without slivers
+ if not append:
+ for node_elem in self.get_nodes():
+ if not node_elem['slivers']:
+ parent = node_elem.element.getparent()
+ parent.remove(node_elem.element)
+
+
+ def remove_slivers(self, slivers, network=None, no_dupes=False):
+ NITOSv1Node.remove_slivers(self.xml, slivers)
+
+ def get_slice_attributes(self, network=None):
+ attributes = []
+ nodes_with_slivers = self.get_nodes_with_slivers()
+ for default_attribute in self.get_default_sliver_attributes(network):
+ attribute = default_attribute.copy()
+ attribute['node_id'] = None
+ attributes.append(attribute)
+ for node in nodes_with_slivers:
+ nodename=node['component_name']
+ sliver_attributes = self.get_sliver_attributes(nodename, network)
+ for sliver_attribute in sliver_attributes:
+ sliver_attribute['node_id'] = nodename
+ attributes.append(sliver_attribute)
+ return attributes
+
+
+ def add_sliver_attribute(self, component_id, name, value, network=None):
+ nodes = self.get_nodes({'component_id': '*%s*' % component_id})
+ if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
+ node = nodes[0]
+ slivers = NITOSv1Sliver.get_slivers(node)
+ if slivers:
+ sliver = slivers[0]
+ NITOSv1Sliver.add_sliver_attribute(sliver, name, value)
+ else:
+ # should this be an assert / raise an exception?
+ logger.error("WARNING: failed to find component_id %s" % component_id)
+
+ def get_sliver_attributes(self, component_id, network=None):
+ nodes = self.get_nodes({'component_id': '*%s*' % component_id})
+ attribs = []
+ if nodes is not None and isinstance(nodes, list) and len(nodes) > 0:
+ node = nodes[0]
+ slivers = NITOSv1Sliver.get_slivers(node.element)
+ if slivers is not None and isinstance(slivers, list) and len(slivers) > 0:
+ sliver = slivers[0]
+ attribs = NITOSv1Sliver.get_sliver_attributes(sliver.element)
+ return attribs
+
+ def remove_sliver_attribute(self, component_id, name, value, network=None):
+ attribs = self.get_sliver_attributes(component_id)
+ for attrib in attribs:
+ if attrib['name'] == name and attrib['value'] == value:
+ #attrib.element.delete()
+ parent = attrib.element.getparent()
+ parent.remove(attrib.element)
+
+ def add_default_sliver_attribute(self, name, value, network=None):
+ if network:
+ defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network)
+ else:
+ defaults = self.xml.xpath("//sliver_defaults")
+ if not defaults:
+ if network:
+ network_tag = self.xml.xpath("//network[@name='%s']" % network)
+ else:
+ network_tag = self.xml.xpath("//network")
+ if isinstance(network_tag, list):
+ network_tag = network_tag[0]
+ defaults = network_tag.add_element('sliver_defaults')
+ elif isinstance(defaults, list):
+ defaults = defaults[0]
+ NITOSv1Sliver.add_sliver_attribute(defaults, name, value)
+
+ def get_default_sliver_attributes(self, network=None):
+ if network:
+ defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network)
+ else:
+ defaults = self.xml.xpath("//sliver_defaults")
+ if not defaults: return []
+ return NITOSv1Sliver.get_sliver_attributes(defaults[0])
+
+ def remove_default_sliver_attribute(self, name, value, network=None):
+ attribs = self.get_default_sliver_attributes(network)
+ for attrib in attribs:
+ if attrib['name'] == name and attrib['value'] == value:
+ #attrib.element.delete()
+ parent = attrib.element.getparent()
+ parent.remove(attrib.element)
+
+ # Links
+
+ def get_links(self, network=None):
+ return PGv2Link.get_links(self.xml)
+
+ def get_link_requests(self):
+ return PGv2Link.get_link_requests(self.xml)
+
+ def add_links(self, links):
+ networks = self.get_networks()
+ if len(networks) > 0:
+ xml = networks[0].element
+ else:
+ xml = self.xml
+ PGv2Link.add_links(xml, links)
+
+ def add_link_requests(self, links):
+ PGv2Link.add_link_requests(self.xml, links)
+
+ # utility
+
+ def merge(self, in_rspec):
+ """
+ Merge contents for specified rspec with current rspec
+ """
+
+ if not in_rspec:
+ return
+
+ from sfa.rspecs.rspec import RSpec
+ if isinstance(in_rspec, RSpec):
+ rspec = in_rspec
+ else:
+ rspec = RSpec(in_rspec)
+ if rspec.version.type.lower() == 'protogeni':
+ from sfa.rspecs.rspec_converter import RSpecConverter
+ in_rspec = RSpecConverter.to_sfa_rspec(rspec.toxml())
+ rspec = RSpec(in_rspec)
+
+ # just copy over all networks
+ current_networks = self.get_networks()
+ networks = rspec.version.get_networks()
+ for network in networks:
+ current_network = network.get('name')
+ if current_network and current_network not in current_networks:
+ self.xml.append(network.element)
+ current_networks.append(current_network)
+
+ # Leases
+
+ def get_leases(self, filter=None):
+ return NITOSv1Lease.get_leases(self.xml, filter)
+
+ def add_leases(self, leases, channels, network = None, no_dupes=False):
+ NITOSv1Lease.add_leases(self.xml, leases, channels)
+
+ # Spectrum
+
+ def get_channels(self, filter=None):
+ return NITOSv1Channel.get_channels(self.xml, filter)
+
+ def add_channels(self, channels, network = None, no_dupes=False):
+ NITOSv1Channel.add_channels(self.xml, channels)
+
+
+
+if __name__ == '__main__':
+ from sfa.rspecs.rspec import RSpec
+ from sfa.rspecs.rspec_elements import *
+ r = RSpec('/tmp/resources.rspec')
+ r.load_rspec_elements(SFAv1.elements)
+ print r.get(RSpecElements.NODE)