X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Fplanetlab%2Fpldriver.py;h=0978a576df3f5a1d5cdaaaa14c188171aa6b413d;hb=b54e90d0c3868fa5afb89ff784cba0d5d36265ad;hp=248bab16b87b3dc6a5d96a1e0e4205802251f3ac;hpb=fc2a216091686513bcd89006108b3b43a0527be8;p=sfa.git

diff --git a/sfa/planetlab/pldriver.py b/sfa/planetlab/pldriver.py
index 248bab16..0978a576 100644
--- a/sfa/planetlab/pldriver.py
+++ b/sfa/planetlab/pldriver.py
@@ -1,7 +1,8 @@
 import datetime
 #
 from sfa.util.faults import MissingSfaInfo, UnknownSfaType, \
-    RecordNotFound, SfaNotImplemented, SliverDoesNotExist, SearchFailed
+    RecordNotFound, SfaNotImplemented, SliverDoesNotExist, SearchFailed, \
+    UnsupportedOperation, Forbidden
 from sfa.util.sfalogging import logger
 from sfa.util.defaultdict import defaultdict
 from sfa.util.sfatime import utcparse, datetime_to_string, datetime_to_epoch
@@ -9,8 +10,8 @@ from sfa.util.xrn import Xrn, hrn_to_urn, get_leaf
 from sfa.util.cache import Cache
 
 # one would think the driver should not need to mess with the SFA db, but..
-from sfa.storage.alchemy import dbsession
 from sfa.storage.model import RegRecord, SliverAllocation
+from sfa.trust.credential import Credential
 
 # used to be used in get_ticket
 #from sfa.trust.sfaticket import SfaTicket
@@ -23,7 +24,7 @@ from sfa.planetlab.plshell import PlShell
 import sfa.planetlab.peers as peers
 from sfa.planetlab.plaggregate import PlAggregate
 from sfa.planetlab.plslices import PlSlices
-from sfa.planetlab.plxrn import PlXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, xrn_to_hostname
+from sfa.planetlab.plxrn import PlXrn, slicename_to_hrn, hostname_to_hrn, hrn_to_pl_slicename, xrn_to_hostname, top_auth, hash_loginbase
 
 def list_to_dict(recs, key):
@@ -43,15 +44,58 @@ class PlDriver (Driver):
 
     # the cache instance is a class member so it survives across incoming requests
     cache = None
 
-    def __init__ (self, config):
-        Driver.__init__ (self, config)
+    def __init__ (self, api):
+        Driver.__init__ (self, api)
+        config = api.config
         self.shell = PlShell (config)
         self.cache=None
         if config.SFA_AGGREGATE_CACHING:
             if PlDriver.cache is None:
                 PlDriver.cache = Cache()
             self.cache = PlDriver.cache
+
+    def sliver_to_slice_xrn(self, xrn):
+        sliver_id_parts = Xrn(xrn).get_sliver_id_parts()
+        filter = {}
+        try:
+            filter['slice_id'] = int(sliver_id_parts[0])
+        except ValueError:
+            filter['name'] = sliver_id_parts[0]
+        slices = self.shell.GetSlices(filter)
+        if not slices:
+            raise Forbidden("Unable to locate slice record for sliver: %s" % xrn)
+        slice = slices[0]
+        slice_xrn = PlXrn(auth=self.hrn, slicename=slice['name'])
+        return slice_xrn
+
+    def check_sliver_credentials(self, creds, urns):
+        # build list of cred object hrns
+        slice_cred_names = []
+        for cred in creds:
+            slice_cred_hrn = Credential(cred=cred).get_gid_object().get_hrn()
+            slice_cred_names.append(PlXrn(xrn=slice_cred_hrn).pl_slicename())
+
+        # look up slice name of slivers listed in urns arg
+        slice_ids = []
+        for urn in urns:
+            sliver_id_parts = Xrn(xrn=urn).get_sliver_id_parts()
+            try:
+                slice_ids.append(int(sliver_id_parts[0]))
+            except ValueError:
+                pass
+
+        if not slice_ids:
+            raise Forbidden("sliver urn not provided")
+
+        slices = self.shell.GetSlices(slice_ids)
+        sliver_names = [slice['name'] for slice in slices]
+
+        # make sure we have a credential for every specified sliver
+        for sliver_name in sliver_names:
+            if sliver_name not in slice_cred_names:
+                msg = "Valid credential not found for target: %s" % sliver_name
+                raise Forbidden(msg)
 
     ########################################
    ########## registry oriented
    ########################################
@@ -71,6 +115,7 @@ class PlDriver (Driver):
             if 'max_slices' not in pl_record:
                 pl_record['max_slices']=2
             pointer = self.shell.AddSite(pl_record)
+            self.shell.SetSiteHrn(int(pointer), hrn)
         else:
             pointer = sites[0]['site_id']
 
@@ -82,6 +127,7 @@
             slices = self.shell.GetSlices([pl_record['name']])
             if not slices:
                 pointer = self.shell.AddSlice(pl_record)
+                self.shell.SetSliceHrn(int(pointer), hrn)
             else:
                 pointer = slices[0]['slice_id']
 
@@ -90,7 +136,11 @@
             if not persons:
                 for key in ['first_name','last_name']:
                     if key not in sfa_record: sfa_record[key]='*from*sfa*'
-                pointer = self.shell.AddPerson(dict(sfa_record))
+                # AddPerson does not allow everything to be set
+                can_add = ['first_name', 'last_name', 'title','email', 'password', 'phone', 'url', 'bio']
+                add_person_dict=dict ( [ (k,sfa_record[k]) for k in sfa_record if k in can_add ] )
+                pointer = self.shell.AddPerson(add_person_dict)
+                self.shell.SetPersonHrn(int(pointer), hrn)
             else:
                 pointer = persons[0]['person_id']
 
@@ -117,10 +167,11 @@
                 self.shell.AddPersonKey(pointer, {'key_type' : 'ssh', 'key' : pub_key})
 
         elif type == 'node':
-            login_base = PlXrn(xrn=sfa_record['authority'],type='node').pl_login_base()
+            login_base = PlXrn(xrn=sfa_record['authority'],type='authority').pl_login_base()
             nodes = self.shell.GetNodes([pl_record['hostname']])
             if not nodes:
                 pointer = self.shell.AddNode(login_base, pl_record)
+                self.shell.SetNodeHrn(int(pointer), hrn)
             else:
                 pointer = nodes[0]['node_id']
 
@@ -131,6 +182,7 @@
     def update (self, old_sfa_record, new_sfa_record, hrn, new_key):
         pointer = old_sfa_record['pointer']
         type = old_sfa_record['type']
+        new_key_pointer = None
 
         # new_key implemented for users only
         if new_key and type not in [ 'user' ]:
@@ -138,12 +190,14 @@
 
         if (type == "authority"):
             self.shell.UpdateSite(pointer, new_sfa_record)
+            self.shell.SetSiteHrn(pointer, hrn)
 
         elif type == "slice":
             pl_record=self.sfa_fields_to_pl_fields(type, hrn, new_sfa_record)
             if 'name' in pl_record:
                 pl_record.pop('name')
                 self.shell.UpdateSlice(pointer, pl_record)
+                self.shell.SetSliceHrn(pointer, hrn)
 
         elif type == "user":
             # SMBAKER: UpdatePerson only allows a limited set of fields to be
@@ -161,6 +215,7 @@
                 if 'email' in update_fields and not update_fields['email']:
                     del update_fields['email']
                 self.shell.UpdatePerson(pointer, update_fields)
+                self.shell.SetPersonHrn(pointer, hrn)
 
             if new_key:
                 # must check this key against the previous one if it exists
@@ -169,20 +224,19 @@
                 keys = person['key_ids']
                 keys = self.shell.GetKeys(person['key_ids'])
 
-                # Delete all stale keys
                 key_exists = False
                 for key in keys:
-                    if new_key != key['key']:
-                        self.shell.DeleteKey(key['key_id'])
-                    else:
+                    if new_key == key['key']:
                         key_exists = True
+                        new_key_pointer = key['key_id']
+                        break
                 if not key_exists:
-                    self.shell.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
+                    new_key_pointer = self.shell.AddPersonKey(pointer, {'key_type': 'ssh', 'key': new_key})
 
         elif type == "node":
             self.shell.UpdateNode(pointer, new_sfa_record)
 
-        return True
+        return (pointer, new_key_pointer)
 
     ##########
@@ -454,7 +508,7 @@
 
         # get the registry records
         person_list, persons = [], {}
-        person_list = dbsession.query (RegRecord).filter(RegRecord.pointer.in_(person_ids))
+        person_list = self.api.dbsession().query (RegRecord).filter(RegRecord.pointer.in_(person_ids))
         # create a hrns keyed on the sfa record's pointer.
         # Its possible for multiple records to have the same pointer so
         # the dict's value will be a list of hrns.
@@ -568,16 +622,18 @@
         rspec = aggregate.list_resources(version=version, options=options)
         return rspec
 
-    def describe(self, urns, version, options={}, allocation_status=None):
+    def describe(self, urns, version, options={}):
         aggregate = PlAggregate(self)
         return aggregate.describe(urns, version=version, options=options)
 
     def status (self, urns, options={}):
         aggregate = PlAggregate(self)
-        desc = aggregate.describe(urns)
-        return desc['geni_slivers']
+        desc = aggregate.describe(urns, version='GENI 3')
+        status = {'geni_urn': desc['geni_urn'],
+                  'geni_slivers': desc['geni_slivers']}
+        return status
 
-    def allocate (self, urn, rspec_string, options={}):
+    def allocate (self, urn, rspec_string, expiration, options={}):
         xrn = Xrn(urn)
         aggregate = PlAggregate(self)
         slices = PlSlices(self)
@@ -595,161 +651,142 @@
         # ensure site record exists
         site = slices.verify_site(xrn.hrn, slice_record, peer, sfa_peer, options=options)
         # ensure slice record exists
-        slice = slices.verify_slice(xrn.hrn, slice_record, peer, sfa_peer, options=options)
+        slice = slices.verify_slice(xrn.hrn, slice_record, peer, sfa_peer, expiration=expiration, options=options)
         # ensure person records exists
         persons = slices.verify_persons(xrn.hrn, slice, users, peer, sfa_peer, options=options)
         # ensure slice attributes exists
         slices.verify_slice_attributes(slice, requested_attributes, options=options)
-        
+
         # add/remove slice from nodes
-        requested_slivers = []
-        for node in rspec.version.get_nodes_with_slivers():
-            hostname = None
-            if node.get('component_name'):
-                hostname = node.get('component_name').strip()
-            elif node.get('component_id'):
-                hostname = xrn_to_hostname(node.get('component_id').strip())
-            if hostname:
-                requested_slivers.append(hostname)
-        nodes = slices.verify_slice_nodes(slice, requested_slivers, peer)
-
-        # update all sliver allocation states setting then to geni_allocated
-        sliver_state_updated = {}
-        for node in nodes:
-            sliver_hrn = '%s.%s-%s' % (self.hrn, slice['slice_id'], node['node_id'])
-            sliver_id = Xrn(sliver_hrn, type='sliver').urn
-            sliver_state_updated[sliver_id] = False
-
-        constraint = SliverAllocation.sliver_id.in_(sliver_state_updated.keys())
-        cur_sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
-        for sliver_allocation in cur_sliver_allocations:
-            sliver_allocation.allocation_state = 'geni_allocated'
-            sliver_state_updated[sliver_allocation.sliver_id] = True
-        dbsession.commit()
-
-        # Some states may not have been updated becuase no sliver allocation state record
-        # exists for the sliver. Insert new allocation records for these slivers and set
-        # it to geni_allocated.
-        for (sliver_id, state_updated) in sliver_state_updated.items():
-            if state_updated == False:
-                record = SliverAllocation(sliver_id=sliver_id, allocation_state='geni_allocated')
-                dbsession.add(record)
-                dbsession.commit()
-
+        request_nodes = rspec.version.get_nodes_with_slivers()
+        nodes = slices.verify_slice_nodes(urn, slice, request_nodes, peer)
+
         # add/remove links
         slices.verify_slice_links(slice, rspec.version.get_link_requests(), nodes)
 
         # add/remove leases
-        requested_leases = []
-        kept_leases = []
-        for lease in rspec.version.get_leases():
-            requested_lease = {}
-            if not lease.get('lease_id'):
-                requested_lease['hostname'] = xrn_to_hostname(lease.get('component_id').strip())
-                requested_lease['start_time'] = lease.get('start_time')
-                requested_lease['duration'] = lease.get('duration')
-            else:
-                kept_leases.append(int(lease['lease_id']))
-            if requested_lease.get('hostname'):
-                requested_leases.append(requested_lease)
+        rspec_requested_leases = rspec.version.get_leases()
+        leases = slices.verify_slice_leases(slice, rspec_requested_leases, peer)
 
-        leases = slices.verify_slice_leases(slice, requested_leases, kept_leases, peer)
         # handle MyPLC peer association.
         # only used by plc and ple.
-        slices.handle_peer(site, slice, persons, peer)
+        slices.handle_peer(site, slice, None, peer)
 
         return aggregate.describe([xrn.get_urn()], version=rspec.version)
 
     def provision(self, urns, options={}):
-        # update sliver allocation states and set them to geni_provisioned
+        # update users
+        slices = PlSlices(self)
         aggregate = PlAggregate(self)
         slivers = aggregate.get_slivers(urns)
+        slice = slivers[0]
+        peer = slices.get_peer(slice['hrn'])
+        sfa_peer = slices.get_sfa_peer(slice['hrn'])
+        users = options.get('geni_users', [])
+        persons = slices.verify_persons(slice['hrn'], slice, users, peer, sfa_peer, options=options)
+        slices.handle_peer(None, None, persons, peer)
+        # update sliver allocation states and set them to geni_provisioned
         sliver_ids = [sliver['sliver_id'] for sliver in slivers]
-        constraint = SliverAllocation.sliver_id.in_(sliver_ids)
-        cur_sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
-        for sliver_allocation in cur_sliver_allocations:
-            sliver_allocation.allocation_state = 'geni_provisioned'
-        dbsession.commit()
-
-        return self.describe(urns, None, options=options)
+        dbsession = self.api.dbsession()
+        SliverAllocation.set_allocations(sliver_ids, 'geni_provisioned', dbsession)
+        version_manager = VersionManager()
+        rspec_version = version_manager.get_version(options['geni_rspec_version'])
+        return self.describe(urns, rspec_version, options=options)
 
     def delete(self, urns, options={}):
-
+        # collect sliver ids so we can update sliver allocation states after
+        # we remove the slivers.
         aggregate = PlAggregate(self)
         slivers = aggregate.get_slivers(urns)
-        slice_id = slivers[0]['slice_id']
-        node_ids = []
-        sliver_ids = []
-        for sliver in slivers:
-            node_ids.append(sliver['node_id'])
-            sliver_ids.append(sliver['sliver_id'])
-
-        # determine if this is a peer slice
-        # xxx I wonder if this would not need to use PlSlices.get_peer instead
-        # in which case plc.peers could be deprecated as this here
-        # is the only/last call to this last method in plc.peers
-        slice_hrn = PlXrn(auth=self.hrn, slicename=slivers[0]['name']).get_hrn()
-        peer = peers.get_peer(self, slice_hrn)
-        try:
-            if peer:
-                self.shell.UnBindObjectFromPeer('slice', slice_id, peer)
-
-            self.shell.DeleteSliceFromNodes(slice_id, node_ids)
-
-            # update slivera allocation states
-            constraint = SliverAllocation.sliver_id.in_(sliver_ids)
-            cur_sliver_allocations = dbsession.query(SliverAllocation).filter(constraint)
-            for sliver_allocation in cur_sliver_allocations:
-                dbsession.delete(sliver_allocation)
-            dbsession.commit()
-        finally:
-            if peer:
-                self.shell.BindObjectToPeer('slice', slice_id, peer, slice['peer_slice_id'])
+        if slivers:
+            slice_id = slivers[0]['slice_id']
+            slice_name = slivers[0]['name']
+            node_ids = []
+            sliver_ids = []
+            for sliver in slivers:
+                node_ids.append(sliver['node_id'])
+                sliver_ids.append(sliver['sliver_id'])
+
+            # leases
+            leases = self.shell.GetLeases({'name': slice_name})
+            leases_ids = [lease['lease_id'] for lease in leases]
+
+            # determine if this is a peer slice
+            # xxx I wonder if this would not need to use PlSlices.get_peer instead
+            # in which case plc.peers could be deprecated as this here
+            # is the only/last call to this last method in plc.peers
+            #slice_hrn = PlXrn(auth=self.hrn, slice_name).get_hrn()
+            slice_hrn = self.shell.GetSliceHrn(int(slice_id))
+            peer = peers.get_peer(self, slice_hrn)
+            try:
+                if peer:
+                    self.shell.UnBindObjectFromPeer('slice', slice_id, peer)
+
+                self.shell.DeleteSliceFromNodes(slice_id, node_ids)
+                if len(leases_ids) > 0:
+                    self.shell.DeleteLeases(leases_ids)
+
+                # delete sliver allocation states
+                dbsession = self.api.dbsession()
+                SliverAllocation.delete_allocations(sliver_ids, dbsession)
+            finally:
+                if peer:
+                    self.shell.BindObjectToPeer('slice', slice_id, peer, slice['peer_slice_id'])
 
         # prepare return struct
         geni_slivers = []
-        for node_id in node_ids:
-            sliver_hrn = '%s.%s-%s' % (self.hrn, slice_id, node_id)
+        for sliver in slivers:
             geni_slivers.append(
-                {'geni_sliver_urn': Xrn(sliver_hrn, type='sliver').urn,
+                {'geni_sliver_urn': sliver['sliver_id'],
                  'geni_allocation_status': 'geni_unallocated',
-                 'geni_expires': datetime_to_string(utcparse(slivers[0]['expires']))})
+                 'geni_expires': datetime_to_string(utcparse(sliver['expires']))})
         return geni_slivers
-
+
     def renew (self, urns, expiration_time, options={}):
-        # we can only renew slices, not individual slivers. ignore sliver
-        # ids in the urn
-        names = []
-        for urn in urns:
-            xrn = PlXrn(xrn=urn, type='slice')
-            names.append(xrn.pl_slicename())
-        slices = self.shell.GetSlices(names, ['slice_id'])
-        if not slices:
+        aggregate = PlAggregate(self)
+        slivers = aggregate.get_slivers(urns)
+        if not slivers:
             raise SearchFailed(urns)
-        slice = slices[0]
+        slice = slivers[0]
         requested_time = utcparse(expiration_time)
         record = {'expires': int(datetime_to_epoch(requested_time))}
-        try:
-            self.shell.UpdateSlice(slice['slice_id'], record)
-            return True
-        except:
-            return False
+        self.shell.UpdateSlice(slice['slice_id'], record)
+        description = self.describe(urns, 'GENI 3', options)
+        return description['geni_slivers']
+
     def perform_operational_action (self, urns, action, options={}):
         # MyPLC doesn't support operational actions. Lets pretend like it
         # supports start, but reject everything else.
         action = action.lower()
-        if action == 'geni_start':
-            pass
-        else:
+        if action not in ['geni_start']:
             raise UnsupportedOperation(action)
-        description = self.describe(urns, None, options)
-        return description['geni_slivers']
+
+        # fault if sliver is not fully allocated (operational status is geni_pending_allocation)
+        description = self.describe(urns, 'GENI 3', options)
+        for sliver in description['geni_slivers']:
+            if sliver['geni_operational_status'] == 'geni_pending_allocation':
+                raise UnsupportedOperation(action, "Sliver must be fully allocated (operational status is not geni_pending_allocation)")
+        #
+        # Perform Operational Action Here
+        #
+
+        geni_slivers = self.describe(urns, 'GENI 3', options)['geni_slivers']
+        return geni_slivers
 
     # set the 'enabled' tag to 0
     def shutdown (self, xrn, options={}):
-        xrn = PlXrn(xrn=xrn, type='slice')
-        slicename = xrn.pl_slicename()
+        hrn, _ = urn_to_hrn(xrn)
+        top_auth_hrn = top_auth(hrn)
+        site_hrn = '.'.join(hrn.split('.')[:-1])
+        slice_part = hrn.split('.')[-1]
+        if top_auth_hrn == self.hrn:
+            login_base = hrn.split('.')[-2][:12]
+        else:
+            login_base = hash_loginbase(site_hrn)
+
+        slicename = '_'.join([login_base, slice_part])
+
         slices = self.shell.GetSlices({'name': slicename}, ['slice_id'])
         if not slices:
             raise RecordNotFound(slice_hrn)
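
Reviewer note: the sketch below is not part of the commit. It only illustrates the access-control flow that the new sliver_to_slice_xrn() / check_sliver_credentials() helpers introduce, using stand-ins instead of the real PlShell and GENI credentials; FakeShell, the slice table, and the sample URN are all hypothetical (the real code derives slice names from Credential objects via PlXrn).

    # sliver_credential_sketch.py -- illustrative only, not part of the commit.
    class Forbidden(Exception):
        pass

    class FakeShell:
        # Stand-in for PlShell; the real GetSlices queries the MyPLC API.
        def GetSlices(self, slice_ids):
            known = {1234: {'slice_id': 1234, 'name': 'pl_myslice'}}
            return [known[i] for i in slice_ids if i in known]

    def check_sliver_credentials(slice_cred_names, urns, shell):
        # A PlanetLab sliver id is '<slice_id>-<node_id>' and sits in the
        # last field of the sliver urn, so recover the slice ids first.
        slice_ids = []
        for urn in urns:
            sliver_name = urn.split('+')[-1]
            try:
                slice_ids.append(int(sliver_name.split('-')[0]))
            except ValueError:
                pass
        if not slice_ids:
            raise Forbidden("sliver urn not provided")
        # Every slice that owns a target sliver must be covered by one of
        # the slice credentials the caller presented.
        for slice in shell.GetSlices(slice_ids):
            if slice['name'] not in slice_cred_names:
                raise Forbidden("Valid credential not found for target: %s" % slice['name'])

    if __name__ == '__main__':
        shell = FakeShell()
        urns = ['urn:publicid:IDN+plc:pl+sliver+1234-5678']
        check_sliver_credentials(['pl_myslice'], urns, shell)   # accepted
        try:
            check_sliver_credentials(['pl_other'], urns, shell) # rejected
        except Forbidden as error:
            print("Forbidden: %s" % error)

The point of the refactoring is that callers no longer pass opaque sliver ids straight through: the driver maps sliver URNs back to their owning slices and rejects any operation whose target slice is not backed by a presented credential.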