6 #from sfa.util.config import Config
7 from sfa.util.xrn import hrn_to_urn, urn_to_hrn, urn_to_sliver_id
8 from sfa.planetlab.plxrn import PlXrn, hostname_to_urn, slicename_to_hrn
10 from sfa.rspecs.rspec import RSpec
11 from sfa.rspecs.elements.location import Location
12 from sfa.rspecs.elements.hardware_type import HardwareType
13 from sfa.rspecs.elements.node import Node
14 #from sfa.rspecs.elements.login import Login
15 #from sfa.rspecs.elements.services import Services
16 from sfa.rspecs.elements.sliver import Sliver
17 from sfa.rspecs.elements.lease import Lease
18 from sfa.rspecs.elements.granularity import Granularity
19 from sfa.rspecs.version_manager import VersionManager
21 #from sfa.util.sfatime import datetime_to_epoch
24 from sfa.util.sfalogging import logger
def hostname_to_hrn(root_auth, login_base, hostname):
    """Return the HRN of a node, qualifying its hostname with the site.

    The node name is namespaced as "<login_base>_<hostname>" and placed
    under the authority *root_auth*.
    """
    site_qualified = "%s_%s" % (login_base, hostname)
    return PlXrn(auth=root_auth, hostname=site_qualified).get_hrn()
def __init__(self, driver):
    """Aggregate view over the testbed exposed by *driver* (the API object
    used by every query method below). Body continues beyond this excerpt."""
46 def get_slice_and_slivers(self, slice_xrn):
48 Returns a dict of slivers keyed on the sliver's node_id
53 return (sfa_slice, slivers)
54 slice_urn = hrn_to_urn(slice_xrn, 'slice')
55 slice_hrn, _ = urn_to_hrn(slice_xrn)
56 slice_name = slice_hrn
58 slices = self.driver.GetSlices(slice_filter= str(slice_name), \
59 slice_filter_type = 'slice_hrn')
60 logger.debug("Slabaggregate api \tget_slice_and_slivers slices %s " \
63 return (sfa_slice, slivers)
64 if isinstance(sfa_slice, list):
69 # sort slivers by node id , if there is a job
70 #and therfore, node allocated to this slice
71 if sfa_slice['oar_job_id'] is not -1:
74 for node_id in sfa_slice['node_ids']:
75 #node_id = self.driver.root_auth + '.' + node_id
76 sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, \
77 sfa_slice['record_id_slice'], node_id),
78 'name': sfa_slice['slice_hrn'],
81 slivers[node_id] = sliver
83 logger.log_exc("SLABAGGREGATE \t \
84 get_slice_and_slivers KeyError ")
85 ## sort sliver attributes by node id
86 ##tags = self.driver.GetSliceTags({'slice_tag_id': slice['slice_tag_ids']})
88 ### most likely a default/global sliver attribute (node_id == None)
89 ##if tag['node_id'] not in slivers:
90 ##sliver = Sliver({'sliver_id': urn_to_sliver_id(slice_urn, slice['slice_id'], ""),
93 ##slivers[tag['node_id']] = sliver
94 ##slivers[tag['node_id']]['tags'].append(tag)
95 logger.debug("SLABAGGREGATE api get_slice_and_slivers slivers %s "\
97 return (sfa_slice, slivers)
def get_nodes(self, slices=None, slivers=None, options=None):
    """Return the rspec <node> elements for the testbed.

    All nodes are returned regardless of *slices*; a node whose hostname
    appears in *slivers* additionally carries sliver/client ids.

    :param slices: slice record(s); not used for filtering (see the
        commented-out block below).
    :param slivers: mapping hostname -> Sliver (defaults to empty).
    :param options: rspec options dict (defaults to empty).
    NOTE(review): this file is an excerpt — the node loops' headers, the
    dict/list initializations and the return statement are not visible.
    """
    # Fixed: avoid shared mutable default arguments ([] / {}); behavior
    # for callers passing nothing is unchanged.
    slivers = slivers if slivers is not None else []
    options = options if options is not None else {}
    # NT: the semantic of this function is not clear to me :
    # if slice is not defined, then all the nodes should be returned
    # if slice is defined, we should return only the nodes that
    # are part of this slice
    # but what is the role of the slivers parameter ?
    # So i assume that slice['node_ids'] will be the same as slivers for us
    # get the granularity in second for the reservation system
    grain = self.driver.GetLeaseGranularity()
    # Commenting this part since all nodes should be returned,
    # even if a slice is provided
    # if 'node_ids' in slice and slice['node_ids']:
    # #first case, a non empty slice was provided
    # filter['hostname'] = slice['node_ids']
    # tags_filter=filter.copy()
    # nodes = self.driver.GetNodes(filter['hostname'])
    # #second case, a slice was provided, but is empty
    # #third case, no slice was provided
    # nodes = self.driver.GetNodes()
    nodes = self.driver.GetNodes()
    #geni_available = options.get('geni_available')
    #filter['boot_state'] = 'boot'
    #filter.update({'peer_id': None})
    #nodes = self.driver.GetNodes(filter['hostname'])
    # Index nodes by id (enclosing for-loop header is on a non-visible line).
    #site_ids.append(node['site_id'])
    #interface_ids.extend(node['interface_ids'])
    #tag_ids.extend(node['node_tag_ids'])
    nodes_dict[node['node_id']] = node
    #sites_dict = self.get_sites({'site_id': site_ids})
    #interfaces = self.get_interfaces({'interface_id':interface_ids})
    #node_tags = self.get_node_tags(tags_filter)
    # Hostnames of nodes with a running job are advertised as "Reserved".
    reserved_nodes = self.driver.GetNodesCurrentlyInUse()
    # skip whitelisted nodes
    #if node['slice_ids_whitelist']:
    #if not slice or slice['slice_id'] not in node['slice_ids_whitelist']:
    # Per-node rspec construction (loop header on a non-visible line).
    # xxx how to retrieve site['login_base']
    #site_id=node['site_id']
    #site=sites_dict[site_id]
    rspec_node['component_id'] = \
        hostname_to_urn(self.driver.root_auth, \
        node['site'], node['hostname'])
    rspec_node['component_name'] = node['hostname']
    rspec_node['component_manager_id'] = \
        hrn_to_urn(self.driver.root_auth, 'authority+sa')
    #rspec_node['component_manager_id'] = Xrn(self.driver.root_auth, 'authority+sa').get_urn()
    rspec_node['authority_id'] = \
        hrn_to_urn(PlXrn.site_hrn(self.driver.root_auth, \
        node['site']), 'authority+sa')
    # do not include boot state (<available> element) in the manifest rspec
    # rspec_node['boot_state'] = node['boot_state']
    # if node['hostname'] in reserved_nodes:
    # rspec_node['boot_state'] = "Reserved"
    rspec_node['boot_state'] = node['boot_state']
    if node['hostname'] in reserved_nodes:
        rspec_node['boot_state'] = "Reserved"
    # Testbed nodes are reservable, hence always advertised exclusive.
    rspec_node['exclusive'] = 'True'
    rspec_node['hardware_types'] = [HardwareType({'name': 'slab-node'})]
    # only doing this because protogeni rspec needs
    # to advertise available initscripts
    #rspec_node['pl_initscripts'] = None
    # add site/interface info to nodes.
    # assumes that sites, interfaces and tags have already been prepared.
    #site = sites_dict[node['site_id']]
    # NOTE(review): a falsy coordinate (0/None/'') silently skips the
    # <location> element — confirm 0 is never a legitimate coordinate.
    if node['posx'] and node['posy']:
        location = Location({'longitude':node['posx'], \
            'latitude': node['posy']})
        rspec_node['location'] = location
    #rspec_node['interfaces'] = []
    #for if_id in node['interface_ids']:
    #interface = Interface(interfaces[if_id])
    #interface['ipv4'] = interface['ip']
    #interface['component_id'] = PlXrn(auth=self.driver.hrn,
    #interface='node%s:eth%s' % (node['node_id'], if_count)).get_urn()
    # interfaces in the manifest need a client id
    #interface['client_id'] = "%s:%s" % (node['node_id'], if_id)
    #rspec_node['interfaces'].append(interface)
    #tags = [PLTag(node_tags[tag_id]) for tag_id in node['node_tag_ids']]
    # Advertise the reservation granularity (seconds) on every node.
    granularity = Granularity({'grain': grain})
    rspec_node['granularity'] = granularity
    rspec_node['tags'] = []
    if node['hostname'] in slivers:
        # This node carries a sliver of the requested slice.
        sliver = slivers[node['hostname']]
        rspec_node['sliver_id'] = sliver['sliver_id']
        rspec_node['client_id'] = node['hostname']
        rspec_node['slivers'] = [sliver]
        # slivers always provide the ssh service
        #login = Login({'authentication': 'ssh-keys', 'hostname': node['hostname'], 'port':'22', 'username': sliver['name']})
        #service = Services({'login': login})
        #rspec_node['services'] = [service]
    rspec_nodes.append(rspec_node)
def get_leases(self, slice_record = None, options = None):
    """Return the Lease elements for the testbed's current reservations.

    One Lease is produced per reserved node of each job/lease.

    :param slice_record: optional slice record; filtering by slice name is
        currently disabled (see the commented-out lease_filter update).
    :param options: rspec options dict (defaults to empty).
    NOTE(review): this file is an excerpt — the enclosing `for lease in
    leases:` header and the return statement are not visible here.
    """
    # Fixed: avoid a shared mutable default argument ({}).
    options = options if options is not None else {}
    # 'clip' would trim leases to those still valid now...
    now = int(time.time())
    lease_filter = {'clip': now }
    #lease_filter.update({'name': slice_record['name']})
    # NOTE(review): lease_filter and return_fields are built but unused in
    # the visible code — GetLeases() is called without arguments below.
    return_fields = ['lease_id', 'hostname', 'site_id', \
        'name', 'start_time', 'duration']
    #leases = self.driver.GetLeases(lease_filter)
    leases = self.driver.GetLeases()
    grain = self.driver.GetLeaseGranularity()
    #as many leases as there are nodes in the job
    for node in lease['reserved_nodes']:
        rspec_lease = Lease()
        rspec_lease['lease_id'] = lease['lease_id']
        site = node['site_id']
        rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, \
            site, node['hostname'])
        rspec_lease['slice_id'] = lease['slice_id']
        rspec_lease['t_from'] = lease['t_from']
        rspec_lease['t_until'] = lease['t_until']
        rspec_leases.append(rspec_lease)
    #for lease in leases:
    #rspec_lease = Lease()
    ## xxx how to retrieve site['login_base']
    #rspec_lease['lease_id'] = lease['lease_id']
    #rspec_lease['component_id'] = hostname_to_urn(self.driver.hrn, \
    #site['login_base'], lease['hostname'])
    #slice_hrn = slicename_to_hrn(self.driver.hrn, lease['name'])
    #slice_urn = hrn_to_urn(slice_hrn, 'slice')
    #rspec_lease['slice_id'] = slice_urn
    #rspec_lease['t_from'] = lease['t_from']
    #rspec_lease['t_until'] = lease['t_until']
    #rspec_leases.append(rspec_lease)
#from plc/aggregate.py
def get_rspec(self, slice_xrn=None, version = None, options={}):
    """Build the rspec (advertisement or manifest) for this aggregate.

    :param slice_xrn: when given, a manifest for that slice is built;
        otherwise an advertisement of all resources.
    :param version: requested rspec version, resolved via VersionManager.
    :param options: may carry 'list_leases' to select which sections
        ('resources' / 'leases') are included.
    NOTE(review): this file is an excerpt — the ad/manifest branch headers
    and the method's return statement are not visible here.
    """
    version_manager = VersionManager()
    version = version_manager.get_version(version)
    logger.debug("SlabAggregate \t get_rspec ***version %s \
        version.type %s version.version %s options %s \r\n" \
        %(version,version.type,version.version,options))
    # Advertisement flavour — selected on a non-visible branch header.
    rspec_version = version_manager._get_version(version.type, \
        version.version, 'ad')
    # Manifest flavour — the alternate branch (header not visible).
    rspec_version = version_manager._get_version(version.type, \
        version.version, 'manifest')
    slices, slivers = self.get_slice_and_slivers(slice_xrn)
    #at this point sliver may be empty if no senslab job is running for this user/slice.
    rspec = RSpec(version=rspec_version, user_options=options)
    #if slice and 'expires' in slice:
    #rspec.xml.set('expires', datetime_to_epoch(slice['expires']))
    # add sliver defaults
    #nodes, links = self.get_nodes(slice, slivers)
    # Include the <node> section unless only leases were requested.
    if not options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'leases':
        nodes = self.get_nodes(slices, slivers)
        rspec.version.add_nodes(nodes)
        # Default sliver (key None) carries slice-wide attributes/tags.
        default_sliver = slivers.get(None, [])
        default_sliver_attribs = default_sliver.get('tags', [])
        logger.debug("SlabAggregate \tget_rspec **** \
            default_sliver_attribs %s \r\n" %(default_sliver_attribs))
        for attrib in default_sliver_attribs:
            logger.debug("SlabAggregate \tget_rspec ******* attrib %s \r\n"\
            rspec.version.add_default_sliver_attribute(attrib['tagname'], \
    # NOTE(review): asymmetric with the node section above — without a
    # leading `not`, leases are added only when 'list_leases' is set at
    # all; confirm that skipping leases by default is intended.
    if options.get('list_leases') or options.get('list_leases') and options['list_leases'] != 'resources':
        leases = self.get_leases(slices)
        rspec.version.add_leases(leases)