- conf_files[conf_file['dest']] = all_conf_files[conf_file_id]
-
- # filter out any slices in this nodes slice_id list that may be invalid
- # (i.e. expired slices)
- slice_ids = dict.fromkeys(filter(lambda slice_id: slice_id in all_slice_ids, node['slice_ids']))
-
- # If not a foreign node, add all of our default system
- # slices to it.
- if node['peer_id'] is None:
- slice_ids.update(system_slice_ids)
-
- slivers = []
-
- for slice in map(lambda id: all_slices[id], slice_ids.keys()):
- keys = []
- ### still missing in foreign slices
- try:
- for person in map(lambda id: all_persons[id], slice['person_ids']):
- keys += [{'key_type': all_keys[key_id]['key_type'],
- 'key': all_keys[key_id]['key']} \
- for key_id in person['key_ids']]
- except:
- keys += [{'key_type':'missing',
- 'key':'key caching not implemented yet'}]
-
- sliver_attributes = []
- attributes = []
- ### still missing in foreign slices
- try:
- slice_attributes = map(lambda id: all_slice_attributes[id],
- slice['slice_attribute_ids'])
-
- # Per-node sliver attributes take precedence over
- # global slice attributes, so set them first.
- for sliver_attribute in filter(lambda a: a['node_id'] == node_id, slice_attributes):
- sliver_attributes.append(sliver_attribute['name'])
- attributes.append({'name': sliver_attribute['name'],
- 'value': sliver_attribute['value']})
-
- for slice_attribute in filter(lambda a: a['node_id'] is None, slice_attributes):
- # Do not set any global slice attributes for
- # which there is at least one sliver attribute
- # already set.
- if slice_attribute['name'] not in sliver_attributes:
- attributes.append({'name': slice_attribute['name'],
- 'value': slice_attribute['value']})
- except Exception, err:
- attributes=[{'name':'attributes caching','value':'not implemented yet'}]
-
- slivers.append({
- 'name': slice['name'],
- 'slice_id': slice['slice_id'],
- 'instantiation': slice['instantiation'],
- 'expires': slice['expires'],
- 'keys': keys,
- 'attributes': attributes
- })
-
- nodes.append({
- 'timestamp': timestamp,
- 'node_id': node['node_id'],
- 'hostname': node['hostname'],
- 'networks': networks,
- 'groups': groups,
- 'conf_files': conf_files.values(),
- 'slivers': slivers
- })
-
- return nodes
+ conf_file = all_conf_files[conf_file_id]
+ conf_files[conf_file['dest']] = conf_file
+
+ # Node configuration files take precedence over node group
+ # configuration files.
+ for conf_file_id in node['conf_file_ids']:
+ if conf_file_id in all_conf_files:
+ conf_file = all_conf_files[conf_file_id]
+ conf_files[conf_file['dest']] = conf_file
+
+ # Get all (enabled) initscripts
+ initscripts = InitScripts(self.api, {'enabled': True})
+
+ # Get system slices
+ system_slice_tags = SliceTags(self.api, {'tagname': 'system', 'value': '1'}).dict('slice_id')
+ system_slice_ids = system_slice_tags.keys()
+
+ # Get nm-controller slices
+ # xxx Thierry: should these really be exposed regardless of their mapping to nodes ?
+ controller_and_delegated_slices = Slices(self.api, {'instantiation': ['nm-controller', 'delegated']}, ['slice_id']).dict('slice_id')
+ controller_and_delegated_slice_ids = controller_and_delegated_slices.keys()
+ slice_ids = system_slice_ids + controller_and_delegated_slice_ids + node['slice_ids']
+
+ slivers = get_slivers(self.api, self.caller, auth, slice_ids, node)
+
+ # get the special accounts and keys needed for the node
+ # root
+ # site_admin
+ accounts = []
+ if False and 'site_id' not in node:
+ nodes = Nodes(self.api, node['node_id'])
+ node = nodes[0]
+
+ # used in conjunction with reduce to flatten lists, like in
+ # reduce ( reduce_flatten_list, [ [1] , [2,3] ], []) => [ 1,2,3 ]
+ # NOTE(review): this is just list concatenation (operator.add); for large
+ # inputs itertools.chain would avoid reduce's quadratic re-copying.
+ def reduce_flatten_list (x,y): return x+y
+
+ # root users are users marked with the tag 'isrootonsite'. Hack for Mlab and other sites in which admins participate in diagnosing problems.
+ # Returns the ssh public keys (key['key'] strings) of the enabled persons
+ # at the given site whose 'isrootonsite' person tag has the value 'true'.
+ def get_site_root_user_keys(api,site_id_or_name):
+ site = Sites (api,site_id_or_name,['person_ids'])[0]
+ all_site_persons = site['person_ids']
+ # NOTE(review): uses the closed-over self.api here but the 'api'
+ # parameter everywhere else in this helper -- confirm intentional.
+ all_site_person_tags = PersonTags(self.api,{'person_id':all_site_persons,'tagname':'isrootonsite'},['value','person_id'])
+ site_root_person_tags = filter(lambda r:r['value']=='true',all_site_person_tags)
+ site_root_person_ids = map(lambda r:r['person_id'],site_root_person_tags)
+ # NOTE(review): the '|role_ids' : [20, 40] filter mirrors the
+ # power-user query below; verify root-tagged users are really meant
+ # to be limited to those same two roles.
+ key_ids = reduce (reduce_flatten_list,
+ [ p['key_ids'] for p in \
+ Persons(api,{ 'person_id':site_root_person_ids,
+ 'enabled':True, '|role_ids' : [20, 40] },
+ ['key_ids']) ],
+ [])
+ return [ key['key'] for key in Keys (api, key_ids) if key['key_type']=='ssh']
+
+ # power users are pis and techs
+ # Returns the ssh public keys of all enabled persons at the given site
+ # matching '|role_ids' [20, 40] -- presumably the pi and tech role ids;
+ # TODO confirm against the roles table.
+ def get_site_power_user_keys(api,site_id_or_name):
+ site = Sites (api,site_id_or_name,['person_ids'])[0]
+ # flatten the per-person key_ids lists into one list of key ids
+ key_ids = reduce (reduce_flatten_list,
+ [ p['key_ids'] for p in \
+ Persons(api,{ 'person_id':site['person_ids'],
+ 'enabled':True, '|role_ids' : [20, 40] },
+ ['key_ids']) ],
+ [])
+ # keep only ssh-type keys
+ return [ key['key'] for key in Keys (api, key_ids) if key['key_type']=='ssh']
+
+ # all admins regardless of their site
+ # Returns the ssh public keys of every enabled local (peer_id None)
+ # person with role id 10 -- presumably the admin role; TODO confirm.
+ def get_all_admin_keys(api):
+ # flatten the per-person key_ids lists into one list of key ids
+ key_ids = reduce (reduce_flatten_list,
+ [ p['key_ids'] for p in \
+ Persons(api, {'peer_id':None, 'enabled':True, '|role_ids':[10] },
+ ['key_ids']) ],
+ [])
+ # keep only ssh-type keys
+ return [ key['key'] for key in Keys (api, key_ids) if key['key_type']=='ssh']
+
+ # 'site_admin' account setup
+ personsitekeys=get_site_power_user_keys(self.api,node['site_id'])
+ accounts.append({'name':'site_admin','keys':personsitekeys})
+
+ # 'root' account setup on nodes from all 'admin' users and ones marked with 'isrootonsite' for this site
+ siterootkeys=get_site_root_user_keys(self.api,node['site_id'])
+ personsitekeys=get_all_admin_keys(self.api)
+ personsitekeys.extend(siterootkeys)
+
+ accounts.append({'name':'root','keys':personsitekeys})
+
+ hrn = GetNodeHrn(self.api,self.caller).call(auth,node['node_id'])
+
+ # XMPP config for omf federation
+ try:
+ if not self.api.config.PLC_OMF_ENABLED:
+ raise Exception,"OMF not enabled"
+ xmpp={'server':self.api.config.PLC_OMF_XMPP_SERVER}
+ except:
+ xmpp={'server':None}
+
+ node.update_last_contact()
+
+ # expose leases & reservation policy
+ # in a first implementation we only support none and lease_or_idle
+ lease_exposed_fields = [ 'slice_id', 't_from', 't_until', 'name', ]
+ leases=None
+ if node['node_type'] != 'reservable':
+ reservation_policy='none'
+ else:
+ reservation_policy='lease_or_idle'
+ # expose the leases for the next 24 hours
+ leases = [ dict ( [ (k,l[k]) for k in lease_exposed_fields ] )
+ for l in Leases (self.api, {'node_id':node['node_id'],
+ 'clip': (timestamp, timestamp+24*Duration.HOUR),
+ '-SORT': 't_from',
+ }) ]
+ granularity=self.api.config.PLC_RESERVATION_GRANULARITY
+
+ raw_data = {
+ 'timestamp': timestamp,
+ 'node_id': node['node_id'],
+ 'hostname': node['hostname'],
+ 'interfaces': interfaces,
+ 'groups': groups,
+ 'conf_files': conf_files.values(),
+ 'initscripts': initscripts,
+ 'slivers': slivers,
+ 'accounts': accounts,
+ 'xmpp':xmpp,
+ 'hrn':hrn,
+ 'reservation_policy': reservation_policy,
+ 'leases':leases,
+ 'lease_granularity': granularity,
+ }
+
+ sanitized_data = sanitize_for_pickle (raw_data)
+ return sanitized_data
+