-# $Id$
import time
from PLC.Faults import *
from PLC.Keys import Key, Keys
from PLC.SliceTags import SliceTag, SliceTags
from PLC.InitScripts import InitScript, InitScripts
-from PLC.Config import Config
+from PLC.Leases import Lease, Leases
+from PLC.Timestamp import Duration
+from PLC.Methods.GetSliceFamily import GetSliceFamily
+from PLC.PersonTags import PersonTag,PersonTags
-# XXX we don't really know whether this PLC is loaded from /etc/planetlab/plc_config or elsewhere
-plc_config = Config()
+from PLC.Accessors.Accessors_standard import *
# XXX used to check if slice expiration time is sane
MAXINT = 2L**31-1
-def get_slivers(api, slice_filter, node = None):
+# slice_filter essentially contains the slice_ids for the relevant slices (on the node + system & delegated slices)
+def get_slivers(api, caller, auth, slice_filter, node = None):
# Get slice information
slices = Slices(api, slice_filter, ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids'])
# Per-node sliver attributes take precedence over global
# slice attributes, so set them first.
# Then comes nodegroup slice attributes
- # Followed by global slice attributes
+ # Followed by global slice attributes
sliver_attributes = []
if node is not None:
- for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+ for sliver_attribute in [ a for a in slice_tags if a['node_id'] == node['node_id'] ]:
sliver_attributes.append(sliver_attribute['tagname'])
attributes.append({'tagname': sliver_attribute['tagname'],
'value': sliver_attribute['value']})
- # set nodegroup slice attributes
- for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
- # Do not set any nodegroup slice attributes for
+ # set nodegroup slice attributes
+ for slice_tag in [ a for a in slice_tags if a['nodegroup_id'] in node['nodegroup_ids'] ]:
+ # Do not set any nodegroup slice attributes for
# which there is at least one sliver attribute
# already set.
- if slice_tag not in slice_tags:
- attributes.append({'tagname': slice_tag['tagname'],
- 'value': slice_tag['value']})
+ if slice_tag not in slice_tags:
+ attributes.append({'tagname': slice_tag['tagname'],
+ 'value': slice_tag['value']})
- for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+ for slice_tag in [ a for a in slice_tags if a['node_id'] is None ]:
# Do not set any global slice attributes for
# which there is at least one sliver attribute
# already set.
# checked with an assertion
if slice['expires'] > MAXINT: slice['expires']= MAXINT
+ # expose the slice vref as computed by GetSliceFamily
+ family = GetSliceFamily (api,caller).call(auth, slice['slice_id'])
+
slivers.append({
'name': slice['name'],
'slice_id': slice['slice_id'],
'instantiation': slice['instantiation'],
'expires': slice['expires'],
'keys': keys,
- 'attributes': attributes
+ 'attributes': attributes,
+ 'GetSliceFamily': family,
})
return slivers
-class v43GetSlivers(Method):
+### The pickle module, used in conjunction with caching, has a restriction that it does not
+### work on "connection objects." It doesn't matter if the connection object has
+### a __str__ or __repr__ method; there is a taint check that throws an exception if
+### the pickled class is found to derive from a connection.
+### (To be moved to Method.py)
+
+def sanitize_for_pickle (obj):
+    # Recursively rebuild nested containers as *plain* dict/list instances.
+    # Connection-derived mapping/sequence subclasses trip pickle's taint check
+    # (see note above), so copying into builtin types makes the result cacheable.
+    # Non-container leaves are returned unchanged.
+    if (isinstance(obj, dict)):
+        parent = dict(obj)
+        for k in parent.keys(): parent[k] = sanitize_for_pickle (parent[k])
+        return parent
+    elif (isinstance(obj, list)):
+        parent = list(obj)
+        parent = map(sanitize_for_pickle, parent)
+        return parent
+    else:
+        return obj
+
+class GetSlivers(Method):
"""
Returns a struct containing information about the specified node
(or calling node, if called by a node and node_id_or_hostname is
'timestamp': Parameter(int, "Timestamp of this call, in seconds since UNIX epoch"),
'node_id': Node.fields['node_id'],
'hostname': Node.fields['hostname'],
- 'networks': [Interface.fields],
+ 'interfaces': [Interface.fields],
'groups': [NodeGroup.fields['groupname']],
'conf_files': [ConfFile.fields],
- 'initscripts': [InitScript.fields],
+ 'initscripts': [InitScript.fields],
'accounts': [{
'name': Parameter(str, "unix style account name", max = 254),
'keys': [{
'tagname': SliceTag.fields['tagname'],
'value': SliceTag.fields['value']
}]
- }]
+ }],
+ # how to reach the xmpp server
+ 'xmpp': {'server':Parameter(str,"hostname for the XMPP server"),
+ 'user':Parameter(str,"username for the XMPP server"),
+                 'password':Parameter(str,"password for the XMPP server"),
+ },
+ # we consider three policies (reservation-policy)
+ # none : the traditional way to use a node
+ # lease_or_idle : 0 or 1 slice runs at a given time
+ # lease_or_shared : 1 slice is running during a lease, otherwise all the slices come back
+ 'reservation_policy': Parameter(str,"one among none, lease_or_idle, lease_or_shared"),
+ 'leases': [ { 'slice_id' : Lease.fields['slice_id'],
+ 't_from' : Lease.fields['t_from'],
+ 't_until' : Lease.fields['t_until'],
+ }],
}
def call(self, auth, node_id_or_hostname = None):
- global plc_config
+ return self.raw_call(auth, node_id_or_hostname)
+
+ def raw_call(self, auth, node_id_or_hostname):
timestamp = int(time.time())
# Get node
raise PLCInvalidArgument, "Not a local node"
# Get interface information
- networks = Interfaces(self.api, node['interface_ids'])
+ interfaces = Interfaces(self.api, node['interface_ids'])
# Get node group information
nodegroups = NodeGroups(self.api, node['nodegroup_ids']).dict('groupname')
for conf_file in all_conf_files.values():
if not conf_file['node_ids'] and not conf_file['nodegroup_ids']:
conf_files[conf_file['dest']] = conf_file
-
+
# Node group configuration files take precedence over global
# ones. If a node belongs to multiple node groups for which
# the same configuration file is defined, it is undefined
if conf_file_id in all_conf_files:
conf_file = all_conf_files[conf_file_id]
conf_files[conf_file['dest']] = conf_file
-
+
# Node configuration files take precedence over node group
# configuration files.
for conf_file_id in node['conf_file_ids']:
if conf_file_id in all_conf_files:
conf_file = all_conf_files[conf_file_id]
- conf_files[conf_file['dest']] = conf_file
+ conf_files[conf_file['dest']] = conf_file
- # Get all (enabled) initscripts
- initscripts = InitScripts(self.api, {'enabled': True})
+ # Get all (enabled) initscripts
+ initscripts = InitScripts(self.api, {'enabled': True})
# Get system slices
system_slice_tags = SliceTags(self.api, {'tagname': 'system', 'value': '1'}).dict('slice_id')
system_slice_ids = system_slice_tags.keys()
-
- # Get nm-controller slices
- controller_and_delegated_slices = Slices(self.api, {'instantiation': ['nm-controller', 'delegated']}, ['slice_id']).dict('slice_id')
- controller_and_delegated_slice_ids = controller_and_delegated_slices.keys()
- slice_ids = system_slice_ids + controller_and_delegated_slice_ids + node['slice_ids']
- slivers = get_slivers(self.api, slice_ids, node)
+ # Get nm-controller slices
+ # xxx Thierry: should these really be exposed regardless of their mapping to nodes ?
+ controller_and_delegated_slices = Slices(self.api, {'instantiation': ['nm-controller', 'delegated']}, ['slice_id']).dict('slice_id')
+ controller_and_delegated_slice_ids = controller_and_delegated_slices.keys()
+ slice_ids = system_slice_ids + controller_and_delegated_slice_ids + node['slice_ids']
+
+ slivers = get_slivers(self.api, self.caller, auth, slice_ids, node)
# get the special accounts and keys needed for the node
# root
nodes = Nodes(self.api, node['node_id'])
node = nodes[0]
- def getpersonsitekeys(site_id_or_name,theroles):
- site_filter = site_id_or_name
- site_return_filter = ['person_ids']
- sites = Sites(self.api, site_filter, site_return_filter)
- site = sites[0]
- person_filter = {'person_id':site['person_ids'],'enabled':True}
- person_return_filter = ['person_id', 'enabled', 'key_ids','role_ids']
- site_persons = Persons(self.api, person_filter, person_return_filter)
-
- # XXX This snippet below maps role names to role_ids,
- # which is really DUMB. Why can't one just pass 'roles'
- # as a return_filter to Persons() above.
- __roles = {}
- dbroles = Roles(self.api)
- for dbrole in dbroles:
- __roles[dbrole['name']]=dbrole['role_id']
- __theroles = []
- for role in theroles:
- __theroles.append(__roles[role])
- theroles=__theroles
-
- # collect the keys into a table to weed out duplicates
- site_keys = {}
- for site_person in site_persons:
- if site_person['enabled'] is False: continue
- more = True
- for role in theroles:
- if role in site_person['role_ids']:
- keys_filter = site_person['key_ids']
- keys_return_filter = ['key_id', 'key', 'key_type']
- keys = Keys(self.api, keys_filter, keys_return_filter)
- for key in keys:
- if key['key_type'] == 'ssh':
- site_keys[key['key']]=None
- return site_keys.keys()
+ # used in conjunction with reduce to flatten lists, like in
+ # reduce ( reduce_flatten_list, [ [1] , [2,3] ], []) => [ 1,2,3 ]
+ def reduce_flatten_list (x,y): return x+y
+
+ # root users are users marked with the tag 'isrootonsite'. Hack for Mlab and other sites in which admins participate in diagnosing problems.
+ def get_site_root_user_keys(api,site_id_or_name):
+ site = Sites (api,site_id_or_name,['person_ids'])[0]
+ all_site_persons = site['person_ids']
+ all_site_person_tags = PersonTags(self.api,{'person_id':all_site_persons,'tagname':'isrootonsite'},['value','person_id'])
+ site_root_person_tags = filter(lambda r:r['value']=='true',all_site_person_tags)
+ site_root_person_ids = map(lambda r:r['person_id'],site_root_person_tags)
+ key_ids = reduce (reduce_flatten_list,
+ [ p['key_ids'] for p in \
+ Persons(api,{ 'person_id':site_root_person_ids,
+ 'enabled':True, '|role_ids' : [20, 40] },
+ ['key_ids']) ],
+ [])
+ return [ key['key'] for key in Keys (api, key_ids) if key['key_type']=='ssh']
+
+ # power users are pis and techs
+ def get_site_power_user_keys(api,site_id_or_name):
+ site = Sites (api,site_id_or_name,['person_ids'])[0]
+ key_ids = reduce (reduce_flatten_list,
+ [ p['key_ids'] for p in \
+ Persons(api,{ 'person_id':site['person_ids'],
+ 'enabled':True, '|role_ids' : [20, 40] },
+ ['key_ids']) ],
+ [])
+ return [ key['key'] for key in Keys (api, key_ids) if key['key_type']=='ssh']
+
+ # all admins regardless of their site
+ def get_all_admin_keys(api):
+ key_ids = reduce (reduce_flatten_list,
+ [ p['key_ids'] for p in \
+ Persons(api, {'peer_id':None, 'enabled':True, '|role_ids':[10] },
+ ['key_ids']) ],
+ [])
+ return [ key['key'] for key in Keys (api, key_ids) if key['key_type']=='ssh']
# 'site_admin' account setup
- personsitekeys=getpersonsitekeys(node['site_id'],['pi','tech'])
+ personsitekeys=get_site_power_user_keys(self.api,node['site_id'])
accounts.append({'name':'site_admin','keys':personsitekeys})
- # 'root' account setup on nodes from all 'admin' users
- # registered with the PLC main site
- personsitekeys=getpersonsitekeys(plc_config.PLC_SLICE_PREFIX,['admin'])
- accounts.append({'name':'root','keys':personsitekeys})
+ # 'root' account setup on nodes from all 'admin' users and ones marked with 'isrootonsite' for this site
+ siterootkeys=get_site_root_user_keys(self.api,node['site_id'])
+ personsitekeys=get_all_admin_keys(self.api)
+ personsitekeys.extend(siterootkeys)
- node.update_last_contact()
+ accounts.append({'name':'root','keys':personsitekeys})
- return {
+ hrn = GetNodeHrn(self.api,self.caller).call(auth,node['node_id'])
+
+ # XMPP config for omf federation
+ try:
+ if not self.api.config.PLC_OMF_ENABLED:
+ raise Exception,"OMF disabled"
+ xmpp={'server':self.api.config.PLC_OMF_XMPP_SERVER,
+ 'user':self.api.config.PLC_OMF_XMPP_USER,
+ 'password':self.api.config.PLC_OMF_XMPP_PASSWORD,
+ }
+ except:
+ xmpp={'server':None,'user':None,'password':None}
+
+ node.update_last_contact()
+
+ # expose leases & reservation policy
+ # in a first implementation we only support none and lease_or_idle
+ lease_exposed_fields = [ 'slice_id', 't_from', 't_until', 'name', ]
+ leases=None
+ if node['node_type'] != 'reservable':
+ reservation_policy='none'
+ else:
+ reservation_policy='lease_or_idle'
+ # expose the leases for the next 24 hours
+ leases = [ dict ( [ (k,l[k]) for k in lease_exposed_fields ] )
+ for l in Leases (self.api, {'node_id':node['node_id'],
+ 'clip': (timestamp, timestamp+24*Duration.HOUR),
+ '-SORT': 't_from',
+ }) ]
+ granularity=self.api.config.PLC_RESERVATION_GRANULARITY
+
+ raw_data = {
'timestamp': timestamp,
'node_id': node['node_id'],
'hostname': node['hostname'],
- 'networks': networks,
+ 'interfaces': interfaces,
'groups': groups,
'conf_files': conf_files.values(),
- 'initscripts': initscripts,
+ 'initscripts': initscripts,
'slivers': slivers,
- 'accounts': accounts
- }
+ 'accounts': accounts,
+ 'xmpp':xmpp,
+ 'hrn':hrn,
+ 'reservation_policy': reservation_policy,
+ 'leases':leases,
+ 'lease_granularity': granularity,
+ }
-class v42GetSlivers(v43GetSlivers):
- """
- Legacy wrapper for v43GetSlivers.
- """
-
- def call(self, auth, node_id_or_hostname = None):
- result = v43GetSlivers.call(self,auth,node_id_or_hostname)
- networks = result['networks']
-
- for i in range(0,len(networks)):
- network = networks[i]
- if network.has_key("interface_id"):
- network['nodenetwork_id']=network['interface_id']
- if network.has_key("interface_tag_ids"):
- network['nodenetwork_setting_ids']=network['interface_tag_ids']
- networks[i]=network
-
- result['networks']=networks
- return result
-
-class GetSlivers(v42GetSlivers):
- """
- Returns a struct containing information about the specified node
- (or calling node, if called by a node and node_id_or_hostname is
- not specified), including the current set of slivers bound to the
- node.
-
- All of the information returned by this call can be gathered from
- other calls, e.g. GetNodes, GetInterfaces, GetSlices, etc. This
- function exists almost solely for the benefit of Node Manager.
- """
+ sanitized_data = sanitize_for_pickle (raw_data)
+ return sanitized_data
- pass