# description/category area is unlikely to make it into the database
#
-# slice vref
-# xxx - don't expose yet in api interface and slices dont know how to use that yet
+#
+### system slices - at least planetflow - still rely on 'vref'
+#
+
+# These following accessors are mostly of interest for implementing the
+# GetSliceFamily method, that takes into account the vref attribute,
+# as well as the 3 attributes below, and the PLC_FLAVOUR config category
+
+### slice vref
define_accessors(current_module, Slice, "Vref", "vref",
- "slice/config", "vserver reference image type",
+ "slice/config", "vserver reference image name",
get_roles=all_roles, set_roles=["admin"], expose_in_api=True)
+# xxx the accessor factory should enforce the category and descriptions provided here.
+# and BTW the tag should be created right away as far as possible, or at least when a Get is issued
+# also note that the two 'arch' instances used here will end up in the same unique TagType object,
+# so you should make sure to use the same category/description for both
+define_accessors(current_module, Slice, "Arch", "arch",
+ "node/slice/config", "node arch or slivers arch",
+ get_roles=all_roles, set_roles=["user"], expose_in_api=True)
+define_accessors(current_module, Slice, "Pldistro", "pldistro",
+ "node/slice/config", "PlanetLab distribution to use for node or slivers",
+ get_roles=all_roles, set_roles=["admin"], expose_in_api=True)
+define_accessors(current_module, Slice, "Fcdistro", "fcdistro",
+ "node/slice/config", "Fedora or CentOS distribution to use for node or slivers",
+ get_roles=all_roles, set_roles=["admin"], expose_in_api=True)
# node architecture
define_accessors(current_module, Node, "Arch", "arch",
- "node/config", "architecture name",
+ "node/slice/config", "node arch or slivers arch",
get_roles=all_roles, set_roles=tech_roles, expose_in_api=True)
# distribution to be deployed
define_accessors(current_module, Node, "Pldistro", "pldistro",
- "node/config", "PlanetLab distribution",
+ "node/slice/config", "PlanetLab distribution to use for node or slivers",
get_roles=all_roles, set_roles=["admin"], expose_in_api=True)
# node deployment (alpha, beta, ...)
define_accessors(current_module, Node, "Deployment", "deployment",
# Get slice information
slices = Slices(self.api, [slice_id_or_name])
if not slices:
- raise PLCInvalidArgument, "No such slice"
+ raise PLCInvalidArgument, "No such slice %r"%slice_id_or_name
slice = slices[0]
if slice['peer_id'] is not None:
returns = Parameter(int, '1 if successful')
+ # xxx this method is a bit spamming the events log
+ # todo : log only when a change occurs
+    # also this seems to expect the user-provided node_fields to optionally
+    # contain the 'primary_network' key that should be renamed to 'primary_interface'
def call(self, auth, node_fields):
# Update node state
if node_fields.has_key('boot_state'):
--- /dev/null
+# $Id$
+# $URL$
+from PLC.Method import Method
+from PLC.Auth import Auth
+from PLC.Faults import *
+from PLC.Parameter import *
+from PLC.Slices import Slice, Slices
+
+from PLC.Accessors.Accessors_standard import * # import slice accessors
+
+class GetSliceFamily(Method):
+ """
+ Returns the slice vserver reference image that a given slice
+ should be based on. This depends on the global PLC settings in the
+    PLC_FLAVOUR area, optionally overridden by any of the 'vref',
+ 'arch', 'pldistro', 'fcdistro' tag if set on the slice.
+ """
+
+ roles = ['admin', 'user', 'node']
+
+ # don't support sliver-specific settings yet
+ accepts = [
+ Auth(),
+ Mixed(Slice.fields['slice_id'],
+ Slice.fields['name']),
+ ]
+
+ returns = Parameter (str, "the slice-family this slice should be based upon")
+
+ #
+ ### system slices - at least planetflow - still rely on 'vref'
+ #
+ def call(self, auth, slice_id_or_name):
+ # Get slice information
+ slices = Slices(self.api, [slice_id_or_name])
+ if not slices:
+ raise PLCInvalidArgument, "No such slice %r"%slice_id_or_name
+ slice = slices[0]
+ slice_id = slice['slice_id']
+
+ # the vref tag, if set, wins
+ vref = GetSliceVref (self.api).call(auth,slice_id)
+ if vref: return vref
+
+ arch = GetSliceArch (self.api).call(auth,slice_id)
+ if not arch: arch = self.api.config.PLC_FLAVOUR_SLICE_ARCH
+
+ pldistro = GetSlicePldistro (self.api).call(auth, slice_id)
+ if not pldistro: pldistro = self.api.config.PLC_FLAVOUR_SLICE_PLDISTRO
+
+ fcdistro = GetSliceFcdistro (self.api).call(auth, slice_id)
+ if not fcdistro: fcdistro = self.api.config.PLC_FLAVOUR_SLICE_FCDISTRO
+
+ # xxx would make sense to check the corresponding vserver rpms are available
+ # in all node-families yum repos (and yumgroups, btw)
+ return "%s-%s-%s"%(pldistro,fcdistro,arch)
data = {
'timestamp': int(time.time()),
'initscripts': initscripts,
- 'slivers': get_slivers(self.api, [slice['slice_id']]),
+ 'slivers': get_slivers(self.api, auth, [slice['slice_id']]),
}
# Sign ticket
from PLC.Keys import Key, Keys
from PLC.SliceTags import SliceTag, SliceTags
from PLC.InitScripts import InitScript, InitScripts
+from PLC.Methods.GetSliceFamily import GetSliceFamily
# XXX used to check if slice expiration time is sane
MAXINT = 2L**31-1
-def get_slivers(api, slice_filter, node = None):
+def get_slivers(api, auth, slice_filter, node = None):
# Get slice information
slices = Slices(api, slice_filter, ['slice_id', 'name', 'instantiation', 'expires', 'person_ids', 'slice_tag_ids'])
sliver_attributes = []
if node is not None:
- for sliver_attribute in filter(lambda a: a['node_id'] == node['node_id'], slice_tags):
+ for sliver_attribute in [ a for a in slice_tags if a['node_id'] == node['node_id'] ]:
sliver_attributes.append(sliver_attribute['tagname'])
attributes.append({'tagname': sliver_attribute['tagname'],
'value': sliver_attribute['value']})
# set nodegroup slice attributes
- for slice_tag in filter(lambda a: a['nodegroup_id'] in node['nodegroup_ids'], slice_tags):
+ for slice_tag in [ a for a in slice_tags if a['nodegroup_id'] in node['nodegroup_ids'] ]:
# Do not set any nodegroup slice attributes for
# which there is at least one sliver attribute
# already set.
attributes.append({'tagname': slice_tag['tagname'],
'value': slice_tag['value']})
- for slice_tag in filter(lambda a: a['node_id'] is None, slice_tags):
+ for slice_tag in [ a for a in slice_tags if a['node_id'] is None ]:
# Do not set any global slice attributes for
# which there is at least one sliver attribute
# already set.
# checked with an assertion
if slice['expires'] > MAXINT: slice['expires']= MAXINT
+ # expose the slice vref as computed by GetSliceFamily
+ family = GetSliceFamily (api).call(auth, slice['slice_id'])
+
slivers.append({
'name': slice['name'],
'slice_id': slice['slice_id'],
'instantiation': slice['instantiation'],
'expires': slice['expires'],
'keys': keys,
- 'attributes': attributes
+ 'attributes': attributes,
+ 'GetSliceFamily': family,
})
return slivers
system_slice_ids = system_slice_tags.keys()
# Get nm-controller slices
+    # xxx Thierry: should these really be exposed regardless of their mapping to nodes?
controller_and_delegated_slices = Slices(self.api, {'instantiation': ['nm-controller', 'delegated']}, ['slice_id']).dict('slice_id')
controller_and_delegated_slice_ids = controller_and_delegated_slices.keys()
slice_ids = system_slice_ids + controller_and_delegated_slice_ids + node['slice_ids']
- slivers = get_slivers(self.api, slice_ids, node)
+ slivers = get_slivers(self.api, auth, slice_ids, node)
# get the special accounts and keys needed for the node
# root
GetSessions
GetSiteTags
GetSites
+GetSliceFamily
GetSliceInstantiations
GetSliceKeys
GetSliceTags