# -*- coding: utf-8 -*-
from nepi.core.attributes import Attribute, AttributesMap
+from nepi.core.connector import ConnectorType
+from nepi.core.factory import Factory
import sys
import getpass
-from nepi.util import validation
-from nepi.util.constants import ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP, DeploymentConfiguration
-
-# Attribute categories
-CATEGORY_DEPLOYMENT = "Deployment"
-
-class VersionedMetadataInfo(object):
+import nepi.util.environ
+from nepi.util import tags, validation
+from nepi.util.constants import ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP, \
+ DeploymentConfiguration as DC, \
+ AttributeCategories as AC
+
+class Parallel(object):
+ def __init__(self, factory, maxthreads = 64):
+ self.factory = factory
+ self.maxthreads = maxthreads
+
+class MetadataInfo(object):
@property
def connector_types(self):
""" dictionary of dictionaries with allowed connection information.
@property
def create_order(self):
""" list of factory ids that indicates the order in which the elements
- should be instantiated.
+ should be instantiated. If wrapped within a Parallel instance, they
+ will be instantiated in parallel.
"""
raise NotImplementedError
@property
def configure_order(self):
""" list of factory ids that indicates the order in which the elements
- should be configured.
+ should be configured. If wrapped within a Parallel instance, they
+ will be configured in parallel.
"""
raise NotImplementedError
@property
def preconfigure_order(self):
""" list of factory ids that indicates the order in which the elements
- should be preconfigured.
+ should be preconfigured. If wrapped within a Parallel instance, they
+            will be preconfigured in parallel.
Default: same as configure_order
"""
@property
def prestart_order(self):
""" list of factory ids that indicates the order in which the elements
- should be prestart-configured.
+ should be prestart-configured. If wrapped within a Parallel instance, they
+            will be prestart-configured in parallel.
Default: same as configure_order
"""
@property
def start_order(self):
""" list of factory ids that indicates the order in which the elements
- should be started.
+ should be started. If wrapped within a Parallel instance, they
+ will be started in parallel.
Default: same as configure_order
"""
def factories_info(self):
""" dictionary of dictionaries of factory specific information
factory_id: dict({
- "allow_addresses": whether the box allows adding IP addresses,
- "allow_routes": wether the box allows adding routes,
- "has_addresses": whether the box allows obtaining IP addresses,
- "has_routes": wether the box allows obtaining routes,
"help": help text,
"category": category the element belongs to,
"create_function": function for element instantiation,
"""
raise NotImplementedError
+ @property
+ def testbed_id(self):
+ """ ID for the testbed """
+ raise NotImplementedError
+
+ @property
+ def testbed_version(self):
+ """ version for the testbed """
+ raise NotImplementedError
+
class Metadata(object):
- STANDARD_BOX_ATTRIBUTES = (
- ("label", dict(
- name = "label",
- validation_function = validation.is_string,
- type = Attribute.STRING,
- flags = Attribute.DesignOnly,
- help = "A unique identifier for referring to this box",
- )),
- )
-
- # Shorthand for DeploymentConfiguration
- # Syntactic sugar to shorten stuff
- DC = DeploymentConfiguration
-
- STANDARD_TESTBED_ATTRIBUTES = (
- ("home_directory", dict(
- name = "homeDirectory",
- validation_function = validation.is_string,
- help = "Path to the directory where traces and other files will be stored",
- type = Attribute.STRING,
- value = "",
- flags = Attribute.DesignOnly
- )),
- )
+ # These attributes should be added to all boxes
+ STANDARD_BOX_ATTRIBUTES = dict({
+ "label" : dict({
+ "name" : "label",
+ "validation_function" : validation.is_string,
+ "type" : Attribute.STRING,
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "help" : "A unique identifier for referring to this box",
+ }),
+ })
+
+ # These are the attribute definitions for tagged attributes
+ STANDARD_TAGGED_ATTRIBUTES_DEFINITIONS = dict({
+ "maxAddresses" : dict({
+ "name" : "maxAddresses",
+ "validation_function" : validation.is_integer,
+ "type" : Attribute.INTEGER,
+ "value" : 1,
+ "flags" : Attribute.DesignReadOnly |\
+ Attribute.ExecInvisible |\
+ Attribute.Metadata,
+ "help" : "The maximum allowed number of addresses",
+ }),
+ })
+
+ # Attributes to be added to all boxes with specific tags
+ STANDARD_TAGGED_BOX_ATTRIBUTES = dict({
+ tags.ALLOW_ADDRESSES : ["maxAddresses"],
+ tags.HAS_ADDRESSES : ["maxAddresses"],
+ })
+
+ # These attributes should be added to all testbeds
+ STANDARD_TESTBED_ATTRIBUTES = dict({
+ "home_directory" : dict({
+ "name" : "homeDirectory",
+ "validation_function" : validation.is_string,
+ "help" : "Path to the directory where traces and other files will be stored",
+ "type" : Attribute.STRING,
+ "value" : "",
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ }),
+ "label" : dict({
+ "name" : "label",
+ "validation_function" : validation.is_string,
+ "type" : Attribute.STRING,
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "help" : "A unique identifier for referring to this testbed",
+ }),
+ })
- DEPLOYMENT_ATTRIBUTES = (
+    # Deployment configuration attributes, added to all testbeds
+ DEPLOYMENT_ATTRIBUTES = dict({
# TESTBED DEPLOYMENT ATTRIBUTES
- (DC.DEPLOYMENT_ENVIRONMENT_SETUP, dict(
- name = DC.DEPLOYMENT_ENVIRONMENT_SETUP,
- validation_function = validation.is_string,
- help = "Shell commands to run before spawning TestbedController processes",
- type = Attribute.STRING,
- flags = Attribute.DesignOnly,
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.DEPLOYMENT_MODE, dict(name = DC.DEPLOYMENT_MODE,
- help = "Instance execution mode",
- type = Attribute.ENUM,
- value = DC.MODE_SINGLE_PROCESS,
- allowed = [
+ DC.DEPLOYMENT_ENVIRONMENT_SETUP : dict({
+ "name" : DC.DEPLOYMENT_ENVIRONMENT_SETUP,
+ "validation_function" : validation.is_string,
+ "help" : "Shell commands to run before spawning TestbedController processes",
+ "type" : Attribute.STRING,
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.DEPLOYMENT_MODE: dict({
+ "name" : DC.DEPLOYMENT_MODE,
+ "help" : "Instance execution mode",
+ "type" : Attribute.ENUM,
+ "value" : DC.MODE_SINGLE_PROCESS,
+ "allowed" : [
DC.MODE_DAEMON,
DC.MODE_SINGLE_PROCESS
],
- flags = Attribute.DesignOnly,
- validation_function = validation.is_enum,
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.DEPLOYMENT_COMMUNICATION, dict(name = DC.DEPLOYMENT_COMMUNICATION,
- help = "Instance communication mode",
- type = Attribute.ENUM,
- value = DC.ACCESS_LOCAL,
- allowed = [
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_enum,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.DEPLOYMENT_COMMUNICATION : dict({
+ "name" : DC.DEPLOYMENT_COMMUNICATION,
+ "help" : "Instance communication mode",
+ "type" : Attribute.ENUM,
+ "value" : DC.ACCESS_LOCAL,
+ "allowed" : [
DC.ACCESS_LOCAL,
DC.ACCESS_SSH
],
- flags = Attribute.DesignOnly,
- validation_function = validation.is_enum,
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.DEPLOYMENT_HOST, dict(name = DC.DEPLOYMENT_HOST,
- help = "Host where the testbed will be executed",
- type = Attribute.STRING,
- value = "localhost",
- flags = Attribute.DesignOnly,
- validation_function = validation.is_string,
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.DEPLOYMENT_USER, dict(name = DC.DEPLOYMENT_USER,
- help = "User on the Host to execute the testbed",
- type = Attribute.STRING,
- value = getpass.getuser(),
- flags = Attribute.DesignOnly,
- validation_function = validation.is_string,
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.DEPLOYMENT_KEY, dict(name = DC.DEPLOYMENT_KEY,
- help = "Path to SSH key to use for connecting",
- type = Attribute.STRING,
- flags = Attribute.DesignOnly,
- validation_function = validation.is_string,
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.DEPLOYMENT_PORT, dict(name = DC.DEPLOYMENT_PORT,
- help = "Port on the Host",
- type = Attribute.INTEGER,
- value = 22,
- flags = Attribute.DesignOnly,
- validation_function = validation.is_integer,
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.ROOT_DIRECTORY, dict(name = DC.ROOT_DIRECTORY,
- help = "Root directory for storing process files",
- type = Attribute.STRING,
- value = ".",
- flags = Attribute.DesignOnly,
- validation_function = validation.is_string, # TODO: validation.is_path
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.USE_AGENT, dict(name = DC.USE_AGENT,
- help = "Use -A option for forwarding of the authentication agent, if ssh access is used",
- type = Attribute.BOOL,
- value = False,
- flags = Attribute.DesignOnly,
- validation_function = validation.is_bool,
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.LOG_LEVEL, dict(name = DC.LOG_LEVEL,
- help = "Log level for instance",
- type = Attribute.ENUM,
- value = DC.ERROR_LEVEL,
- allowed = [
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_enum,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.DEPLOYMENT_HOST : dict({
+ "name" : DC.DEPLOYMENT_HOST,
+ "help" : "Host where the testbed will be executed",
+ "type" : Attribute.STRING,
+ "value" : "localhost",
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_string,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.DEPLOYMENT_USER : dict({
+ "name" : DC.DEPLOYMENT_USER,
+ "help" : "User on the Host to execute the testbed",
+ "type" : Attribute.STRING,
+ "value" : getpass.getuser(),
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_string,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.DEPLOYMENT_KEY : dict({
+ "name" : DC.DEPLOYMENT_KEY,
+ "help" : "Path to SSH key to use for connecting",
+ "type" : Attribute.STRING,
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_string,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.DEPLOYMENT_PORT : dict({
+ "name" : DC.DEPLOYMENT_PORT,
+ "help" : "Port on the Host",
+ "type" : Attribute.INTEGER,
+ "value" : 22,
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_integer,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.ROOT_DIRECTORY : dict({
+ "name" : DC.ROOT_DIRECTORY,
+ "help" : "Root directory for storing process files",
+ "type" : Attribute.STRING,
+ "value" : ".",
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_string, # TODO: validation.is_path
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.USE_AGENT : dict({
+ "name" : DC.USE_AGENT,
+ "help" : "Use -A option for forwarding of the authentication agent, if ssh access is used",
+ "type" : Attribute.BOOL,
+ "value" : False,
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_bool,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.USE_SUDO : dict({
+ "name" : DC.USE_SUDO,
+            "help" : "Use sudo to run the daemon process. This option only takes place when the server runs in daemon mode.",
+ "type" : Attribute.BOOL,
+ "value" : False,
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_bool,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.LOG_LEVEL : dict({
+ "name" : DC.LOG_LEVEL,
+ "help" : "Log level for instance",
+ "type" : Attribute.ENUM,
+ "value" : DC.ERROR_LEVEL,
+ "allowed" : [
DC.ERROR_LEVEL,
DC.DEBUG_LEVEL
],
- flags = Attribute.DesignOnly,
- validation_function = validation.is_enum,
- category = CATEGORY_DEPLOYMENT,
- )),
- (DC.RECOVER, dict(name = DC.RECOVER,
- help = "Do not intantiate testbeds, rather, reconnect to already-running instances. Used to recover from a dead controller.",
- type = Attribute.BOOL,
- value = False,
- flags = Attribute.DesignOnly,
- validation_function = validation.is_bool,
- category = CATEGORY_DEPLOYMENT,
- )),
- )
-
- STANDARD_TESTBED_ATTRIBUTES += DEPLOYMENT_ATTRIBUTES
-
- del DC
-
-
- STANDARD_ATTRIBUTE_BUNDLES = {
- "tun_proto" : dict({
- "name": "tun_proto",
- "help": "TUNneling protocol used",
- "type": Attribute.STRING,
- "flags": Attribute.Invisible,
- "validation_function": validation.is_string,
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_enum,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ DC.RECOVERY_POLICY : dict({
+ "name" : DC.RECOVERY_POLICY,
+ "help" : "Specifies what action to take in the event of a failure.",
+ "type" : Attribute.ENUM,
+ "value" : DC.POLICY_FAIL,
+ "allowed" : [
+ DC.POLICY_FAIL,
+ DC.POLICY_RECOVER,
+ DC.POLICY_RESTART,
+ ],
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_enum,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ })
+ PROXY_ATTRIBUTES = dict({
+ DC.RECOVER : dict({
+ "name" : DC.RECOVER,
+            "help" : "Do not instantiate testbeds, rather, reconnect to already-running instances. Used to recover from a dead controller.",
+ "type" : Attribute.BOOL,
+ "value" : False,
+ "flags" : Attribute.ExecReadOnly |\
+ Attribute.ExecImmutable |\
+ Attribute.Metadata,
+ "validation_function" : validation.is_bool,
+ "category" : AC.CATEGORY_DEPLOYMENT,
+ }),
+ })
+ PROXY_ATTRIBUTES.update(DEPLOYMENT_ATTRIBUTES)
+
+ # These attributes could appear in the boxes attribute list
+ STANDARD_BOX_ATTRIBUTE_DEFINITIONS = dict({
+ "tun_proto" : dict({
+ "name" : "tun_proto",
+ "help" : "TUNneling protocol used",
+ "type" : Attribute.STRING,
+ "flags" : Attribute.DesignInvisible | \
+ Attribute.ExecInvisible | \
+ Attribute.ExecImmutable | \
+ Attribute.Metadata,
+ "validation_function" : validation.is_string,
+ }),
+ "tun_key" : dict({
+ "name" : "tun_key",
+ "help" : "Randomly selected TUNneling protocol cryptographic key. "
+ "Endpoints must agree to use the minimum (in lexicographic order) "
+ "of both the remote and local sides.",
+ "type" : Attribute.STRING,
+ "flags" : Attribute.DesignInvisible | \
+ Attribute.ExecInvisible | \
+ Attribute.ExecImmutable | \
+ Attribute.Metadata,
+ "validation_function" : validation.is_string,
}),
- "tun_key" : dict({
- "name": "tun_key",
- "help": "Randomly selected TUNneling protocol cryptographic key. "
- "Endpoints must agree to use the minimum (in lexicographic order) "
- "of both the remote and local sides.",
- "type": Attribute.STRING,
- "flags": Attribute.Invisible,
- "validation_function": validation.is_string,
+ "tun_addr" : dict({
+ "name": "tun_addr",
+ "help" : "Address (IP, unix socket, whatever) of the tunnel endpoint",
+ "type" : Attribute.STRING,
+ "flags" : Attribute.DesignInvisible | \
+ Attribute.ExecInvisible | \
+ Attribute.ExecImmutable | \
+ Attribute.Metadata,
+ "validation_function" : validation.is_string,
}),
- "tun_addr" : dict({
- "name": "tun_addr",
- "help": "Address (IP, unix socket, whatever) of the tunnel endpoint",
- "type": Attribute.STRING,
- "flags": Attribute.Invisible,
- "validation_function": validation.is_string,
+ "tun_port" : dict({
+ "name" : "tun_port",
+ "help" : "IP port of the tunnel endpoint",
+ "type" : Attribute.INTEGER,
+ "flags" : Attribute.DesignInvisible | \
+ Attribute.ExecInvisible | \
+ Attribute.ExecImmutable | \
+ Attribute.Metadata,
+ "validation_function" : validation.is_integer,
}),
- "tun_port" : dict({
- "name": "tun_port",
- "help": "IP port of the tunnel endpoint",
- "type": Attribute.INTEGER,
- "flags": Attribute.Invisible,
- "validation_function": validation.is_integer,
+ "tun_cipher" : dict({
+ "name" : "tun_cipher",
+ "help" : "Cryptographic cipher used for tunnelling",
+ "type" : Attribute.ENUM,
+ "value" : "AES",
+ "allowed" : [
+ "AES",
+ "Blowfish",
+ "DES3",
+ "DES",
+ "PLAIN",
+ ],
+ "flags" : Attribute.ExecImmutable,
+ "validation_function" : validation.is_enum,
}),
- ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP : dict({
- "name": ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP,
- "help": "Commands to set up the environment needed to run NEPI testbeds",
- "type": Attribute.STRING,
- "flags": Attribute.Invisible,
- "validation_function": validation.is_string
+ ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP : dict({
+ "name" : ATTR_NEPI_TESTBED_ENVIRONMENT_SETUP,
+ "help" : "Commands to set up the environment needed to run NEPI testbeds",
+ "type" : Attribute.STRING,
+ "flags" : Attribute.DesignInvisible | \
+ Attribute.ExecInvisible | \
+ Attribute.ExecImmutable | \
+ Attribute.Metadata,
+ "validation_function" : validation.is_string
}),
- }
+ })
+ STANDARD_TESTBED_ATTRIBUTES.update(DEPLOYMENT_ATTRIBUTES.copy())
- def __init__(self, testbed_id, version):
- self._version = version
+ def __init__(self, testbed_id):
self._testbed_id = testbed_id
- metadata_module = self._load_versioned_metadata_module()
- self._metadata = metadata_module.VersionedMetadataInfo()
+ metadata_module = self._load_metadata_module()
+ self._metadata = metadata_module.MetadataInfo()
+ if testbed_id != self._metadata.testbed_id:
+ raise RuntimeError("Bad testbed id. Asked for %s, got %s" % \
+ (testbed_id, self._metadata.testbed_id ))
@property
def create_order(self):
def start_order(self):
return self._metadata.start_order
+ @property
+ def testbed_version(self):
+ return self._metadata.testbed_version
+
+ @property
+ def testbed_id(self):
+ return self._testbed_id
+
+ @property
+ def supported_recovery_policies(self):
+ return self._metadata.supported_recovery_policies
+
def testbed_attributes(self):
attributes = AttributesMap()
-
- # standard attributes
- self._add_standard_attributes(attributes, None, True, False,
- self.STANDARD_TESTBED_ATTRIBUTES)
-
- # custom attributes - they override standard ones
- for attr_info in self._metadata.testbed_attributes.values():
- name = attr_info["name"]
- help = attr_info["help"]
- type = attr_info["type"]
- value = attr_info["value"] if "value" in attr_info else None
- range = attr_info["range"] if "range" in attr_info else None
- allowed = attr_info["allowed"] if "allowed" in attr_info else None
- flags = attr_info["flags"] if "flags" in attr_info \
- else Attribute.NoFlags
- validation_function = attr_info["validation_function"]
- category = attr_info["category"] if "category" in attr_info else None
- attributes.add_attribute(name, help, type, value,
- range, allowed, flags, validation_function, category)
-
+ testbed_attributes = self._testbed_attributes()
+ self._add_attributes(attributes.add_attribute, testbed_attributes)
return attributes
- def build_design_factories(self):
- from nepi.core.design import Factory
- factories = list()
- for factory_id, info in self._metadata.factories_info.iteritems():
- help = info["help"]
- category = info["category"]
- allow_addresses = info.get("allow_addresses", False)
- allow_routes = info.get("allow_routes", False)
- has_addresses = info.get("has_addresses", False)
- has_routes = info.get("has_routes", False)
- factory = Factory(factory_id,
- allow_addresses, has_addresses,
- allow_routes, has_routes,
- help, category)
-
- # standard attributes
- self._add_standard_attributes(factory, info, True, True,
- self.STANDARD_BOX_ATTRIBUTES)
-
- # custom attributes - they override standard ones
- self._add_attributes(factory, info, "factory_attributes")
- self._add_attributes(factory, info, "box_attributes", True)
-
- self._add_design_traces(factory, info)
- self._add_tags(factory, info)
- self._add_design_connector_types(factory, info)
- factories.append(factory)
- return factories
-
- def build_execute_factories(self):
- from nepi.core.execute import Factory
+ def build_factories(self):
factories = list()
for factory_id, info in self._metadata.factories_info.iteritems():
create_function = info.get("create_function")
configure_function = info.get("configure_function")
preconfigure_function = info.get("preconfigure_function")
prestart_function = info.get("prestart_function")
- allow_addresses = info.get("allow_addresses", False)
- allow_routes = info.get("allow_routes", False)
- has_addresses = info.get("has_addresses", False)
- has_routes = info.get("has_routes", False)
- factory = Factory(factory_id, create_function, start_function,
- stop_function, status_function,
- configure_function, preconfigure_function,
+ help = info["help"]
+ category = info["category"]
+ factory = Factory(factory_id,
+ create_function,
+ start_function,
+ stop_function,
+ status_function,
+ configure_function,
+ preconfigure_function,
prestart_function,
- allow_addresses, has_addresses,
- allow_routes, has_routes)
+ help,
+ category)
- # standard attributes
- self._add_standard_attributes(factory, info, False, True,
- self.STANDARD_BOX_ATTRIBUTES)
-
- # custom attributes - they override standard ones
- self._add_attributes(factory, info, "factory_attributes")
- self._add_attributes(factory, info, "box_attributes", True)
+ factory_attributes = self._factory_attributes(info)
+ self._add_attributes(factory.add_attribute, factory_attributes)
+ box_attributes = self._box_attributes(info)
+ self._add_attributes(factory.add_box_attribute, box_attributes)
- self._add_execute_traces(factory, info)
+ self._add_traces(factory, info)
self._add_tags(factory, info)
- self._add_execute_connector_types(factory, info)
+ self._add_connector_types(factory, info)
factories.append(factory)
return factories
- def _load_versioned_metadata_module(self):
- mod_name = "nepi.testbeds.%s.metadata_v%s" % (self._testbed_id.lower(),
- self._version)
+ def _load_metadata_module(self):
+ mod_name = nepi.util.environ.find_testbed(self._testbed_id) + ".metadata"
if not mod_name in sys.modules:
__import__(mod_name)
return sys.modules[mod_name]
- def _add_standard_attributes(self, factory, info, design, box, STANDARD_ATTRIBUTES):
- if design:
- attr_bundle = STANDARD_ATTRIBUTES
+ def _testbed_attributes(self):
+        # standard attributes
+ attributes = self.STANDARD_TESTBED_ATTRIBUTES.copy()
+ # custom attributes
+ attributes.update(self._metadata.testbed_attributes.copy())
+ return attributes
+
+ def _factory_attributes(self, info):
+ tagged_attributes = self._tagged_attributes(info)
+ if "factory_attributes" in info:
+ definitions = self._metadata.attributes.copy()
+ # filter attributes corresponding to the factory_id
+ factory_attributes = self._filter_attributes(info["factory_attributes"],
+ definitions)
else:
- # Only add non-DesignOnly attributes
- def nonDesign(attr_info):
- return not (attr_info[1].get('flags',Attribute.NoFlags) & Attribute.DesignOnly)
- attr_bundle = filter(nonDesign, STANDARD_ATTRIBUTES)
- self._add_attributes(factory, info, None, box,
- attr_bundle = STANDARD_ATTRIBUTES)
-
- def _add_attributes(self, factory, info, attr_key, box_attributes = False, attr_bundle = ()):
- if not attr_bundle and info and attr_key in info:
- definitions = self.STANDARD_ATTRIBUTE_BUNDLES.copy()
+ factory_attributes = dict()
+ attributes = dict(tagged_attributes.items() + \
+ factory_attributes.items())
+ return attributes
+
+ def _box_attributes(self, info):
+ tagged_attributes = self._tagged_attributes(info)
+ if "box_attributes" in info:
+ definitions = self.STANDARD_BOX_ATTRIBUTE_DEFINITIONS.copy()
definitions.update(self._metadata.attributes)
- attr_bundle = [ (attr_id, definitions[attr_id])
- for attr_id in info[attr_key] ]
- for attr_id, attr_info in attr_bundle:
+ box_attributes = self._filter_attributes(info["box_attributes"],
+ definitions)
+ else:
+ box_attributes = dict()
+ attributes = dict(tagged_attributes.items() + \
+ box_attributes.items())
+ attributes.update(self.STANDARD_BOX_ATTRIBUTES.copy())
+ return attributes
+
+ def _tagged_attributes(self, info):
+ tagged_attributes = dict()
+ for tag_id in info.get("tags", []):
+ if tag_id in self.STANDARD_TAGGED_BOX_ATTRIBUTES:
+ attr_list = self.STANDARD_TAGGED_BOX_ATTRIBUTES[tag_id]
+ attributes = self._filter_attributes(attr_list,
+ self.STANDARD_TAGGED_ATTRIBUTES_DEFINITIONS)
+ tagged_attributes.update(attributes)
+ return tagged_attributes
+
+ def _filter_attributes(self, attr_list, definitions):
+ # filter attributes not corresponding to the factory
+ attributes = dict((attr_id, definitions[attr_id]) \
+ for attr_id in attr_list)
+ return attributes
+
+ def _add_attributes(self, add_attr_func, attributes):
+ for attr_id, attr_info in attributes.iteritems():
name = attr_info["name"]
help = attr_info["help"]
type = attr_info["type"]
- value = attr_info["value"] if "value" in attr_info else None
- range = attr_info["range"] if "range" in attr_info else None
- allowed = attr_info["allowed"] if "allowed" in attr_info \
- else None
- flags = attr_info["flags"] if "flags" in attr_info \
- and attr_info["flags"] != None \
- else Attribute.NoFlags
+ value = attr_info.get("value")
+ range = attr_info.get("range")
+ allowed = attr_info.get("allowed")
+ flags = attr_info.get("flags")
validation_function = attr_info["validation_function"]
- category = attr_info["category"] if "category" in attr_info else None
- if box_attributes:
- factory.add_box_attribute(name, help, type, value, range,
- allowed, flags, validation_function, category)
- else:
- factory.add_attribute(name, help, type, value, range,
- allowed, flags, validation_function, category)
-
- def _add_design_traces(self, factory, info):
- if "traces" in info:
- for trace in info["traces"]:
- trace_info = self._metadata.traces[trace]
- trace_id = trace_info["name"]
- help = trace_info["help"]
- factory.add_trace(trace_id, help)
-
- def _add_execute_traces(self, factory, info):
- if "traces" in info:
- for trace in info["traces"]:
- trace_info = self._metadata.traces[trace]
- trace_id = trace_info["name"]
- factory.add_trace(trace_id)
+ category = attr_info.get("category")
+ add_attr_func(name, help, type, value, range, allowed, flags,
+ validation_function, category)
- def _add_tags(self, factory, info):
- if "tags" in info:
- for tag_id in info["tags"]:
- factory.add_tag(tag_id)
+ def _add_traces(self, factory, info):
+ for trace_id in info.get("traces", []):
+ trace_info = self._metadata.traces[trace_id]
+ name = trace_info["name"]
+ help = trace_info["help"]
+ factory.add_trace(name, help)
- def _add_design_connector_types(self, factory, info):
- from nepi.core.design import ConnectorType
- if "connector_types" in info:
- connections = dict()
- for connection in self._metadata.connections:
- from_ = connection["from"]
- to = connection["to"]
- can_cross = connection["can_cross"]
- if from_ not in connections:
- connections[from_] = list()
- if to not in connections:
- connections[to] = list()
- connections[from_].append((to, can_cross))
- connections[to].append((from_, can_cross))
- for connector_id in info["connector_types"]:
- connector_type_info = self._metadata.connector_types[
- connector_id]
- name = connector_type_info["name"]
- help = connector_type_info["help"]
- max = connector_type_info["max"]
- min = connector_type_info["min"]
- testbed_id = self._testbed_id
- factory_id = factory.factory_id
- connector_type = ConnectorType(testbed_id, factory_id, name,
- help, max, min)
- for (to, can_cross) in connections[(testbed_id, factory_id,
- name)]:
- (testbed_id_to, factory_id_to, name_to) = to
- connector_type.add_allowed_connection(testbed_id_to,
- factory_id_to, name_to, can_cross)
- factory.add_connector_type(connector_type)
+ def _add_tags(self, factory, info):
+ for tag_id in info.get("tags", []):
+ factory.add_tag(tag_id)
- def _add_execute_connector_types(self, factory, info):
- from nepi.core.execute import ConnectorType
+ def _add_connector_types(self, factory, info):
if "connector_types" in info:
from_connections = dict()
to_connections = dict()
for connection in self._metadata.connections:
- from_ = connection["from"]
- to = connection["to"]
+ froms = connection["from"]
+ tos = connection["to"]
can_cross = connection["can_cross"]
- init_code = connection["init_code"] \
- if "init_code" in connection else None
- compl_code = connection["compl_code"] \
- if "compl_code" in connection else None
- if from_ not in from_connections:
- from_connections[from_] = list()
- if to not in to_connections:
- to_connections[to] = list()
- from_connections[from_].append((to, can_cross, init_code,
- compl_code))
- to_connections[to].append((from_, can_cross, init_code,
- compl_code))
+ init_code = connection.get("init_code")
+ compl_code = connection.get("compl_code")
+
+ for from_ in _expand(froms):
+ for to in _expand(tos):
+ if from_ not in from_connections:
+ from_connections[from_] = list()
+ if to not in to_connections:
+ to_connections[to] = list()
+ from_connections[from_].append((to, can_cross, init_code,
+ compl_code))
+ to_connections[to].append((from_, can_cross, init_code,
+ compl_code))
for connector_id in info["connector_types"]:
connector_type_info = self._metadata.connector_types[
connector_id]
name = connector_type_info["name"]
+ help = connector_type_info["help"]
max = connector_type_info["max"]
min = connector_type_info["min"]
testbed_id = self._testbed_id
factory_id = factory.factory_id
connector_type = ConnectorType(testbed_id, factory_id, name,
- max, min)
+ help, max, min)
connector_key = (testbed_id, factory_id, name)
if connector_key in to_connections:
for (from_, can_cross, init_code, compl_code) in \
compl_code)
factory.add_connector_type(connector_type)
+
+def _expand(val):
+ """
+ Expands multiple values in the "val" tuple to create cross products:
+
+ >>> list(_expand((1,2,3)))
+ [(1, 2, 3)]
+ >>> list(_expand((1,(2,4,5),3)))
+ [(1, 2, 3), (1, 4, 3), (1, 5, 3)]
+ >>> list(_expand(((1,2),(2,4,5),3)))
+ [(1, 2, 3), (1, 4, 3), (1, 5, 3), (2, 2, 3), (2, 4, 3), (2, 5, 3)]
+ """
+ if not val:
+ yield ()
+ elif isinstance(val[0], (list,set,tuple)):
+ for x in val[0]:
+ x = (x,)
+ for e_val in _expand(val[1:]):
+ yield x + e_val
+ else:
+ x = (val[0],)
+ for e_val in _expand(val[1:]):
+ yield x + e_val
+